source
stringlengths
3
92
c
stringlengths
26
2.25M
computeStats_c.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <xmmintrin.h> #include <assert.h> #include <math.h> #include <sys/time.h> #include <mex.h> int sort(const void *x, const void *y) { if (*(int*)x > *(int*)y) return 1; else if (*(int*)x < *(int*)y) return -1; else return 0; } void mexFunction (int nlhs, mxArray *plhs[], int nrhs, const mxArray*prhs[]) { /* Input parameters */ /* [0]: Similarity matrix * [1]: queriesClasses * [2]: datasetClasses * [3]: NRelevantsPerQuery, number of relevants for each query. * [4]: queriesIdx (can be -1s) */ /* Output parameters */ /* [0]: p@1 array * [1]: map array * [2]: idx of the best match for each query */ int Nqueries, Ndataset; int *queriesCls; int *datasetCls; int *NRelevantsPerQuery; int *queriesIdx; float *S; /* Read Data */ S = (float*)mxGetData(prhs[0]); Nqueries = (int) mxGetN(prhs[0]); Ndataset = (int) mxGetM(prhs[0]); queriesCls = (int*)mxGetData(prhs[1]); datasetCls = (int*)mxGetData(prhs[2]); NRelevantsPerQuery = (int*)mxGetData(prhs[3]); queriesIdx = (int*)mxGetData(prhs[4]); /* Prepare output */ mwSize dims[1]; dims[0]= Nqueries; plhs[0] = mxCreateNumericArray(1, dims, mxSINGLE_CLASS, mxREAL); plhs[1] = mxCreateNumericArray(1, dims, mxSINGLE_CLASS, mxREAL); plhs[2] = mxCreateNumericArray(1, dims, mxINT32_CLASS, mxREAL); float *pP1 = (float*)mxGetData(plhs[0]); float *pMap = (float*)mxGetData(plhs[1]); int *bestIdx = (int*)mxGetData(plhs[2]); /* one query per row, scores in each column */ /* for each query */ #pragma omp parallel for for (int i=0; i < Nqueries; i++) { pMap[i]=0; pP1[i]=0; /* Create a private list of relevants. 
*/ int *rank = (int*)malloc(NRelevantsPerQuery[i]*sizeof(int)); int Nrelevants = 0; /* Get its class */ int qclass = queriesCls[i]; /* For each element in the dataset */ float bestS=-99999; int p1=0; for (int j=0; j < Ndataset; j++) { float s = S[i*Ndataset + j]; /* Precision at 1 part */ if (queriesIdx[i]!=j && s > bestS) { bestS = s; p1 = datasetCls[j]==qclass; bestIdx[i] = j+1; /* Matlab style */ } /* If it is from the same class and it is not the query idx, it is a relevant one. */ /* Compute how many on the dataset get a better score and how many get an equal one, excluding itself and the query.*/ if (datasetCls[j]==qclass && queriesIdx[i]!=j) { int better=0; int equal = 0; for (int k=0; k < Ndataset; k++) { if (k!=j && queriesIdx[i]!=k) { float s2 = S[i*Ndataset + k]; if (s2> s) better++; else if (s2==s) equal++; } } rank[Nrelevants]=better+floor(equal/2.0); Nrelevants++; } } /* Sort the ranked positions) */ qsort(rank, Nrelevants, sizeof(int), sort); pP1[i] = p1; /* Get mAP and store it */ for(int j=0;j<Nrelevants;j++){ /* if rank[i] >=k it was not on the topk. Since they are sorted, that means bail out already */ float prec_at_k = ((float)(j+1))/(rank[j]+1); //mexPrintf("prec_at_k: %f\n", prec_at_k); pMap[i] += prec_at_k; } pMap[i]/=Nrelevants; } return; }
BKTree.h
#ifndef _SPTAG_COMMON_BKTREE_H_
#define _SPTAG_COMMON_BKTREE_H_

#include <iostream>
#include <stack>
#include <string>
#include <vector>

#include "../VectorIndex.h"

#include "CommonUtils.h"
#include "QueryResultSet.h"
#include "WorkSpace.h"

// NOTE(review): std::unordered_map, min, MaxDist and omp_get_num_threads are
// used below but not included/declared here; presumably they come in through
// the project headers above — confirm.
#pragma warning(disable:4996) // 'fopen': This function or variable may be unsafe. Consider using fopen_s instead. To disable deprecation, use _CRT_SECURE_NO_WARNINGS. See online help for details.

namespace SPTAG
{
    namespace COMMON
    {
        // node type for storing BKT
        struct BKTNode
        {
            int centerid;   // sample id of this node's cluster center
            int childStart; // index of first child in the flat node array; <0 marks a collapsed/leaf chain
            int childEnd;   // one past the last child

            BKTNode(int cid = -1) : centerid(cid), childStart(-1), childEnd(-1) {}
        };

        // Scratch buffers for one k-means run: cluster centers, per-thread
        // partial sums/counts, and per-sample labels. Owns raw arrays sized
        // K (clusters) x D (dims) x T (threads) at construction.
        // NOTE(review): raw owning pointers with no copy/move suppression —
        // copying a KmeansArgs would double-delete; confirm it is never copied.
        template <typename T>
        struct KmeansArgs {
            int _K; // number of clusters
            int _D; // feature dimension
            int _T; // number of worker threads
            T* centers;          // current centers, K x D
            int* counts;         // final per-cluster counts, K
            float* newCenters;   // per-thread center accumulators, T x K x D
            int* newCounts;      // per-thread counts, T x K
            char* label;         // cluster label per sample (so K must fit in char)
            int* clusterIdx;     // per-thread representative sample id, T x K
            float* clusterDist;  // per-thread representative distance, T x K
            T* newTCenters;      // best/next centers in T's type, K x D

            KmeansArgs(int k, int dim, int datasize, int threadnum) : _K(k), _D(dim), _T(threadnum) {
                centers = new T[k * dim];
                counts = new int[k];
                newCenters = new float[threadnum * k * dim];
                newCounts = new int[threadnum * k];
                label = new char[datasize];
                clusterIdx = new int[threadnum * k];
                clusterDist = new float[threadnum * k];
                newTCenters = new T[k * dim];
            }

            ~KmeansArgs() {
                delete[] centers;
                delete[] counts;
                delete[] newCenters;
                delete[] newCounts;
                delete[] label;
                delete[] clusterIdx;
                delete[] clusterDist;
                delete[] newTCenters;
            }

            // Zero the per-thread count accumulators.
            inline void ClearCounts() {
                memset(newCounts, 0, sizeof(int) * _T * _K);
            }

            // Zero the per-thread center accumulators.
            inline void ClearCenters() {
                memset(newCenters, 0, sizeof(float) * _T * _K * _D);
            }

            // Reset representative picks; 'dist' is the sentinel starting
            // distance (+/-MaxDist depending on whether we minimize or maximize).
            inline void ClearDists(float dist) {
                for (int i = 0; i < _T * _K; i++) {
                    clusterIdx[i] = -1;
                    clusterDist[i] = dist;
                }
            }

            // In-place cycle sort of indices[first,last) so that samples are
            // grouped by their cluster label, then moves each cluster's
            // representative (clusterIdx[k]) to the end of its segment.
            // NOTE(review): the trailing while loop assumes clusterIdx[k] is
            // present inside segment k; if not, i runs past the segment —
            // confirm the invariant is guaranteed by KmeansAssign.
            void Shuffle(std::vector<int>& indices, int first, int last) {
                int* pos = new int[_K];
                pos[0] = first;
                for (int k = 1; k < _K; k++) pos[k] = pos[k - 1] + newCounts[k - 1];

                for (int k = 0; k < _K; k++) {
                    if (newCounts[k] == 0) continue;
                    int i = pos[k];
                    while (newCounts[k] > 0) {
                        int swapid = pos[(int)(label[i])] + newCounts[(int)(label[i])] - 1;
                        newCounts[(int)(label[i])]--;
                        std::swap(indices[i], indices[swapid]);
                        std::swap(label[i], label[swapid]);
                    }
                    while (indices[i] != clusterIdx[k]) i++;
                    std::swap(indices[i], indices[pos[k] + counts[k] - 1]);
                }
                delete[] pos;
            }
        };

        // Balanced K-means Tree: a forest of trees whose internal nodes are
        // cluster centers and whose leaves are individual samples, stored as
        // flat BKTNode arrays (m_pTreeRoots) with per-tree start offsets.
        class BKTree
        {
        public:
            BKTree(): m_iTreeNumber(1), m_iBKTKmeansK(32), m_iBKTLeafSize(8), m_iSamples(1000) {}

            // Copies only the tuning parameters, not the built trees.
            BKTree(BKTree& other): m_iTreeNumber(other.m_iTreeNumber), m_iBKTKmeansK(other.m_iBKTKmeansK), m_iBKTLeafSize(other.m_iBKTLeafSize), m_iSamples(other.m_iSamples) {}
            ~BKTree() {}

            inline const BKTNode& operator[](int index) const { return m_pTreeRoots[index]; }
            inline BKTNode& operator[](int index) { return m_pTreeRoots[index]; }

            inline int size() const { return (int)m_pTreeRoots.size(); }

            inline const std::unordered_map<int, int>& GetSampleMap() const { return m_pSampleCenterMap; }

            // Builds m_iTreeNumber trees over the given sample indices (all
            // samples of 'index' when indices == nullptr) by recursively
            // k-means-splitting ranges until they fit in a leaf.
            // NOTE(review): omp_get_num_threads() is called outside any
            // parallel region here (returns 1 per the OpenMP spec), while
            // KmeansAssign later parallelizes — confirm the per-thread
            // buffers in KmeansArgs are sized as intended.
            template <typename T>
            void BuildTrees(VectorIndex* index, std::vector<int>* indices = nullptr)
            {
                // Work item: node 'index' covers localindices[first, last).
                struct BKTStackItem {
                    int index, first, last;
                    BKTStackItem(int index_, int first_, int last_) : index(index_), first(first_), last(last_) {}
                };
                std::stack<BKTStackItem> ss;

                std::vector<int> localindices;
                if (indices == nullptr) {
                    localindices.resize(index->GetNumSamples());
                    for (int i = 0; i < index->GetNumSamples(); i++) localindices[i] = i;
                }
                else {
                    localindices.assign(indices->begin(), indices->end());
                }
                KmeansArgs<T> args(m_iBKTKmeansK, index->GetFeatureDim(), (int)localindices.size(), omp_get_num_threads());

                m_pSampleCenterMap.clear();
                for (char i = 0; i < m_iTreeNumber; i++)
                {
                    std::random_shuffle(localindices.begin(), localindices.end());

                    m_pTreeStart.push_back((int)m_pTreeRoots.size());
                    // Root node temporarily carries the sample count as its center id.
                    m_pTreeRoots.push_back(BKTNode((int)localindices.size()));
                    std::cout << "Start to build BKTree " << i + 1 << std::endl;

                    ss.push(BKTStackItem(m_pTreeStart[i], 0, (int)localindices.size()));
                    while (!ss.empty()) {
                        BKTStackItem item = ss.top(); ss.pop();
                        int newBKTid = (int)m_pTreeRoots.size();
                        m_pTreeRoots[item.index].childStart = newBKTid;
                        if (item.last - item.first <= m_iBKTLeafSize) {
                            // Small enough: emit each sample as a leaf child.
                            for (int j = item.first; j < item.last; j++) {
                                m_pTreeRoots.push_back(BKTNode(localindices[j]));
                            }
                        }
                        else { // clustering the data into BKTKmeansK clusters
                            int numClusters = KmeansClustering(index, localindices, item.first, item.last, args);
                            if (numClusters <= 1) {
                                // Degenerate split: collapse the whole range
                                // under one center; negative childStart marks
                                // the collapsed node.
                                int end = min(item.last + 1, (int)localindices.size());
                                std::sort(localindices.begin() + item.first, localindices.begin() + end);
                                m_pTreeRoots[item.index].centerid = localindices[item.first];
                                m_pTreeRoots[item.index].childStart = -m_pTreeRoots[item.index].childStart;
                                for (int j = item.first + 1; j < end; j++) {
                                    m_pTreeRoots.push_back(BKTNode(localindices[j]));
                                    m_pSampleCenterMap[localindices[j]] = m_pTreeRoots[item.index].centerid;
                                }
                                m_pSampleCenterMap[-1 - m_pTreeRoots[item.index].centerid] = item.index;
                            }
                            else {
                                // One child node per non-empty cluster; the
                                // last sample of each segment (placed there by
                                // KmeansArgs::Shuffle) is the cluster center.
                                for (int k = 0; k < m_iBKTKmeansK; k++) {
                                    if (args.counts[k] == 0) continue;
                                    m_pTreeRoots.push_back(BKTNode(localindices[item.first + args.counts[k] - 1]));
                                    if (args.counts[k] > 1) ss.push(BKTStackItem(newBKTid++, item.first, item.first + args.counts[k] - 1));
                                    item.first += args.counts[k];
                                }
                            }
                        }
                        m_pTreeRoots[item.index].childEnd = (int)m_pTreeRoots.size();
                    }
                    std::cout << i + 1 << " BKTree built, " << m_pTreeRoots.size() - m_pTreeStart[i] << " " << localindices.size() << std::endl;
                }
            }

            // Serializes tree-count, tree-start offsets and the flat node
            // array as raw binary. Returns false only if the file can't open.
            bool SaveTrees(std::string sTreeFileName) const
            {
                std::cout << "Save BKT to " << sTreeFileName << std::endl;
                FILE *fp = fopen(sTreeFileName.c_str(), "wb");
                if (fp == NULL) return false;

                fwrite(&m_iTreeNumber, sizeof(int), 1, fp);
                fwrite(m_pTreeStart.data(), sizeof(int), m_iTreeNumber, fp);
                int treeNodeSize = (int)m_pTreeRoots.size();
                fwrite(&treeNodeSize, sizeof(int), 1, fp);
                fwrite(m_pTreeRoots.data(), sizeof(BKTNode), treeNodeSize, fp);
                fclose(fp);
                std::cout << "Save BKT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" << std::endl;
                return true;
            }

            // Deserializes from an in-memory image (same layout as SaveTrees).
            // NOTE(review): no bounds checking against the buffer size —
            // trusts the caller's image entirely.
            bool LoadTrees(char* pBKTMemFile)
            {
                m_iTreeNumber = *((int*)pBKTMemFile);
                pBKTMemFile += sizeof(int);
                m_pTreeStart.resize(m_iTreeNumber);
                memcpy(m_pTreeStart.data(), pBKTMemFile, sizeof(int) * m_iTreeNumber);
                pBKTMemFile += sizeof(int)*m_iTreeNumber;

                int treeNodeSize = *((int*)pBKTMemFile);
                pBKTMemFile += sizeof(int);
                m_pTreeRoots.resize(treeNodeSize);
                memcpy(m_pTreeRoots.data(), pBKTMemFile, sizeof(BKTNode) * treeNodeSize);
                return true;
            }

            // Deserializes from a file written by SaveTrees.
            // NOTE(review): fread return values are ignored; a truncated file
            // yields uninitialized/partial trees rather than failure.
            bool LoadTrees(std::string sTreeFileName)
            {
                std::cout << "Load BKT From " << sTreeFileName << std::endl;
                FILE *fp = fopen(sTreeFileName.c_str(), "rb");
                if (fp == NULL) return false;

                fread(&m_iTreeNumber, sizeof(int), 1, fp);
                m_pTreeStart.resize(m_iTreeNumber);
                fread(m_pTreeStart.data(), sizeof(int), m_iTreeNumber, fp);
                int treeNodeSize;
                fread(&treeNodeSize, sizeof(int), 1, fp);
                m_pTreeRoots.resize(treeNodeSize);
                fread(m_pTreeRoots.data(), sizeof(BKTNode), treeNodeSize, fp);
                fclose(fp);
                std::cout << "Load BKT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" << std::endl;
                return true;
            }

            // Seeds the search priority queue with each tree's root (or the
            // root's children), keyed by distance from the query.
            template <typename T>
            void InitSearchTrees(const VectorIndex* p_index, const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space) const
            {
                for (char i = 0; i < m_iTreeNumber; i++) {
                    const BKTNode& node = m_pTreeRoots[m_pTreeStart[i]];
                    if (node.childStart < 0) {
                        p_space.m_SPTQueue.insert(COMMON::HeapCell(m_pTreeStart[i], p_index->ComputeDistance((const void*)p_query.GetTarget(), p_index->GetSample(node.centerid))));
                    }
                    else {
                        for (int begin = node.childStart; begin < node.childEnd; begin++) {
                            int index = m_pTreeRoots[begin].centerid;
                            p_space.m_SPTQueue.insert(COMMON::HeapCell(begin, p_index->ComputeDistance((const void*)p_query.GetTarget(), p_index->GetSample(index))));
                        }
                    }
                }
            }

            // Best-first descent: pops the closest tree node, emits leaves
            // into the neighbor queue, expands internal nodes, and stops once
            // p_limits leaves have been checked or the queue is exhausted.
            template <typename T>
            void SearchTrees(const VectorIndex* p_index, const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space, const int p_limits) const
            {
                do {
                    COMMON::HeapCell bcell = p_space.m_SPTQueue.pop();
                    const BKTNode& tnode = m_pTreeRoots[bcell.node];
                    if (tnode.childStart < 0) {
                        // Leaf (or collapsed) node: schedule its sample once.
                        if (!p_space.CheckAndSet(tnode.centerid)) {
                            p_space.m_iNumberOfCheckedLeaves++;
                            p_space.m_NGQueue.insert(COMMON::HeapCell(tnode.centerid, bcell.distance));
                        }
                        if (p_space.m_iNumberOfCheckedLeaves >= p_limits) break;
                    }
                    else {
                        // Internal node: its center is itself a sample, then
                        // push all children for further expansion.
                        if (!p_space.CheckAndSet(tnode.centerid)) {
                            p_space.m_NGQueue.insert(COMMON::HeapCell(tnode.centerid, bcell.distance));
                        }
                        for (int begin = tnode.childStart; begin < tnode.childEnd; begin++) {
                            int index = m_pTreeRoots[begin].centerid;
                            p_space.m_SPTQueue.insert(COMMON::HeapCell(begin, p_index->ComputeDistance((const void*)p_query.GetTarget(), p_index->GetSample(index))));
                        }
                    }
                } while (!p_space.m_SPTQueue.empty());
            }

        private:
            // One k-means assignment pass over indices[first,last): labels
            // each sample with its nearest center (the lambda term penalizes
            // already-large clusters when updating), accumulates per-thread
            // sums/counts, then reduces them into thread 0's slots. When
            // updateCenters is set it also recomputes centers into
            // newTCenters, reseeding empty clusters from the largest
            // cluster's farthest sample. Returns the total assignment distance.
            template <typename T>
            float KmeansAssign(VectorIndex* p_index, std::vector<int>& indices, const int first, const int last, KmeansArgs<T>& args, const bool updateCenters) const {
                float currDist = 0;
                int threads = omp_get_num_threads();
                float lambda = (updateCenters) ? COMMON::Utils::GetBase<T>() * COMMON::Utils::GetBase<T>() / (100.0f * (last - first)) : 0.0f;
                int subsize = (last - first - 1) / threads + 1;

#pragma omp parallel for
                for (int tid = 0; tid < threads; tid++)
                {
                    int istart = first + tid * subsize;
                    int iend = min(first + (tid + 1) * subsize, last);
                    int *inewCounts = args.newCounts + tid * m_iBKTKmeansK;
                    float *inewCenters = args.newCenters + tid * m_iBKTKmeansK * p_index->GetFeatureDim();
                    int * iclusterIdx = args.clusterIdx + tid * m_iBKTKmeansK;
                    float * iclusterDist = args.clusterDist + tid * m_iBKTKmeansK;
                    float idist = 0;
                    for (int i = istart; i < iend; i++) {
                        int clusterid = 0;
                        float smallestDist = MaxDist;
                        for (int k = 0; k < m_iBKTKmeansK; k++) {
                            float dist = p_index->ComputeDistance(p_index->GetSample(indices[i]), (const void*)(args.centers + k*p_index->GetFeatureDim())) + lambda*args.counts[k];
                            if (dist > -MaxDist && dist < smallestDist) {
                                clusterid = k; smallestDist = dist;
                            }
                        }
                        args.label[i] = clusterid;
                        inewCounts[clusterid]++;
                        idist += smallestDist;
                        if (updateCenters) {
                            const T* v = (const T*)p_index->GetSample(indices[i]);
                            float* center = inewCenters + clusterid*p_index->GetFeatureDim();
                            for (int j = 0; j < p_index->GetFeatureDim(); j++) center[j] += v[j];
                            // Track the farthest member as the cluster representative.
                            if (smallestDist > iclusterDist[clusterid]) {
                                iclusterDist[clusterid] = smallestDist;
                                iclusterIdx[clusterid] = indices[i];
                            }
                        }
                        else {
                            // Track the closest member as the cluster representative.
                            if (smallestDist <= iclusterDist[clusterid]) {
                                iclusterDist[clusterid] = smallestDist;
                                iclusterIdx[clusterid] = indices[i];
                            }
                        }
                    }
                    COMMON::Utils::atomic_float_add(&currDist, idist);
                }

                // Reduce per-thread counts into thread 0's slice.
                for (int i = 1; i < threads; i++) {
                    for (int k = 0; k < m_iBKTKmeansK; k++)
                        args.newCounts[k] += args.newCounts[i*m_iBKTKmeansK + k];
                }

                if (updateCenters) {
                    // Reduce per-thread center sums.
                    for (int i = 1; i < threads; i++) {
                        float* currCenter = args.newCenters + i*m_iBKTKmeansK*p_index->GetFeatureDim();
                        for (int j = 0; j < m_iBKTKmeansK * p_index->GetFeatureDim(); j++) args.newCenters[j] += currCenter[j];
                    }
                    // Find the largest cluster and its farthest sample; it
                    // reseeds any cluster that ended up empty.
                    int maxcluster = 0;
                    for (int k = 1; k < m_iBKTKmeansK; k++) if (args.newCounts[maxcluster] < args.newCounts[k]) maxcluster = k;

                    int maxid = maxcluster;
                    for (int tid = 1; tid < threads; tid++) {
                        if (args.clusterDist[maxid] < args.clusterDist[tid * m_iBKTKmeansK + maxcluster]) maxid = tid * m_iBKTKmeansK + maxcluster;
                    }
                    if (args.clusterIdx[maxid] < 0 || args.clusterIdx[maxid] >= p_index->GetNumSamples())
                        std::cout << "first:" << first << " last:" << last << " maxcluster:" << maxcluster << "(" << args.newCounts[maxcluster] << ") Error maxid:" << maxid << " dist:" << args.clusterDist[maxid] << std::endl;
                    maxid = args.clusterIdx[maxid];

                    for (int k = 0; k < m_iBKTKmeansK; k++) {
                        T* TCenter = args.newTCenters + k * p_index->GetFeatureDim();
                        if (args.newCounts[k] == 0) {
                            //int nextid = Utils::rand_int(last, first);
                            //while (args.label[nextid] != maxcluster) nextid = Utils::rand_int(last, first);
                            int nextid = maxid;
                            std::memcpy(TCenter, p_index->GetSample(nextid), sizeof(T)*p_index->GetFeatureDim());
                        }
                        else {
                            // Mean of the cluster members, optionally
                            // renormalized for cosine distance, then cast
                            // back to the stored element type.
                            float* currCenters = args.newCenters + k * p_index->GetFeatureDim();
                            for (int j = 0; j < p_index->GetFeatureDim(); j++) currCenters[j] /= args.newCounts[k];

                            if (p_index->GetDistCalcMethod() == DistCalcMethod::Cosine) {
                                COMMON::Utils::Normalize(currCenters, p_index->GetFeatureDim(), COMMON::Utils::GetBase<T>());
                            }
                            for (int j = 0; j < p_index->GetFeatureDim(); j++) TCenter[j] = (T)(currCenters[j]);
                        }
                    }
                }
                else {
                    // Reduce per-thread representative picks (keep the closest).
                    for (int i = 1; i < threads; i++) {
                        for (int k = 0; k < m_iBKTKmeansK; k++) {
                            if (args.clusterIdx[i*m_iBKTKmeansK + k] != -1 && args.clusterDist[i*m_iBKTKmeansK + k] <= args.clusterDist[k]) {
                                args.clusterDist[k] = args.clusterDist[i*m_iBKTKmeansK + k];
                                args.clusterIdx[k] = args.clusterIdx[i*m_iBKTKmeansK + k];
                            }
                        }
                    }
                }
                return currDist;
            }

            // Full k-means over indices[first,last): 3 random restarts on a
            // sample window of up to m_iSamples points to pick initial
            // centers, Lloyd iterations until converged or no improvement for
            // 5 rounds, then a final full assignment pass. Groups indices by
            // cluster via args.Shuffle and returns the number of non-empty
            // clusters (<=1 means the split failed).
            template <typename T>
            int KmeansClustering(VectorIndex* p_index, std::vector<int>& indices, const int first, const int last, KmeansArgs<T>& args) const {
                int iterLimit = 100;

                int batchEnd = min(first + m_iSamples, last);
                float currDiff, currDist, minClusterDist = MaxDist;

                for (int numKmeans = 0; numKmeans < 3; numKmeans++) {
                    for (int k = 0; k < m_iBKTKmeansK; k++) {
                        int randid = COMMON::Utils::rand_int(last, first);
                        std::memcpy(args.centers + k*p_index->GetFeatureDim(), p_index->GetSample(indices[randid]), sizeof(T)*p_index->GetFeatureDim());
                    }
                    args.ClearCounts();
                    currDist = KmeansAssign(p_index, indices, first, batchEnd, args, false);
                    if (currDist < minClusterDist) {
                        minClusterDist = currDist;
                        memcpy(args.newTCenters, args.centers, sizeof(T)*m_iBKTKmeansK*p_index->GetFeatureDim());
                        memcpy(args.counts, args.newCounts, sizeof(int) * m_iBKTKmeansK);
                    }
                }

                minClusterDist = MaxDist;
                int noImprovement = 0;
                for (int iter = 0; iter < iterLimit; iter++) {
                    std::memcpy(args.centers, args.newTCenters, sizeof(T)*m_iBKTKmeansK*p_index->GetFeatureDim());
                    std::random_shuffle(indices.begin() + first, indices.begin() + last);

                    args.ClearCenters();
                    args.ClearCounts();
                    args.ClearDists(-MaxDist);
                    currDist = KmeansAssign(p_index, indices, first, batchEnd, args, true);
                    memcpy(args.counts, args.newCounts, sizeof(int)*m_iBKTKmeansK);

                    // Convergence measure: total movement of the centers.
                    currDiff = 0;
                    for (int k = 0; k < m_iBKTKmeansK; k++) {
                        currDiff += p_index->ComputeDistance((const void*)(args.centers + k*p_index->GetFeatureDim()), (const void*)(args.newTCenters + k*p_index->GetFeatureDim()));
                    }

                    if (currDist < minClusterDist) {
                        noImprovement = 0;
                        minClusterDist = currDist;
                    }
                    else {
                        noImprovement++;
                    }
                    if (currDiff < 1e-3 || noImprovement >= 5) break;
                }

                // Final assignment over the full range with the converged centers.
                args.ClearCounts();
                args.ClearDists(MaxDist);
                currDist = KmeansAssign(p_index, indices, first, last, args, false);
                memcpy(args.counts, args.newCounts, sizeof(int)*m_iBKTKmeansK);

                int numClusters = 0;
                for (int i = 0; i < m_iBKTKmeansK; i++) if (args.counts[i] > 0) numClusters++;

                if (numClusters <= 1) {
                    //if (last - first > 1) std::cout << "large cluster:" << last - first << " dist:" << currDist << std::endl;
                    return numClusters;
                }
                args.Shuffle(indices, first, last);
                return numClusters;
            }

        private:
            std::vector<int> m_pTreeStart;            // root offset of each tree in m_pTreeRoots
            std::vector<BKTNode> m_pTreeRoots;        // all trees' nodes, flattened
            std::unordered_map<int, int> m_pSampleCenterMap; // collapsed-cluster bookkeeping

        public:
            int m_iTreeNumber, m_iBKTKmeansK, m_iBKTLeafSize, m_iSamples;
        };
    }
}

#endif
stream.c
/*-----------------------------------------------------------------------*/ /* Program: STREAM */ /* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2013: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear, and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. 
Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ # include <stdio.h> # include <unistd.h> # include <math.h> # include <float.h> # include <limits.h> # include <sys/time.h> /*----------------------------------------------------------------------- * INSTRUCTIONS: * * 1) STREAM requires different amounts of memory to run on different * systems, depending on both the system cache size(s) and the * granularity of the system timer. * You should adjust the value of 'STREAM_ARRAY_SIZE' (below) * to meet *both* of the following criteria: * (a) Each array must be at least 4 times the size of the * available cache memory. I don't worry about the difference * between 10^6 and 2^20, so in practice the minimum array size * is about 3.8 times the cache size. * Example 1: One Xeon E3 with 8 MB L3 cache * STREAM_ARRAY_SIZE should be >= 4 million, giving * an array size of 30.5 MB and a total memory requirement * of 91.5 MB. * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) * STREAM_ARRAY_SIZE should be >= 20 million, giving * an array size of 153 MB and a total memory requirement * of 458 MB. * (b) The size should be large enough so that the 'timing calibration' * output by the program is at least 20 clock-ticks. * Example: most versions of Windows have a 10 millisecond timer * granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds. * If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. * This means the each array must be at least 1 GB, or 128M elements. * * Version 5.10 increases the default array size from 2 million * elements to 10 million elements in response to the increasing * size of L3 caches. The new default size is large enough for caches * up to 20 MB. 
* Version 5.10 changes the loop index variables from "register int" * to "ssize_t", which allows array indices >2^32 (4 billion) * on properly configured 64-bit systems. Additional compiler options * (such as "-mcmodel=medium") may be required for large memory runs. * * Array size can be set at compile time without modifying the source * code for the (many) compilers that support preprocessor definitions * on the compile line. E.g., * gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M * will override the default size of 10M with a new size of 100M elements * per array. */ #ifndef STREAM_ARRAY_SIZE # define STREAM_ARRAY_SIZE 10000000 #endif /* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result * for any iteration after the first, therefore the minimum value * for NTIMES is 2. * There are no rules on maximum allowable values for NTIMES, but * values larger than the default are unlikely to noticeably * increase the reported performance. * NTIMES can also be set on the compile line without changing the source * code using, for example, "-DNTIMES=7". */ #ifdef NTIMES #if NTIMES<=1 # define NTIMES 10 #endif #endif #ifndef NTIMES # define NTIMES 10 #endif /* Users are allowed to modify the "OFFSET" variable, which *may* change the * relative alignment of the arrays (though compilers may change the * effective offset by making the arrays non-contiguous on some systems). * Use of non-zero values for OFFSET can be especially helpful if the * STREAM_ARRAY_SIZE is set to a value close to a large power of 2. * OFFSET can also be set on the compile line without changing the source * code using, for example, "-DOFFSET=56". */ #ifndef OFFSET # define OFFSET 0 #endif /* * 3) Compile the code with optimization. Many compilers generate * unreasonably bad code before the optimizer tightens things up. * If the results are unreasonably good, on the other hand, the * optimizer might be too smart for me! 
* * For a simple single-core version, try compiling with: * cc -O stream.c -o stream * This is known to work on many, many systems.... * * To use multiple cores, you need to tell the compiler to obey the OpenMP * directives in the code. This varies by compiler, but a common example is * gcc -O -fopenmp stream.c -o stream_omp * The environment variable OMP_NUM_THREADS allows runtime control of the * number of threads/cores used when the resulting "stream_omp" program * is executed. * * To run with single-precision variables and arithmetic, simply add * -DSTREAM_TYPE=float * to the compile line. * Note that this changes the minimum array sizes required --- see (1) above. * * The preprocessor directive "TUNED" does not do much -- it simply causes the * code to call separate functions to execute each kernel. Trivial versions * of these functions are provided, but they are *not* tuned -- they just * provide predefined interfaces to be replaced with tuned code. * * * 4) Optional: Mail the results to mccalpin@cs.virginia.edu * Be sure to include info that will help me understand: * a) the computer hardware configuration (e.g., processor model, memory type) * b) the compiler name/version and compilation flags * c) any run-time information (such as OMP_NUM_THREADS) * d) all of the output from the test case. * * Thanks! 
 *
 *-----------------------------------------------------------------------*/
# define HLINE "-------------------------------------------------------------\n"

# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif

#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif

/* The three benchmark arrays; OFFSET lets users nudge relative alignment. */
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET],
    b[STREAM_ARRAY_SIZE+OFFSET],
    c[STREAM_ARRAY_SIZE+OFFSET];

/* Per-kernel timing accumulators, indexed Copy/Scale/Add/Triad. */
static double avgtime[4] = {0}, maxtime[4] = {0},
    mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};

static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "};

/* Bytes moved per kernel iteration: Copy/Scale touch 2 arrays, Add/Triad 3. */
static double bytes[4] = {
    2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
    };

extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif

/* Driver: prints configuration, calibrates the timer, runs the four kernels
 * NTIMES times, reports best-rate bandwidth, and validates the results. */
int main()
    {
    int			quantum, checktick();
    int			BytesPerWord;
    int			k;
    ssize_t		j;
    STREAM_TYPE		scalar;
    double		t, times[4][NTIMES];

    /* --- SETUP --- determine precision and check timing --- */

    printf(HLINE);
    printf("STREAM version $Revision: 5.10 $\n");
    printf(HLINE);
    BytesPerWord = sizeof(STREAM_TYPE);
    printf("This system uses %d bytes per array element.\n",
	BytesPerWord);

    printf(HLINE);
#ifdef N
    printf("***** WARNING: ******\n");
    printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
    printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n");
    printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
    printf("***** WARNING: ******\n");
#endif

    printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
    printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
	BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
	BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
    printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
	(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
	(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
    printf("Each kernel will be executed %d times.\n", NTIMES);
    printf(" The *best* time for each kernel (excluding the first iteration)\n");
    printf(" will be used to compute the reported bandwidth.\n");

#ifdef _OPENMP
    printf(HLINE);
#pragma omp parallel
    {
#pragma omp master
	{
	    k = omp_get_num_threads();
	    printf ("Number of Threads requested = %i\n",k);
        }
    }
#endif

#ifdef _OPENMP
	k = 0;
#pragma omp parallel
#pragma omp atomic
		k++;
    printf ("Number of Threads counted = %i\n",k);
#endif

    /* Get initial value for system clock. */
#pragma omp parallel for
    for (j=0; j<STREAM_ARRAY_SIZE; j++) {
	    a[j] = 1.0;
	    b[j] = 2.0;
	    c[j] = 0.0;
	}

    printf(HLINE);

    if  ( (quantum = checktick()) >= 1)
	printf("Your clock granularity/precision appears to be "
	    "%d microseconds.\n", quantum);
    else {
	printf("Your clock granularity appears to be "
	    "less than one microsecond.\n");
	quantum = 1;
    }

    /* Rough per-test time estimate using a single scaled pass over a[]. */
    t = mysecond();
#pragma omp parallel for
    for (j = 0; j < STREAM_ARRAY_SIZE; j++)
		a[j] = 2.0E0 * a[j];
    t = 1.0E6 * (mysecond() - t);

    printf("Each test below will take on the order"
	" of %d microseconds.\n", (int) t  );
    printf(" (= %d clock ticks)\n", (int) (t/quantum) );
    printf("Increase the size of the arrays if this shows that\n");
    printf("you are not getting at least 20 clock ticks per test.\n");

    printf(HLINE);

    printf("WARNING -- The above is only a rough guideline.\n");
    printf("For best results, please be sure you know the\n");
    printf("precision of your system timer.\n");
    printf(HLINE);

    /* --- MAIN LOOP --- repeat test cases NTIMES times --- */

    scalar = 3.0;
    for (k=0; k<NTIMES; k++)
	{
	times[0][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Copy();
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    c[j] = a[j];
#endif
	times[0][k] = mysecond() - times[0][k];

	times[1][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    b[j] = scalar*c[j];
#endif
	times[1][k] = mysecond() - times[1][k];

	times[2][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Add();
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    c[j] = a[j]+b[j];
#endif
	times[2][k] = mysecond() - times[2][k];

	times[3][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Triad(scalar);
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    a[j] = b[j]+scalar*c[j];
#endif
	times[3][k] = mysecond() - times[3][k];
	}

    /* --- SUMMARY --- */

    for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
	{
	for (j=0; j<4; j++)
	    {
	    avgtime[j] = avgtime[j] + times[j][k];
	    mintime[j] = MIN(mintime[j], times[j][k]);
	    maxtime[j] = MAX(maxtime[j], times[j][k]);
	    }
	}

    printf("Function Best Rate MB/s Avg time Min time Max time\n");
    for (j=0; j<4; j++) {
		avgtime[j] = avgtime[j]/(double)(NTIMES-1);

		printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j],
	       1.0E-06 * bytes[j]/mintime[j],
	       avgtime[j],
	       mintime[j],
	       maxtime[j]);
    }
    printf(HLINE);

    /* --- Check Results --- */
    checkSTREAMresults();
    printf(HLINE);

    return 0;
}

# define	M	20

/* Estimates the system clock granularity in microseconds by collecting M
 * consecutive distinct timer readings and taking the minimum gap. */
int
checktick()
    {
    int		i, minDelta, Delta;
    double	t1, t2, timesfound[M];

/*  Collect a sequence of M unique time values from the system. */

    for (i = 0; i < M; i++) {
	t1 = mysecond();
	while( ((t2=mysecond()) - t1) < 1.0E-6 )
	    ;
	timesfound[i] = t1 = t2;
	}

/*
 * Determine the minimum difference between these M values.
 * This result will be our estimate (in microseconds) for the
 * clock granularity.
 */

    minDelta = 1000000;
    for (i = 1; i < M; i++) {
	Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
	minDelta = MIN(minDelta, MAX(Delta,0));
	}

   return(minDelta);
    }

/* A gettimeofday routine to give access to the wall
   clock timer on most UNIX-like systems. */

#include <sys/time.h>

/* Wall-clock time in seconds.
 * NOTE(review): POSIX says the timezone argument of gettimeofday should be
 * NULL, and the return value stored in i is unused — harmless, but lint bait. */
double mysecond()
{
        struct timeval tp;
        struct timezone tzp;
        int i;

        i = gettimeofday(&tp,&tzp);
        return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}

#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif

/* Recomputes the expected scalar evolution of a/b/c through the NTIMES
 * kernel iterations and compares the average absolute error of each array
 * against a type-dependent epsilon, reporting per-element failures. */
void checkSTREAMresults ()
{
	STREAM_TYPE aj,bj,cj,scalar;
	STREAM_TYPE aSumErr,bSumErr,cSumErr;
	STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
	double epsilon;
	ssize_t	j;
	int	k,ierr,err;

    /* reproduce initialization */
	aj = 1.0;
	bj = 2.0;
	cj = 0.0;
    /* a[] is modified during timing check */
	aj = 2.0E0 * aj;
    /* now execute timing loop */
	scalar = 3.0;
	for (k=0; k<NTIMES; k++)
        {
            cj = aj;
            bj = scalar*cj;
            cj = aj+bj;
            aj = bj+scalar*cj;
        }

    /* accumulate deltas between observed and expected results */
	aSumErr = 0.0;
	bSumErr = 0.0;
	cSumErr = 0.0;
	for (j=0; j<STREAM_ARRAY_SIZE; j++) {
		aSumErr += abs(a[j] - aj);
		bSumErr += abs(b[j] - bj);
		cSumErr += abs(c[j] - cj);
		// if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj);	// MCCALPIN
	}
	aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
	bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
	cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;

	if (sizeof(STREAM_TYPE) == 4) {
		epsilon = 1.e-6;
	}
	else if (sizeof(STREAM_TYPE) == 8) {
		epsilon = 1.e-13;
	}
	else {
		printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE));
		epsilon = 1.e-6;
	}

	err = 0;
	if (abs(aAvgErr/aj) > epsilon) {
		err++;
		printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
		printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
		ierr = 0;
		for (j=0; j<STREAM_ARRAY_SIZE; j++) {
			if (abs(a[j]/aj-1.0) > epsilon) {
				ierr++;
#ifdef VERBOSE
				if (ierr < 10) {
					printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
						j,aj,a[j],abs((aj-a[j])/aAvgErr));
				}
#endif
			}
		}
		printf(" For array a[], %d errors were found.\n",ierr);
	}
	if (abs(bAvgErr/bj) > epsilon) {
		err++;
		printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
		printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
		printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
		ierr = 0;
		for (j=0; j<STREAM_ARRAY_SIZE; j++) {
			if (abs(b[j]/bj-1.0) > epsilon) {
				ierr++;
#ifdef VERBOSE
				if (ierr < 10) {
					printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
						j,bj,b[j],abs((bj-b[j])/bAvgErr));
				}
#endif
			}
		}
		printf(" For array b[], %d errors were found.\n",ierr);
	}
	if (abs(cAvgErr/cj) > epsilon) {
		err++;
		printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
		printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
		printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
		ierr = 0;
		for (j=0; j<STREAM_ARRAY_SIZE; j++) {
			if (abs(c[j]/cj-1.0) > epsilon) {
				ierr++;
#ifdef VERBOSE
				if (ierr < 10) {
					printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
						j,cj,c[j],abs((cj-c[j])/cAvgErr));
				}
#endif
			}
		}
		printf(" For array c[], %d errors were found.\n",ierr);
	}
	if (err == 0) {
		printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
	}
#ifdef VERBOSE
	printf ("Results Validation Verbose Results: \n");
	printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
	printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
	printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}

#ifdef TUNED
/* stubs for "tuned" versions of the kernels */
void tuned_STREAM_Copy()
{
	ssize_t j;
#pragma omp parallel for
        for (j=0; j<STREAM_ARRAY_SIZE; j++)
            c[j] = a[j];
}

void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
	ssize_t j;
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    b[j] = scalar*c[j];
}

void tuned_STREAM_Add()
{
	ssize_t j;
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    c[j] = a[j]+b[j];
}

void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
	ssize_t j;
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    a[j] = b[j]+scalar*c[j];
}
/* end of stubs for the "tuned" versions of the kernels */
#endif
convolution_winograd_transform_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Winograd input/output transforms for 3x3 stride-1 convolution, pack4 layout,
// vectorized with MIPS MSA intrinsics.
// NOTE(review): __msa_fmadd_w(a, b, c) is used throughout as a + b * c
// (accumulator first) — confirm against the MSA intrinsics reference.

// Input transform B^T * d * B for Winograd F(6x6, 3x3): turns each 8x8 input
// tile into an 8x8 transformed tile, processing 4 channels (one pack4 lane set)
// per vector op. First pass transforms rows into tmp[][][]; second pass
// transforms columns of tmp and scatters into bottom_blob_tm.
static void conv3x3s1_winograd63_transform_input_pack4_msa(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    // each tile produces 6x6 output pixels, tiles overlap by 2 input pixels
    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;

    // const float itm[8][8] = {
    //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
    //
    //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
    //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
    //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
    //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
    //
    //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
    // };

    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25

    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // row-transformed tile, [row of B^T d][input row m][pack4 lane]
        float tmp[8][8][4];

        v4f32 _v5_25 = __msa_fill_w_f32(5.25f);
        v4f32 _vm4_25 = __msa_fill_w_f32(-4.25f);
        v4f32 _vm1_25 = __msa_fill_w_f32(-1.25f);
        v4f32 _v0_25 = __msa_fill_w_f32(0.25f);
        v4f32 _vm2_5 = __msa_fill_w_f32(-2.5f);
        v4f32 _v0_5 = __msa_fill_w_f32(0.5f);
        v4f32 _v2 = __msa_fill_w_f32(2.f);
        v4f32 _v4 = __msa_fill_w_f32(4.f);

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 6) + (j * 6) * 4;

                // pass 1: transform each of the 8 input rows
                for (int m = 0; m < 8; m++)
                {
                    v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
                    v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
                    v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                    v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                    v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
                    v4f32 _r05 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);
                    v4f32 _r06 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0);
                    v4f32 _r07 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0);

                    v4f32 _tmp0m = __msa_fmadd_w(__msa_fsub_w(_r00, _r06), _v5_25, __msa_fsub_w(_r04, _r02));
                    v4f32 _tmp7m = __msa_fmadd_w(__msa_fsub_w(_r07, _r01), _v5_25, __msa_fsub_w(_r03, _r05));
                    __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0);
                    __msa_st_w((v4i32)_tmp7m, tmp[7][m], 0);

                    v4f32 _tmp12a = __msa_fmadd_w(__msa_fadd_w(_r02, _r06), _vm4_25, _r04);
                    v4f32 _tmp12b = __msa_fmadd_w(__msa_fadd_w(_r01, _r05), _vm4_25, _r03);

                    v4f32 _tmp1m = __msa_fadd_w(_tmp12a, _tmp12b);
                    v4f32 _tmp2m = __msa_fsub_w(_tmp12a, _tmp12b);
                    __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0);
                    __msa_st_w((v4i32)_tmp2m, tmp[2][m], 0);

                    v4f32 _tmp34a = __msa_fmadd_w(__msa_fmadd_w(_r06, _v0_25, _r02), _vm1_25, _r04);
                    v4f32 _tmp34b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_r01, _v0_5), _vm2_5, _r03), _v2, _r05);

                    v4f32 _tmp3m = __msa_fadd_w(_tmp34a, _tmp34b);
                    v4f32 _tmp4m = __msa_fsub_w(_tmp34a, _tmp34b);
                    __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0);
                    __msa_st_w((v4i32)_tmp4m, tmp[4][m], 0);

                    v4f32 _tmp56a = __msa_fmadd_w(_r06, _v4, __msa_fmadd_w(_r02, _vm1_25, _r04));
                    v4f32 _tmp56b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_r01, _v2), _vm2_5, _r03), _v0_5, _r05);

                    v4f32 _tmp5m = __msa_fadd_w(_tmp56a, _tmp56b);
                    v4f32 _tmp6m = __msa_fsub_w(_tmp56a, _tmp56b);
                    __msa_st_w((v4i32)_tmp5m, tmp[5][m], 0);
                    __msa_st_w((v4i32)_tmp6m, tmp[6][m], 0);

                    r0 += w * 4;
                }

                // destination: 8 row-planes of the transformed blob, all for this tile
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2;
                float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3;
                float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4;
                float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5;
                float* r0_tm_6 = r0_tm_0 + tiles * 4 * 6;
                float* r0_tm_7 = r0_tm_0 + tiles * 4 * 7;

                // pass 2: same transform applied column-wise to tmp
                for (int m = 0; m < 8; m++)
                {
                    v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0);
                    v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0);
                    v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0);
                    v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0);
                    v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0);
                    v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0);
                    v4f32 _tmp06 = (v4f32)__msa_ld_w(tmp[m][6], 0);
                    v4f32 _tmp07 = (v4f32)__msa_ld_w(tmp[m][7], 0);

                    v4f32 _r0tm0 = __msa_fmadd_w(__msa_fsub_w(_tmp00, _tmp06), _v5_25, __msa_fsub_w(_tmp04, _tmp02));
                    v4f32 _r0tm7 = __msa_fmadd_w(__msa_fsub_w(_tmp07, _tmp01), _v5_25, __msa_fsub_w(_tmp03, _tmp05));

                    v4f32 _tmp12a = __msa_fmadd_w(__msa_fadd_w(_tmp02, _tmp06), _vm4_25, _tmp04);
                    v4f32 _tmp12b = __msa_fmadd_w(__msa_fadd_w(_tmp01, _tmp05), _vm4_25, _tmp03);

                    v4f32 _r0tm1 = __msa_fadd_w(_tmp12a, _tmp12b);
                    v4f32 _r0tm2 = __msa_fsub_w(_tmp12a, _tmp12b);

                    v4f32 _tmp34a = __msa_fmadd_w(__msa_fmadd_w(_tmp06, _v0_25, _tmp02), _vm1_25, _tmp04);
                    v4f32 _tmp34b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_tmp01, _v0_5), _vm2_5, _tmp03), _v2, _tmp05);

                    v4f32 _r0tm3 = __msa_fadd_w(_tmp34a, _tmp34b);
                    v4f32 _r0tm4 = __msa_fsub_w(_tmp34a, _tmp34b);

                    v4f32 _tmp56a = __msa_fmadd_w(_tmp06, _v4, __msa_fmadd_w(_tmp02, _vm1_25, _tmp04));
                    v4f32 _tmp56b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_tmp01, _v2), _vm2_5, _tmp03), _v0_5, _tmp05);

                    v4f32 _r0tm5 = __msa_fadd_w(_tmp56a, _tmp56b);
                    v4f32 _r0tm6 = __msa_fsub_w(_tmp56a, _tmp56b);

                    __msa_st_w((v4i32)_r0tm0, r0_tm_0, 0);
                    __msa_st_w((v4i32)_r0tm1, r0_tm_1, 0);
                    __msa_st_w((v4i32)_r0tm2, r0_tm_2, 0);
                    __msa_st_w((v4i32)_r0tm3, r0_tm_3, 0);
                    __msa_st_w((v4i32)_r0tm4, r0_tm_4, 0);
                    __msa_st_w((v4i32)_r0tm5, r0_tm_5, 0);
                    __msa_st_w((v4i32)_r0tm6, r0_tm_6, 0);
                    __msa_st_w((v4i32)_r0tm7, r0_tm_7, 0);

                    // advance to the next group of 8 row-planes
                    r0_tm_0 += tiles * 4 * 8;
                    r0_tm_1 += tiles * 4 * 8;
                    r0_tm_2 += tiles * 4 * 8;
                    r0_tm_3 += tiles * 4 * 8;
                    r0_tm_4 += tiles * 4 * 8;
                    r0_tm_5 += tiles * 4 * 8;
                    r0_tm_6 += tiles * 4 * 8;
                    r0_tm_7 += tiles * 4 * 8;
                }
            }
        }
    }
}

// Output transform A^T * m * A for Winograd F(6x6, 3x3): turns each 8x8
// transformed tile back into a 6x6 output tile and adds the per-channel bias.
static void conv3x3s1_winograd63_transform_output_pack4_msa(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[6][8] = {
    //     {1.0f,  1.0f,   1.0f,   1.0f,   1.0f,  32.0f, 32.0f, 0.0f},
    //     {0.0f,  1.0f,  -1.0f,   2.0f,  -2.0f,  16.0f,-16.0f, 0.0f},
    //     {0.0f,  1.0f,   1.0f,   4.0f,   4.0f,   8.0f,  8.0f, 0.0f},
    //     {0.0f,  1.0f,  -1.0f,   8.0f,  -8.0f,   4.0f, -4.0f, 0.0f},
    //     {0.0f,  1.0f,   1.0f,  16.0f,  16.0f,   2.0f,  2.0f, 0.0f},
    //     {0.0f,  1.0f,  -1.0f,  32.0f, -32.0f,   1.0f, -1.0f, 1.0f}
    // };

    // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
    // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        // bias for this output channel; zero when no bias blob is supplied
        v4f32 _bias0 = biasptr ? (v4f32)__msa_ld_w(biasptr + p * 4, 0) : (v4f32)__msa_fill_w(0);

        float tmp[6][8][4];

        v4f32 _v32 = __msa_fill_w_f32(32.f);
        v4f32 _v16 = __msa_fill_w_f32(16.f);
        v4f32 _v8 = __msa_fill_w_f32(8.f);
        v4f32 _v4 = __msa_fill_w_f32(4.f);
        v4f32 _v2 = __msa_fill_w_f32(2.f);

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2;
                const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3;
                const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4;
                const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5;
                const float* output0_tm_6 = output0_tm_0 + tiles * 4 * 6;
                const float* output0_tm_7 = output0_tm_0 + tiles * 4 * 7;

                float* output0 = out0.row<float>(i * 6) + (j * 6) * 4;

                // pass 1: row transform into tmp (8 rows -> 6 rows)
                for (int m = 0; m < 8; m++)
                {
                    v4f32 _out0tm0 = (v4f32)__msa_ld_w(output0_tm_0, 0);
                    v4f32 _out0tm1 = (v4f32)__msa_ld_w(output0_tm_1, 0);
                    v4f32 _out0tm2 = (v4f32)__msa_ld_w(output0_tm_2, 0);
                    v4f32 _out0tm3 = (v4f32)__msa_ld_w(output0_tm_3, 0);
                    v4f32 _out0tm4 = (v4f32)__msa_ld_w(output0_tm_4, 0);
                    v4f32 _out0tm5 = (v4f32)__msa_ld_w(output0_tm_5, 0);
                    v4f32 _out0tm6 = (v4f32)__msa_ld_w(output0_tm_6, 0);
                    v4f32 _out0tm7 = (v4f32)__msa_ld_w(output0_tm_7, 0);

                    v4f32 _tmp024a = __msa_fadd_w(_out0tm1, _out0tm2);
                    v4f32 _tmp135a = __msa_fsub_w(_out0tm1, _out0tm2);

                    v4f32 _tmp024b = __msa_fadd_w(_out0tm3, _out0tm4);
                    v4f32 _tmp135b = __msa_fsub_w(_out0tm3, _out0tm4);

                    v4f32 _tmp024c = __msa_fadd_w(_out0tm5, _out0tm6);
                    v4f32 _tmp135c = __msa_fsub_w(_out0tm5, _out0tm6);

                    v4f32 _tmp0m = __msa_fadd_w(__msa_fadd_w(_out0tm0, _tmp024a), __msa_fmadd_w(_tmp024b, _v32, _tmp024c));
                    v4f32 _tmp2m = __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v4, _tmp024b), _v8, _tmp024c);
                    v4f32 _tmp4m = __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v16, _tmp024b), _v2, _tmp024c);
                    __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0);
                    __msa_st_w((v4i32)_tmp2m, tmp[2][m], 0);
                    __msa_st_w((v4i32)_tmp4m, tmp[4][m], 0);

                    v4f32 _tmp1m = __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v2, _tmp135b), _v16, _tmp135c);
                    v4f32 _tmp3m = __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v8, _tmp135b), _v4, _tmp135c);
                    v4f32 _tmp5m = __msa_fadd_w(__msa_fadd_w(_out0tm7, _tmp135a), __msa_fmadd_w(_tmp135c, _v32, _tmp135b));
                    __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0);
                    __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0);
                    __msa_st_w((v4i32)_tmp5m, tmp[5][m], 0);

                    output0_tm_0 += tiles * 4 * 8;
                    output0_tm_1 += tiles * 4 * 8;
                    output0_tm_2 += tiles * 4 * 8;
                    output0_tm_3 += tiles * 4 * 8;
                    output0_tm_4 += tiles * 4 * 8;
                    output0_tm_5 += tiles * 4 * 8;
                    output0_tm_6 += tiles * 4 * 8;
                    output0_tm_7 += tiles * 4 * 8;
                }

                // pass 2: column transform, add bias, write 6x6 output pixels
                for (int m = 0; m < 6; m++)
                {
                    v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0);
                    v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0);
                    v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0);
                    v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0);
                    v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0);
                    v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0);
                    v4f32 _tmp06 = (v4f32)__msa_ld_w(tmp[m][6], 0);
                    v4f32 _tmp07 = (v4f32)__msa_ld_w(tmp[m][7], 0);

                    v4f32 _tmp024a = __msa_fadd_w(_tmp01, _tmp02);
                    v4f32 _tmp135a = __msa_fsub_w(_tmp01, _tmp02);

                    v4f32 _tmp024b = __msa_fadd_w(_tmp03, _tmp04);
                    v4f32 _tmp135b = __msa_fsub_w(_tmp03, _tmp04);

                    v4f32 _tmp024c = __msa_fadd_w(_tmp05, _tmp06);
                    v4f32 _tmp135c = __msa_fsub_w(_tmp05, _tmp06);

                    v4f32 _out00 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp00, _tmp024a), __msa_fmadd_w(_tmp024b, _v32, _tmp024c)));
                    v4f32 _out02 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v4, _tmp024b), _v8, _tmp024c));
                    v4f32 _out04 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v16, _tmp024b), _v2, _tmp024c));
                    __msa_st_w((v4i32)_out00, output0, 0);
                    __msa_st_w((v4i32)_out02, output0 + 4 * 2, 0);
                    __msa_st_w((v4i32)_out04, output0 + 4 * 4, 0);

                    v4f32 _out01 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v2, _tmp135b), _v16, _tmp135c));
                    v4f32 _out03 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v8, _tmp135b), _v4, _tmp135c));
                    v4f32 _out05 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp07, _tmp135a), __msa_fmadd_w(_tmp135c, _v32, _tmp135b)));
                    __msa_st_w((v4i32)_out01, output0 + 4, 0);
                    __msa_st_w((v4i32)_out03, output0 + 4 * 3, 0);
                    __msa_st_w((v4i32)_out05, output0 + 4 * 5, 0);

                    output0 += outw * 4;
                }
            }
        }
    }
}

// Input transform for Winograd F(4x4, 3x3): each 6x6 input tile -> 6x6
// transformed tile, pack4 MSA, same two-pass (rows then columns) scheme as
// the F(6x6, 3x3) variant above.
static void conv3x3s1_winograd43_transform_input_pack4_msa(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;

    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
    // };

    // 0 =  4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 =  4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 =  2 * (r01 - r03) + r04 - r02
    // 5 =  4 * r01 - 5 * r03 + r05

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        float tmp[6][6][4];

        v4f32 _vm5 = __msa_fill_w_f32(-5.f);
        v4f32 _vm4 = __msa_fill_w_f32(-4.f);
        v4f32 _v4 = __msa_fill_w_f32(4.f);
        v4f32 _vm2 = __msa_fill_w_f32(-2.f);
        v4f32 _v2 = __msa_fill_w_f32(2.f);

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 4) + (j * 4) * 4;

                // pass 1: row transform
                for (int m = 0; m < 6; m++)
                {
                    v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
                    v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
                    v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                    v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                    v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
                    v4f32 _r05 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);

                    v4f32 _tmp0m = __msa_fmadd_w(__msa_fmadd_w(_r04, _v4, _r00), _vm5, _r02);
                    v4f32 _tmp1m = __msa_fmadd_w(__msa_fadd_w(_r04, _r03), _vm4, __msa_fadd_w(_r01, _r02));
                    v4f32 _tmp2m = __msa_fmadd_w(__msa_fsub_w(_r04, _r03), _v4, __msa_fsub_w(_r01, _r02));
                    v4f32 _tmp3m = __msa_fmadd_w(__msa_fsub_w(_r04, _r02), _vm2, __msa_fsub_w(_r01, _r03));
                    v4f32 _tmp4m = __msa_fmadd_w(__msa_fsub_w(_r04, _r02), _v2, __msa_fsub_w(_r01, _r03));
                    v4f32 _tmp5m = __msa_fmadd_w(__msa_fmadd_w(_r05, _v4, _r01), _vm5, _r03);

                    __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0);
                    __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0);
                    __msa_st_w((v4i32)_tmp2m, tmp[2][m], 0);
                    __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0);
                    __msa_st_w((v4i32)_tmp4m, tmp[4][m], 0);
                    __msa_st_w((v4i32)_tmp5m, tmp[5][m], 0);

                    r0 += w * 4;
                }

                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2;
                float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3;
                float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4;
                float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5;

                // pass 2: column transform, scatter into transformed blob
                for (int m = 0; m < 6; m++)
                {
                    v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0);
                    v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0);
                    v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0);
                    v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0);
                    v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0);
                    v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0);

                    v4f32 _r0tm0 = __msa_fmadd_w(__msa_fmadd_w(_tmp04, _v4, _tmp00), _vm5, _tmp02);
                    v4f32 _r0tm1 = __msa_fmadd_w(__msa_fadd_w(_tmp04, _tmp03), _vm4, __msa_fadd_w(_tmp01, _tmp02));
                    v4f32 _r0tm2 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp03), _v4, __msa_fsub_w(_tmp01, _tmp02));
                    v4f32 _r0tm3 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp02), _vm2, __msa_fsub_w(_tmp01, _tmp03));
                    v4f32 _r0tm4 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp02), _v2, __msa_fsub_w(_tmp01, _tmp03));
                    v4f32 _r0tm5 = __msa_fmadd_w(__msa_fmadd_w(_tmp05, _v4, _tmp01), _vm5, _tmp03);

                    __msa_st_w((v4i32)_r0tm0, r0_tm_0, 0);
                    __msa_st_w((v4i32)_r0tm1, r0_tm_1, 0);
                    __msa_st_w((v4i32)_r0tm2, r0_tm_2, 0);
                    __msa_st_w((v4i32)_r0tm3, r0_tm_3, 0);
                    __msa_st_w((v4i32)_r0tm4, r0_tm_4, 0);
                    __msa_st_w((v4i32)_r0tm5, r0_tm_5, 0);

                    r0_tm_0 += tiles * 4 * 6;
                    r0_tm_1 += tiles * 4 * 6;
                    r0_tm_2 += tiles * 4 * 6;
                    r0_tm_3 += tiles * 4 * 6;
                    r0_tm_4 += tiles * 4 * 6;
                    r0_tm_5 += tiles * 4 * 6;
                }
            }
        }
    }
}

// Output transform for Winograd F(4x4, 3x3): each 6x6 transformed tile ->
// 4x4 output pixels plus per-channel bias.
static void conv3x3s1_winograd43_transform_output_pack4_msa(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[4][6] = {
    //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };

    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 =       (r01 - r02) + (r03 - r04) * 2
    // 2 =       (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        // bias for this output channel; zero when no bias blob is supplied
        v4f32 _bias0 = biasptr ? (v4f32)__msa_ld_w(biasptr + p * 4, 0) : (v4f32)__msa_fill_w(0);

        float tmp[4][6][4];

        v4f32 _v2 = __msa_fill_w_f32(2.f);
        v4f32 _v4 = __msa_fill_w_f32(4.f);
        v4f32 _v8 = __msa_fill_w_f32(8.f);

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2;
                const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3;
                const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4;
                const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5;

                float* output0 = out0.row<float>(i * 4) + (j * 4) * 4;

                // pass 1: row transform into tmp (6 rows -> 4 rows)
                for (int m = 0; m < 6; m++)
                {
                    v4f32 _out0tm0 = (v4f32)__msa_ld_w(output0_tm_0, 0);
                    v4f32 _out0tm1 = (v4f32)__msa_ld_w(output0_tm_1, 0);
                    v4f32 _out0tm2 = (v4f32)__msa_ld_w(output0_tm_2, 0);
                    v4f32 _out0tm3 = (v4f32)__msa_ld_w(output0_tm_3, 0);
                    v4f32 _out0tm4 = (v4f32)__msa_ld_w(output0_tm_4, 0);
                    v4f32 _out0tm5 = (v4f32)__msa_ld_w(output0_tm_5, 0);

                    v4f32 _tmp02a = __msa_fadd_w(_out0tm1, _out0tm2);
                    v4f32 _tmp13a = __msa_fsub_w(_out0tm1, _out0tm2);

                    v4f32 _tmp02b = __msa_fadd_w(_out0tm3, _out0tm4);
                    v4f32 _tmp13b = __msa_fsub_w(_out0tm3, _out0tm4);

                    v4f32 _tmp0m = __msa_fadd_w(__msa_fadd_w(_out0tm0, _tmp02a), _tmp02b);
                    v4f32 _tmp1m = __msa_fmadd_w(_tmp13a, _v2, _tmp13b);
                    v4f32 _tmp2m = __msa_fmadd_w(_tmp02a, _v4, _tmp02b);
                    v4f32 _tmp3m = __msa_fmadd_w(__msa_fadd_w(_out0tm5, _tmp13a), _v8, _tmp13b);

                    __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0);
                    __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0);
                    __msa_st_w((v4i32)_tmp2m, tmp[2][m], 0);
                    __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0);

                    output0_tm_0 += tiles * 4 * 6;
                    output0_tm_1 += tiles * 4 * 6;
                    output0_tm_2 += tiles * 4 * 6;
                    output0_tm_3 += tiles * 4 * 6;
                    output0_tm_4 += tiles * 4 * 6;
                    output0_tm_5 += tiles * 4 * 6;
                }

                // pass 2: column transform, add bias, write 4x4 output pixels
                for (int m = 0; m < 4; m++)
                {
                    v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0);
                    v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0);
                    v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0);
                    v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0);
                    v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0);
                    v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0);

                    v4f32 _tmp02a = __msa_fadd_w(_tmp01, _tmp02);
                    v4f32 _tmp13a = __msa_fsub_w(_tmp01, _tmp02);

                    v4f32 _tmp02b = __msa_fadd_w(_tmp03, _tmp04);
                    v4f32 _tmp13b = __msa_fsub_w(_tmp03, _tmp04);

                    v4f32 _out00 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp00, _tmp02a), _tmp02b));
                    v4f32 _out01 = __msa_fadd_w(_bias0, __msa_fmadd_w(_tmp13a, _v2, _tmp13b));
                    v4f32 _out02 = __msa_fadd_w(_bias0, __msa_fmadd_w(_tmp02a, _v4, _tmp02b));
                    v4f32 _out03 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fadd_w(_tmp05, _tmp13a), _v8, _tmp13b));

                    __msa_st_w((v4i32)_out00, output0, 0);
                    __msa_st_w((v4i32)_out01, output0 + 4, 0);
                    __msa_st_w((v4i32)_out02, output0 + 4 * 2, 0);
                    __msa_st_w((v4i32)_out03, output0 + 4 * 3, 0);

                    output0 += outw * 4;
                }
            }
        }
    }
}
push_relabel_segment.h
/*
 * Parallel implementation of push-relabel algorithm, divides the network into multiple segments.
 *
 * Each worker thread owns a contiguous label range ("segment") and only pushes
 * to / relabels among vertices whose original_label falls inside its segment.
 * A parallel global-relabel BFS from the sink rebuilds exact distance labels
 * between phases, and the thread count adapts to the measured CPU time per phase.
 */

#ifndef MAXFLOW_GOLDBERG_CR_H
#define MAXFLOW_GOLDBERG_CR_H

#include "../../common_types.h"
#include "../../data_structures/linked_list.h"
#include "../../data_structures/thread_local_buffer_pool.h"
#include "partitioning.h"
#include <memory>
#include <cassert>
#include <chrono>
#include <cstring>
#include <omp.h>
#include <algorithm>
#include <atomic>
#include <set>

#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 64
#endif

namespace push_relabel_segment
{
    template <template <class> typename vector, typename T, typename U>
    class max_flow_instance
    {
        // Per-vertex state; cache-line aligned to limit false sharing between
        // worker threads. next/prev are intrusive links for label_info lists.
        struct alignas (CACHE_LINE_SIZE) vertex
        {
            vertex * next = nullptr;
            vertex * prev = nullptr;
            U excess { 0 };
            T label;
            T original_label;         // label snapshot taken at phase start; segment ownership is decided on this
            std::atomic_flag discovered = ATOMIC_FLAG_INIT; // claimed-by-BFS marker in global_relabel
        };

        // Active/inactive vertex lists for one label value (a "bucket").
        struct label_info
        {
            data_structures::linked_list<vertex> active_vertices { };
            data_structures::linked_list<vertex> inactive_vertices { };

            void reset ( ) noexcept
            {
                active_vertices . clear ();
                inactive_vertices . clear ();
            }
        };

        vector<vector<cached_edge<T, U>>> & _residual_network;
        std::unique_ptr<label_info[]> _labels;           // one bucket per label value
        std::unique_ptr<vertex[]> _vertices;
        data_structures::thread_local_buffer_pool<T> _pool; // per-thread frontier buffers for the BFS
        std::unique_ptr<T[]> _q;                          // current BFS frontier
        std::unique_ptr<label_info[]> _thread_local_labels; // per-thread scratch buckets
        T _source, _sink, _highest_active { 0 }, _highest_vertex { 0 };
        std::size_t _thread_count, _original_relabel_threshold { 0 };
        const std::size_t _max_thread_count;
        int64_t _min_cpu_time_per_phase { 0 };            // cost of a global relabel; phases cheaper than this shrink the thread count
        std::atomic<std::size_t> _relabel_threshold { 0 };

    public:
        max_flow_instance ( vector<vector<cached_edge<T, U>>> & graph, T source, T sink,
                            std::size_t thread_count = static_cast<size_t>(omp_get_max_threads ()) )
            : _residual_network ( graph ),
              _labels ( std::make_unique<label_info[]> ( _residual_network . size () + 1 ) ),
              _vertices ( std::make_unique<vertex[]> ( _residual_network . size () ) ),
              _pool ( data_structures::thread_local_buffer_pool<T> { thread_count, _residual_network . size () } ),
              _q ( std::make_unique<T[]> ( _residual_network . size () ) ),
              _thread_local_labels ( std::make_unique<label_info[]> ( thread_count ) ),
              _source ( source ),
              _sink ( sink ),
              _thread_count ( thread_count ),
              _max_thread_count ( thread_count )
        {
            omp_set_num_threads ( static_cast<int> ( _max_thread_count ) );
            init ();
        }

        // Runs phases until no active vertex remains; the max-flow value is the
        // excess accumulated at the sink.
        U find_max_flow ( )
        {
            global_relabel ();
            while ( _highest_active != 0 )
            {
                parallel_phase ();
                global_relabel ();
            }
            return _vertices[_sink] . excess;
        }

        // Converts the computed preflow into a flow by running the algorithm
        // with source and sink swapped (pushes leftover excess back).
        void preflow_to_flow ( )
        {
            std::swap ( _source, _sink );
            _highest_vertex = _residual_network . size ();
            find_max_flow ();
            std::swap ( _source, _sink );
#ifdef DEBUG
            for ( std::size_t i = 0; i < _residual_network . size(); ++i )
                if ( i != _source && i != _sink )
                    if ( _vertices[i] . excess > 0 )
                        std::cerr << "Excess violation: vertex " << i << ", excess " << _vertices[i] . excess << '\n';
#endif
        }

        auto steal_network ( )
        {
            return std::move ( _residual_network );
        }

    private:
        static constexpr T ALPHA = 6, BETA = 12;          // standard hi_pr relabel-work weights
        static constexpr double GLOBAL_RELABEL_FREQ = 1;
        static constexpr T min_active_per_thread = 10;

        // Saturates all source edges to create the initial preflow, then
        // computes the relabel-work threshold from n and m.
        void init ( ) noexcept
        {
#pragma omp parallel for schedule(static)
            for ( std::size_t i = 0; i < _residual_network[_source] . size (); ++i )
            {
                auto & edge = _residual_network[_source][i];
                _vertices[edge . dst_vertex] . excess = edge . r_capacity;
                edge . reverse_r_capacity += edge . r_capacity;
                _residual_network[edge . dst_vertex][edge . reverse_edge_index] . r_capacity += edge . r_capacity;
                _residual_network[edge . dst_vertex][edge . reverse_edge_index] . reverse_r_capacity -= edge . r_capacity;
                edge . r_capacity = 0;
            }

            std::size_t m = 0;
            for ( std::size_t i = 0; i < _residual_network . size (); ++i )
                m += _residual_network[i] . size ();
            _original_relabel_threshold = ( _residual_network . size () * ALPHA + m / 2 );
        }

        // Per-worker view of one phase: the [low, high) label segment this
        // thread owns plus its running highest-active/highest-vertex trackers.
        struct thread_local_data
        {
            int64_t & cpu_time;
            const T low;
            const T high;
            T & highest_active;
            T & highest_vertex;
            T relabel_progress;
        };

        // One discharge phase: partition active labels across threads, run
        // push_relabel per segment, then adapt the thread count based on
        // whether the phase was worth its parallel overhead.
        void parallel_phase ( )
        {
            for ( ;; )
            {
                _relabel_threshold = _original_relabel_threshold;
                const auto partitions = partitioning::get_partitions ( _labels, _highest_active, _thread_count, min_active_per_thread );
                const auto actual_thread_cnt = partitions . size () - 1;
                omp_set_num_threads ( static_cast<int> ( actual_thread_cnt ) );
                int64_t cpu_time = 0;
                if ( actual_thread_cnt == 1 )
                {
                    // single segment covering all labels — plain sequential discharge
                    push_relabel ( thread_local_data { cpu_time, 0, static_cast<T> ( _residual_network . size () ), _highest_active, _highest_vertex, 0 } );
                    _thread_count = std::min ( _thread_count * 2, _max_thread_count );
                    return;
                }
                T highest_active = 0, highest_vertex = 0;
#pragma omp parallel for schedule(static) reduction(+:cpu_time) reduction(max:highest_active) reduction(max:highest_vertex)
                for ( std::size_t i = 0; i < actual_thread_cnt; ++i )
                {
                    T low = partitions[i], high = partitions[i + 1];
                    highest_active = highest_vertex = high - 1;
                    push_relabel ( thread_local_data { cpu_time, low, high, highest_active, highest_vertex, 0 } );
                    _relabel_threshold -= _original_relabel_threshold / actual_thread_cnt;
                    //add back vertices that are still active but couldn't have been relabeled to higher partition
                    _labels[high - 1] . active_vertices . append_list ( _thread_local_labels[omp_get_thread_num ()] . active_vertices );
                    if ( !_labels[high - 1] . active_vertices . empty () )
                        highest_active = highest_vertex = high - 1;
                }
                _highest_active = highest_active;
                _highest_vertex = std::max ( _highest_vertex, highest_vertex );
                if ( cpu_time > _min_cpu_time_per_phase )
                {
                    // phase did enough work to amortize a global relabel — grow and return
                    _thread_count = std::min ( _thread_count * 2, _max_thread_count );
                    return;
                }
                // phase too cheap: halve parallelism, refresh original labels, retry
                _thread_count = std::max ( actual_thread_cnt / 2, std::size_t { 1 } );
                omp_set_num_threads ( static_cast<int> ( _max_thread_count ) );
#pragma omp parallel for schedule(static)
                for ( std::size_t i = 0; i < _residual_network . size (); ++i )
                    _vertices[i] . original_label = _vertices[i] . label;
            }
        }

        // Discharge loop for one segment; stops when the segment has no active
        // vertex or enough relabel work has accumulated to warrant a global relabel.
        void push_relabel ( thread_local_data data ) noexcept
        {
            auto start = std::chrono::high_resolution_clock::now ();
            for ( ;; )
            {
                auto node = get_active_vertex ( data . highest_active, data . low );
                auto label = data . highest_active;
                if ( node == nullptr )
                    break;
                discharge ( node, label, data );
                if ( data . relabel_progress * GLOBAL_RELABEL_FREQ >= _relabel_threshold . load ( std::memory_order_relaxed ) )
                {
                    break;
                }
            }
            auto end = std::chrono::high_resolution_clock::now ();
            data . cpu_time += std::chrono::duration_cast<std::chrono::milliseconds> ( end - start ) . count ();
        }

        // Scans buckets downward from highest_active (exclusive of low) and
        // pops the highest-labeled active vertex, updating highest_active.
        inline auto get_active_vertex ( T & highest_active, const T low ) noexcept
        {
            for ( T i = 0; i < highest_active - low; ++i ) //don't take vertices in low
            {
                if ( _labels[highest_active - i] . active_vertices . empty () )
                    continue;
                auto * node = _labels[highest_active - i] . active_vertices . pop ();
                highest_active -= i;
                return node;
            }
            return static_cast<vertex *> (nullptr);
        }

        inline T get_vertex_idx ( vertex * n ) const noexcept
        {
            return std::distance ( _vertices . get (), n );
        }

        // Pushes/relabels v until its excess is gone or its label leaves the segment.
        inline void discharge ( vertex * v, T label, thread_local_data & data ) noexcept
        {
            T vertex = get_vertex_idx ( v );
            for ( ;; )
            {
                if ( push ( vertex, label, data ) )
                {
                    _labels[label] . inactive_vertices . push ( v );
                    return;
                }
                label = relabel ( vertex, label, data );
                if ( label >= data . high )
                    return;
            }
        }

        //original labels have to be set before the parallel phase starts and they don't change until the next one
        inline bool same_thread ( const T original_label, const T low, const T high ) const noexcept
        {
            return original_label >= low && original_label < high;
        }

        // Pushes excess along admissible edges (residual capacity, same segment,
        // neighbor exactly one level below). Returns true when excess hits zero.
        inline bool push ( const T vertex, const T label, const thread_local_data & data ) noexcept
        {
            const auto target_label = label - 1;
            for ( auto & edge : _residual_network[vertex] )
            {
                if ( edge . r_capacity > 0
                     && same_thread ( _vertices[edge . dst_vertex] . original_label, data . low, data . high )
                     && _vertices[edge . dst_vertex] . label == target_label )
                {
                    auto flow = std::min ( _vertices[vertex] . excess, edge . r_capacity );
                    if ( _vertices[edge . dst_vertex] . excess == 0 && edge . dst_vertex != _sink )
                    {
                        // neighbor becomes active: move it between the bucket lists
                        auto * node = &_vertices[edge . dst_vertex];
                        _labels[target_label] . inactive_vertices . remove ( node );
                        _labels[target_label] . active_vertices . push ( node );
                    }
                    _vertices[vertex] . excess -= flow;
                    _vertices[edge . dst_vertex] . excess += flow;
                    edge . r_capacity -= flow;
                    edge . reverse_r_capacity += flow;
                    _residual_network[edge . dst_vertex][edge . reverse_edge_index] . reverse_r_capacity -= flow;
                    _residual_network[edge . dst_vertex][edge . reverse_edge_index] . r_capacity += flow;
                    if ( _vertices[vertex] . excess == 0 )
                        return true;
                }
            }
            return false;
        }

        // Raises the vertex label to the minimum admissible value within the
        // segment (capped at high - 1) and applies the gap heuristic when its
        // old bucket became empty.
        inline T relabel ( const T vertex, const T current_label, thread_local_data & data ) noexcept
        {
            data . relabel_progress += BETA;
            const auto new_label = calculate_new_label ( vertex, data );
            _vertices[vertex] . label = std::min ( new_label, data . high - 1 );
            if ( new_label < data . high )
            {
                data . highest_vertex = std::max ( data . highest_vertex, new_label );
                data . highest_active = new_label - 1;
            }
            else if ( data . high != _residual_network . size () ) //vertex is still active, but we cannot relabel it to another partition, so we remember it and add it back to active vertices at the end of this phase
                _thread_local_labels[omp_get_thread_num ()] . active_vertices . push ( &_vertices[vertex] );
            if ( _labels[current_label] . active_vertices . empty () && _labels[current_label] . inactive_vertices . empty () && current_label != data . high - 1 )
            {
                // gap: no vertex left at current_label — everything above is unreachable
                gap_relabel ( current_label, data );
                _vertices[vertex] . label = _residual_network . size ();
            }
            return new_label;
        }

        // Minimum neighbor label (within this segment) + 1; charges the scan
        // to relabel_progress.
        inline T calculate_new_label ( const T vertex, thread_local_data & data ) noexcept
        {
            T increase_to = data . high - 1;
            for ( auto & edge : _residual_network[vertex] )
            {
                if ( edge . r_capacity == 0
                     || !same_thread ( _vertices[edge . dst_vertex] . original_label, data . low, data . high ) )
                    continue;
                increase_to = std::min ( increase_to, _vertices[edge . dst_vertex] . label );
            }
            data . relabel_progress += _residual_network[vertex] . size ();
            return increase_to + 1;
        }

        // Parallel reverse BFS from the sink over residual edges; rebuilds exact
        // labels, original labels, and all label buckets. Also measures its own
        // wall time as the baseline phase cost (_min_cpu_time_per_phase).
        void global_relabel ( ) noexcept
        {
            auto start = std::chrono::high_resolution_clock::now ();
            omp_set_num_threads ( static_cast<int> ( _max_thread_count ) );
            const auto not_reached = _residual_network . size ();
#pragma omp parallel for schedule(static)
            for ( std::size_t i = 0; i < _residual_network . size (); ++i )
            {
                _vertices[i] . discovered . clear ( std::memory_order_relaxed );
                _vertices[i] . label = _vertices[i] . original_label = not_reached;
            }
#pragma omp parallel for schedule(static)
            for ( std::size_t i = 0; i <= _highest_vertex; ++i )
                _labels[i] . reset ();

            _vertices[_sink] . label = _vertices[_sink] . original_label = 0;
            _vertices[_sink] . discovered . test_and_set ( std::memory_order_relaxed );
            _highest_active = 0;
            _q[0] = _sink;
            std::size_t current_queue_size = 1;
            T current_distance = 0;
            while ( current_queue_size > 0 )
            {
#pragma omp parallel for schedule(static)
                for ( std::size_t i = 0; i < current_queue_size; ++i )
                {
                    auto thr_id = omp_get_thread_num ();
                    auto current_vertex = _q[i];
                    for ( auto edge : _residual_network[current_vertex] )
                    {
                        if ( edge . reverse_r_capacity > 0 )
                        {
                            // test_and_set is the atomic claim: only one thread wins each vertex
                            if ( !_vertices[edge . dst_vertex] . discovered . test_and_set ( std::memory_order_relaxed ) )
                            {
                                _vertices[edge . dst_vertex] . label = current_distance + 1;
                                _vertices[edge . dst_vertex] . original_label = current_distance + 1;
                                _pool . push_back ( edge . dst_vertex, static_cast<std::size_t>(thr_id) );
                                auto * node = &_vertices[edge . dst_vertex];
                                if ( _vertices[edge . dst_vertex] . excess > 0 )
                                    _thread_local_labels[thr_id] . active_vertices . push ( node );
                                else
                                    _thread_local_labels[thr_id] . inactive_vertices . push ( node );
                            }
                        }
                    }
                }
                current_queue_size = _pool . swap_data ( _q );
                ++current_distance;
                for ( std::size_t i = 0; i < _max_thread_count; ++i ) //append together all thread_local info
                {
                    _labels[current_distance] . active_vertices . append_list ( _thread_local_labels[i] . active_vertices );
                    _labels[current_distance] . inactive_vertices . append_list ( _thread_local_labels[i] . inactive_vertices );
                }
                if ( !_labels[current_distance] . active_vertices . empty () )
                    _highest_active = current_distance;
            }
            _highest_vertex = current_distance - 1;
            omp_set_num_threads ( static_cast<int> ( _thread_count ) );
            auto end = std::chrono::high_resolution_clock::now ();
            _min_cpu_time_per_phase = std::chrono::duration_cast<std::chrono::milliseconds> ( end - start ) . count ();
        }

        //gap heuristic restricted to single segment
        void gap_relabel ( const T gap_height, const thread_local_data & data ) noexcept
        {
            // every vertex above the gap is unreachable from the sink within
            // this segment — lift them all to n (effectively deactivated)
            for ( auto current_height = gap_height + 1; current_height <= data . highest_vertex; ++current_height )
            {
                while ( !_labels[current_height] . active_vertices . empty () )
                {
                    auto * ptr = _labels[current_height] . active_vertices . pop ();
                    auto vertex_idx = get_vertex_idx ( ptr );
                    _vertices[vertex_idx] . label = _residual_network . size ();
                }
                while ( !_labels[current_height] . inactive_vertices . empty () )
                {
                    auto * ptr = _labels[current_height] . inactive_vertices . pop ();
                    auto vertex_idx = get_vertex_idx ( ptr );
                    _vertices[vertex_idx] . label = _residual_network . size ();
                }
            }
            data . highest_vertex = data . highest_active = std::max ( gap_height - 1, data . low );
        }
    };
}

#endif //MAXFLOW_GOLDBERG_CR_H
Example_nesting_restrict.4.c
/*
 * @@name:	nesting_restrict.4c
 * @@type:	C
 * @@compilable: no
 * @@linkable:	no
 * @@expect:	failure
 */
/* Dummy work routine; the example only exercises OpenMP region nesting. */
void work(int i, int j) {}

/* Deliberately NON-CONFORMING example: a barrier region may not be closely
 * nested inside a worksharing (loop) region.  This file is expected to be
 * rejected by a conforming compiler — do not "fix" the nesting. */
void wrong4(int n)
{

  #pragma omp parallel default(shared)
  {
    int i;
    #pragma omp for
    for (i=0; i<n; i++) {
      work(i, 0);
/* incorrect nesting of barrier region in a loop region */
      #pragma omp barrier
      work(i, 1);
    }
  }
}
dgemm.c
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <omp.h> /* Include redundant execution header. */ #include "../../../include/ourRMTlib.h" #define DGEMM_RESTRICT __restrict__ //global variables int N = 256; int repeats = 30; double alpha = 1.0; double beta = 1.0; // ------------------------------------------------------- // // Function: get_seconds // // Vendor may modify this call to provide higher resolution // timing if required // ------------------------------------------------------- // double get_seconds() { struct timeval now; gettimeofday(&now, NULL); const double seconds = (double) now.tv_sec; const double usec = (double) now.tv_usec; return seconds + (usec * 1.0e-6); } //function out matrix multiplication! double matrix_multiply(double* matrixA, double* matrixB, double* matrixC, double* sum){ int r, i, j, k, check_threads=0, nthreads; #pragma omp parallel for private(j,k) for(i = 0; i < N; i++) { int ID = omp_get_thread_num(); if(ID==0 && check_threads==0){ nthreads=omp_get_num_threads(); printf("matrix_multiply: Number of threads = %d\n",nthreads); check_threads=1; } for(j = 0; j < N; j++) { *sum = 0; for(k = 0; k < N; k++) { *sum += matrixA[i*N + k] * matrixB[k*N + j]; } matrixC[i*N + j] = (alpha * (*sum)) + (beta * matrixC[i*N + j]); } } } // ------------------------------------------------------- // // Function: main // // Modify only in permitted regions (see comments in the // function) // ------------------------------------------------------- // int main(int argc, char* argv[]) { // ------------------------------------------------------- // // DO NOT CHANGE CODE BELOW // ------------------------------------------------------- // if(argc > 1) { N = atoi(argv[1]); printf("Matrix size input by command line: %d\n", N); if(argc > 2) { repeats = atoi(argv[2]); /*if(repeats < 30) { fprintf(stderr, "Error: repeats must be at least 30, setting is: %d\n", repeats); exit(-1); }*/ printf("Repeat multiply %d times.\n", repeats); if(argc > 
3) { alpha = (double) atof(argv[3]); if(argc > 4) { beta = (double) atof(argv[4]); } } } else { printf("Repeat multiply defaulted to %d\n", repeats); } } else { printf("Matrix size defaulted to %d\n", N); } printf("Alpha = %f\n", alpha); printf("Beta = %f\n", beta); if(N < 128) { printf("Error: N (%d) is less than 128, the matrix is too small.\n", N); exit(-1); } # ifndef ENABLE_TIMING_ANALYSIS start_timer(); # else start_timer(0); # endif printf("Allocating Matrices...\n"); double* DGEMM_RESTRICT matrixA = (double*) malloc(sizeof(double) * N * N); double* DGEMM_RESTRICT matrixB = (double*) malloc(sizeof(double) * N * N); double* DGEMM_RESTRICT matrixC = (double*) malloc(sizeof(double) * N * N); printf("Allocation complete, populating with values...\n"); int i, j, k, r, tid, nthreads, check_threads=0; #pragma omp parallel for private(i,j,tid) for(i = 0; i < N; i++) { for(j = 0; j < N; j++) { matrixA[i*N + j] = 2.0; matrixB[i*N + j] = 0.5; matrixC[i*N + j] = 1.0; } } printf("Performing multiplication...\n"); const double start = get_seconds(); // ------------------------------------------------------- // // VENDOR NOTIFICATION: START MODIFIABLE REGION // // Vendor is able to change the lines below to call optimized // DGEMM or other matrix multiplication routines. Do *NOT* // change any lines above this statement. 
// ------------------------------------------------------- // double sum = 0; // Repeat multiple times for(r = 0; r < repeats; r++) { # ifdef ENABLE_RMT /* Run matrix_multiply in redundant mode */ activateRMT("f-L-2d-2d-2dC-dpC", &matrix_multiply, 4, matrixA, N, N, matrixB, N, N, matrixC, N, N, &sum); # else matrix_multiply(matrixA, matrixB, matrixC, &sum); # endif } // ------------------------------------------------------- // // VENDOR NOTIFICATION: END MODIFIABLE REGION // ------------------------------------------------------- // // ------------------------------------------------------- // // DO NOT CHANGE CODE BELOW // ------------------------------------------------------- // const double end = get_seconds(); printf("Calculating matrix check...\n"); double final_sum = 0; long long int count = 0; #pragma omp parallel for reduction(+:final_sum, count) private(i,j) for(i = 0; i < N; i++) { for(j = 0; j < N; j++) { final_sum += matrixC[i*N + j]; count++; } } double N_dbl = (double) N; double matrix_memory = (3 * N_dbl * N_dbl) * ((double) sizeof(double)); printf("\n"); printf("===============================================================\n"); const double count_dbl = (double) count; const double scaled_result = (final_sum / (count_dbl * repeats)); printf("Final Sum is: %f\n", scaled_result); const double check_sum = N_dbl + (1.0 / (double) (repeats)); const double allowed_margin = 1.0e-8; if( (check_sum >= (scaled_result - allowed_margin)) && (check_sum <= (scaled_result + allowed_margin)) ) { printf(" -> Solution check PASSED successfully.\n"); } else { printf(" -> Solution check FAILED.\n"); } printf("Memory for Matrices: %f MB\n", (matrix_memory / (1024 * 1024))); const double time_taken = (end - start); printf("Multiply time: %f seconds\n", time_taken); const double flops_computed = (N_dbl * N_dbl * N_dbl * 2.0 * (double)(repeats)) + (N_dbl * N_dbl * 2 * (double)(repeats)); printf("FLOPs computed: %f\n", flops_computed); printf("GFLOP/s rate: %f GF/s\n", 
(flops_computed / time_taken) / 1000000000.0); printf("===============================================================\n"); printf("\n"); # ifndef ENABLE_TIMING_ANALYSIS end_timer(); # else end_timer(0); # endif free(matrixA); free(matrixB); free(matrixC); return 0; }
idaFoodWeb_bnd_omp.c
/* -----------------------------------------------------------------
 * Programmer(s): Daniel R. Reynolds and Ting Yan @ SMU
 *    Based on idaFoodWeb_bnd.c and parallelized with OpenMP
 * -----------------------------------------------------------------
 * SUNDIALS Copyright Start
 * Copyright (c) 2002-2019, Lawrence Livermore National Security
 * and Southern Methodist University.
 * All rights reserved.
 *
 * See the top-level LICENSE and NOTICE files for details.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 * SUNDIALS Copyright End
 * -----------------------------------------------------------------
 * Example program for IDA: Food web problem.
 *
 * This example program (OpenMP version) uses the SUNBAND linear
 * solver, and IDACalcIC for initial condition calculation.
 *
 * The mathematical problem solved in this example is a DAE system
 * that arises from a system of partial differential equations after
 * spatial discretization. The PDE system is a food web population
 * model, with predator-prey interaction and diffusion on the unit
 * square in two dimensions. The dependent variable vector is:
 *
 *        1   2         ns
 *  c = (c , c ,  ..., c  )  , ns = 2 * np
 *
 * and the PDE's are as follows:
 *
 *      i             i      i
 *    dc /dt = d(i)*(c    + c  )  +  R (x,y,c)   (i = 1,...,np)
 *                    xx     yy       i
 *
 *              i      i
 *    0 = d(i)*(c    + c  )  +  R (x,y,c)        (i = np+1,...,ns)
 *               xx     yy       i
 *
 * where the reaction terms R are:
 *
 *                   i             ns         j
 *    R  (x,y,c) =  c  * (b(i)  + sum a(i,j)*c )
 *     i                          j=1
 *
 * The number of species is ns = 2 * np, with the first np being
 * prey and the last np being predators. The coefficients a(i,j),
 * b(i), d(i) are:
 *
 *   a(i,i) = -AA   (all i)
 *   a(i,j) = -GG   (i <= np , j >  np)
 *   a(i,j) =  EE   (i >  np, j <= np)
 *   all other a(i,j) = 0
 *   b(i) =  BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i <= np)
 *   b(i) = -BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i  > np)
 *   d(i) = DPREY  (i <= np)
 *   d(i) = DPRED  (i > np)
 *
 * The various scalar parameters required are set using '#define'
 * statements or directly in routine InitUserData. In this program,
 * np = 1, ns = 2. The boundary conditions are homogeneous Neumann:
 * normal derivative = 0.
 *
 * A polynomial in x and y is used to set the initial values of the
 * first np variables (the prey variables) at each x,y location,
 * while initial values for the remaining (predator) variables are
 * set to a flat value, which is corrected by IDACalcIC.
 *
 * The PDEs are discretized by central differencing on a MX by MY
 * mesh.
 *
 * The DAE system is solved by IDA using the SUNBAND linear solver.
 * Output is printed at t = 0, .001, .01, .1, .4, .7, 1.
 *
 * Optionally, we can set the number of threads from environment
 * variable or command line. To check the current value for number
 * of threads from environment:
 *      % echo $OMP_NUM_THREADS
 *
 * Execution:
 *
 * To use the default value for the number of threads from
 * the OMP_NUM_THREADS environment value:
 *      % ./idaFoodWeb_bnd_omp
 * To specify the number of threads at the command line, use
 *      % ./idaFoodWeb_bnd_omp num_threads
 * where num_threads is the desired number of threads.
 *
 * -----------------------------------------------------------------
 * References:
 * [1] Peter N. Brown and Alan C. Hindmarsh,
 *     Reduced Storage Matrix Methods in Stiff ODE systems, Journal
 *     of Applied Mathematics and Computation, Vol. 31 (May 1989),
 *     pp. 40-91.
 *
 * [2] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold,
 *     Using Krylov Methods in the Solution of Large-Scale
 *     Differential-Algebraic Systems, SIAM J. Sci. Comput., 15
 *     (1994), pp. 1467-1488.
 *
 * [3] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold,
 *     Consistent Initial Condition Calculation for Differential-
 *     Algebraic Systems, SIAM J. Sci. Comput., 19 (1998),
 *     pp. 1495-1512.
 * -----------------------------------------------------------------
 */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#include <ida/ida.h>
#include <sunmatrix/sunmatrix_band.h>
#include <sunlinsol/sunlinsol_band.h>
#include <nvector/nvector_openmp.h>
#include <sundials/sundials_direct.h>
#include <sundials/sundials_types.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/* Problem Constants. */

#define NPREY       1              /* No. of prey (= no. of predators). */
#define NUM_SPECIES 2*NPREY

#define PI          RCONST(3.1415926535898)
#define FOURPI      (RCONST(4.0)*PI)

#define MX          20             /* MX = number of x mesh points */
#define MY          20             /* MY = number of y mesh points */
#define NSMX        (NUM_SPECIES * MX)
#define NEQ         (NUM_SPECIES*MX*MY)
#define AA          RCONST(1.0)    /* Coefficient in above eqns. for a */
#define EE          RCONST(10000.) /* Coefficient in above eqns. for a */
#define GG          RCONST(0.5e-6) /* Coefficient in above eqns. for a */
#define BB          RCONST(1.0)    /* Coefficient in above eqns. for b */
#define DPREY       RCONST(1.0)    /* Coefficient in above eqns. for d */
#define DPRED       RCONST(0.05)   /* Coefficient in above eqns. for d */
#define ALPHA       RCONST(50.)    /* Coefficient alpha in above eqns. */
#define BETA        RCONST(1000.)  /* Coefficient beta in above eqns. */
#define AX          RCONST(1.0)    /* Total range of x variable */
#define AY          RCONST(1.0)    /* Total range of y variable */
#define RTOL        RCONST(1.e-5)  /* Relative tolerance */
#define ATOL        RCONST(1.e-5)  /* Absolute tolerance */
#define NOUT        6              /* Number of output times */
#define TMULT       RCONST(10.0)   /* Multiplier for tout values */
#define TADD        RCONST(0.3)    /* Increment for tout values */
#define ZERO        RCONST(0.)
#define ONE         RCONST(1.0)

/*
 * User-defined vector and accessor macro: IJ_Vptr.
 * IJ_Vptr is defined in order to express the underlying 3-D structure of
 * the dependent variable vector from its underlying 1-D storage (an N_Vector).
 * IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to
 * species index is = 0, x-index ix = i, and y-index jy = j.
 */

#define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX))

/* Type: UserData.  Contains problem constants, etc. */

typedef struct {
  sunindextype Neq, ns, np, mx, my;
  realtype dx, dy, **acoef;
  realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES];
  N_Vector rates;    /* scratch vector holding reaction terms at each point */
  int nthreads;      /* OpenMP team size used inside resweb/Fweb */
} *UserData;

/* Prototypes for functions called by the IDA Solver. */

static int resweb(realtype time, N_Vector cc, N_Vector cp, N_Vector resval,
                  void *user_data);

/* Prototypes for private Helper Functions. */

static void InitUserData(UserData webdata);
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
                               UserData webdata);
static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol,
                        realtype atol);
static void PrintOutput(void *ida_mem, N_Vector c, realtype t);
static void PrintFinalStats(void *ida_mem);
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata);
static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
                     UserData webdata);
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2);
static int check_retval(void *returnvalue, char *funcname, int opt);

/*
 *--------------------------------------------------------------------
 * MAIN PROGRAM
 *--------------------------------------------------------------------
 */

int main(int argc, char *argv[])
{
  void *ida_mem;
  SUNMatrix A;
  SUNLinearSolver LS;
  UserData webdata;
  N_Vector cc, cp, id;
  int iout, retval;
  sunindextype mu, ml;
  realtype rtol, atol, t0, tout, tret;
  int num_threads;

  ida_mem = NULL;
  A = NULL;
  LS = NULL;
  webdata = NULL;
  cc = cp = id = NULL;

  /* Set the number of threads to use */
  num_threads = 1;       /* default value */
#ifdef _OPENMP
  num_threads = omp_get_max_threads();  /* overwrite with OMP_NUM_THREADS environment variable */
#endif
  if (argc > 1)          /* overwrite with command line value, if supplied */
    num_threads = (int) strtol(argv[1], NULL, 0);

  /* Allocate and initialize user data block webdata. */
  /* NOTE(review): these allocations are not checked for NULL, unlike the
     N_VNew_OpenMP calls below — consider adding check_retval guards. */
  webdata = (UserData) malloc(sizeof *webdata);
  webdata->rates = N_VNew_OpenMP(NEQ, num_threads);
  webdata->acoef = newDenseMat(NUM_SPECIES, NUM_SPECIES);
  webdata->nthreads = num_threads;

  InitUserData(webdata);

  /* Allocate N-vectors and initialize cc, cp, and id. */

  cc = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)cc, "N_VNew_OpenMP", 0)) return(1);

  cp = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)cp, "N_VNew_OpenMP", 0)) return(1);

  id = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)id, "N_VNew_OpenMP", 0)) return(1);

  SetInitialProfiles(cc, cp, id, webdata);

  /* Set remaining inputs to IDAMalloc. */

  t0 = ZERO;
  rtol = RTOL;
  atol = ATOL;

  /* Call IDACreate and IDAMalloc to initialize IDA. */

  ida_mem = IDACreate();
  if(check_retval((void *) ida_mem, "IDACreate", 0)) return(1);

  retval = IDASetUserData(ida_mem, webdata);
  if(check_retval(&retval, "IDASetUserData", 1)) return(1);

  /* id marks each component as differential (1) or algebraic (0),
     which IDACalcIC needs to correct the algebraic components. */
  retval = IDASetId(ida_mem, id);
  if(check_retval(&retval, "IDASetId", 1)) return(1);

  retval = IDAInit(ida_mem, resweb, t0, cc, cp);
  if(check_retval(&retval, "IDAInit", 1)) return(1);

  retval = IDASStolerances(ida_mem, rtol, atol);
  if(check_retval(&retval, "IDASStolerances", 1)) return(1);

  /* Setup band matrix and linear solver, and attach to IDA. */

  mu = ml = NSMX;
  A = SUNBandMatrix(NEQ, mu, ml);
  if(check_retval((void *)A, "SUNBandMatrix", 0)) return(1);
  LS = SUNLinSol_Band(cc, A);
  if(check_retval((void *)LS, "SUNLinSol_Band", 0)) return(1);
  retval = IDASetLinearSolver(ida_mem, LS, A);
  if(check_retval(&retval, "IDASetLinearSolver", 1)) return(1);

  /* Call IDACalcIC (with default options) to correct the initial values. */

  tout = RCONST(0.001);
  retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout);
  if(check_retval(&retval, "IDACalcIC", 1)) return(1);

  /* Print heading, basic parameters, and initial values. */

  PrintHeader(mu, ml, rtol, atol);
  PrintOutput(ida_mem, cc, ZERO);

  /* Loop over iout, call IDASolve (normal mode), print selected output. */

  for (iout = 1; iout <= NOUT; iout++) {

    retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL);
    if(check_retval(&retval, "IDASolve", 1)) return(retval);

    PrintOutput(ida_mem, cc, tret);

    /* Output times: 0.001, 0.01, 0.1 (x10 each), then +0.3 steps. */
    if (iout < 3) tout *= TMULT;
    else          tout += TADD;

  }

  /* Print final statistics and free memory. */

  PrintFinalStats(ida_mem);
  printf("num_threads = %i\n\n", num_threads);

  /* Free memory */

  IDAFree(&ida_mem);
  SUNLinSolFree(LS);
  SUNMatDestroy(A);

  N_VDestroy_OpenMP(cc);
  N_VDestroy_OpenMP(cp);
  N_VDestroy_OpenMP(id);

  destroyMat(webdata->acoef);
  N_VDestroy_OpenMP(webdata->rates);
  free(webdata);

  return(0);
}

/* Define lines for readability in later routines */

#define acoef  (webdata->acoef)
#define bcoef  (webdata->bcoef)
#define cox    (webdata->cox)
#define coy    (webdata->coy)

/*
 *--------------------------------------------------------------------
 * FUNCTIONS CALLED BY IDA
 *--------------------------------------------------------------------
 */

/*
 * resweb: System residual function for predator-prey system.
 * This routine calls Fweb to get all the right-hand sides of the
 * equations, then loads the residual vector accordingly,
 * using cp in the case of prey species.
 */

static int resweb(realtype tt, N_Vector cc, N_Vector cp,
                  N_Vector res,  void *user_data)
{
  sunindextype jx, jy, is, yloc, loc, np;
  realtype *resv, *cpv;
  UserData webdata;

  webdata = (UserData)user_data;

  cpv = NV_DATA_OMP(cp);
  resv = NV_DATA_OMP(res);
  np = webdata->np;

  /* Call Fweb to set res to vector of right-hand sides. */
  Fweb(tt, cc, res, webdata);

  /* Loop over all grid points, setting residual values appropriately
     for differential or algebraic components.
     Prey (is < np): residual is cp - f(c); predators: residual is -f(c). */
#pragma omp parallel for default(shared) private(jy, yloc, jx, loc, is) schedule(static) num_threads(webdata->nthreads)
  for (jy = 0; jy < MY; jy++) {
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      loc = yloc + NUM_SPECIES * jx;
      for (is = 0; is < NUM_SPECIES; is++) {
        if (is < np)
          resv[loc+is] = cpv[loc+is] - resv[loc+is];
        else
          resv[loc+is] = -resv[loc+is];
      }
    }
  }

  return(0);

}

/*
 *--------------------------------------------------------------------
 * PRIVATE FUNCTIONS
 *--------------------------------------------------------------------
 */

/*
 * InitUserData: Load problem constants in webdata (of type UserData).
 */

static void InitUserData(UserData webdata)
{
  sunindextype i, j, np;
  realtype *a1,*a2, *a3, *a4, dx2, dy2;

  webdata->mx = MX;
  webdata->my = MY;
  webdata->ns = NUM_SPECIES;
  webdata->np = NPREY;
  webdata->dx = AX/(MX-1);
  webdata->dy = AY/(MY-1);
  webdata->Neq= NEQ;

  /* Set up the coefficients a and b, and others found in the equations. */
  np = webdata->np;
  dx2 = (webdata->dx)*(webdata->dx);
  dy2 = (webdata->dy)*(webdata->dy);

  for (i = 0; i < np; i++) {
    a1 = &(acoef[i][np]);
    a2 = &(acoef[i+np][0]);
    a3 = &(acoef[i][0]);
    a4 = &(acoef[i+np][np]);
    /* Fill in the portion of acoef in the four quadrants, row by row. */
    for (j = 0; j < np; j++) {
      *a1++ = -GG;
      *a2++ =  EE;
      *a3++ = ZERO;
      *a4++ = ZERO;
    }

    /* Reset the diagonal elements of acoef to -AA. */
    acoef[i][i] = -AA;
    acoef[i+np][i+np] = -AA;

    /* Set coefficients for b and diffusion terms. */
    bcoef[i] = BB;
    bcoef[i+np] = -BB;
    cox[i] = DPREY/dx2;
    cox[i+np] = DPRED/dx2;
    coy[i] = DPREY/dy2;
    coy[i+np] = DPRED/dy2;
  }

}

/*
 * SetInitialProfiles: Set initial conditions in cc, cp, and id.
 * A polynomial profile is used for the prey cc values, and a constant
 * (1.0e5) is loaded as the initial guess for the predator cc values.
 * The id values are set to 1 for the prey and 0 for the predators.
 * The prey cp values are set according to the given system, and
 * the predator cp values are set to zero.
 */

static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
                               UserData webdata)
{
  sunindextype loc, yloc, is, jx, jy, np;
  realtype xx, yy, xyfactor;
  realtype *ccv, *cpv, *idv;

  ccv = NV_DATA_OMP(cc);
  cpv = NV_DATA_OMP(cp);
  idv = NV_DATA_OMP(id);
  np = webdata->np;

  /* Loop over grid, load cc values and id values. */
  for (jy = 0; jy < MY; jy++) {
    yy = jy * webdata->dy;
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      xx = jx * webdata->dx;
      /* Polynomial bump that vanishes on the domain boundary. */
      xyfactor = RCONST(16.0)*xx*(ONE-xx)*yy*(ONE-yy);
      xyfactor *= xyfactor;
      loc = yloc + NUM_SPECIES*jx;

      for (is = 0; is < NUM_SPECIES; is++) {
        if (is < np) {
          ccv[loc+is] = RCONST(10.0) + (realtype)(is+1) * xyfactor;
          idv[loc+is] = ONE;
        }
        else {
          ccv[loc+is] = RCONST(1.0e5);
          idv[loc+is] = ZERO;
        }
      }
    }
  }

  /* Set c' for the prey by calling the function Fweb. */
  Fweb(ZERO, cc, cp, webdata);

  /* Set c' for predators to 0. */
  for (jy = 0; jy < MY; jy++) {
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      loc = yloc + NUM_SPECIES * jx;
      for (is = np; is < NUM_SPECIES; is++) {
        cpv[loc+is] = ZERO;
      }
    }
  }

}

/*
 * Print first lines of output (problem description)
 */

static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol,
                        realtype atol)
{
  printf("\nidaFoodWeb_bnd_omp: Predator-prey DAE OpenMP example problem for IDA \n\n");
  printf("Number of species ns: %d", NUM_SPECIES);
  printf("     Mesh dimensions: %d x %d", MX, MY);
  printf("     System size: %d\n", NEQ);
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("Tolerance parameters:  rtol = %Lg   atol = %Lg\n", rtol, atol);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("Tolerance parameters:  rtol = %g   atol = %g\n", rtol, atol);
#else
  printf("Tolerance parameters:  rtol = %g   atol = %g\n", rtol, atol);
#endif
  printf("Linear solver: SUNBAND,  Band parameters mu = %ld, ml = %ld\n",
         (long int) mu, (long int) ml);
  printf("CalcIC called to correct initial predator concentrations.\n\n");
  printf("-----------------------------------------------------------\n");
  printf("  t        bottom-left  top-right");
  printf("    | nst  k      h\n");
  printf("-----------------------------------------------------------\n\n");

}

/*
 * PrintOutput: Print output values at output time t = tt.
 * Selected run statistics are printed.  Then values of the concentrations
 * are printed for the bottom left and top right grid points only.
 */

static void PrintOutput(void *ida_mem, N_Vector c, realtype t)
{
  int i, kused, retval;
  long int nst;
  realtype *c_bl, *c_tr, hused;

  retval = IDAGetLastOrder(ida_mem, &kused);
  check_retval(&retval, "IDAGetLastOrder", 1);
  retval = IDAGetNumSteps(ida_mem, &nst);
  check_retval(&retval, "IDAGetNumSteps", 1);
  retval = IDAGetLastStep(ida_mem, &hused);
  check_retval(&retval, "IDAGetLastStep", 1);

  c_bl = IJ_Vptr(c,0,0);
  c_tr = IJ_Vptr(c,MX-1,MY-1);

#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("%8.2Le %12.4Le %12.4Le   | %3ld  %1d %12.4Le\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf("         %12.4Le %12.4Le   |\n",c_bl[i],c_tr[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("%8.2e %12.4e %12.4e   | %3ld  %1d %12.4e\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf("         %12.4e %12.4e   |\n",c_bl[i],c_tr[i]);
#else
  printf("%8.2e %12.4e %12.4e   | %3ld  %1d %12.4e\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf("         %12.4e %12.4e   |\n",c_bl[i],c_tr[i]);
#endif

  printf("\n");
}

/*
 * PrintFinalStats: Print final run data contained in iopt.
 */

static void PrintFinalStats(void *ida_mem)
{
  long int nst, nre, nreLS, nni, nje, netf, ncfn;
  int retval;

  retval = IDAGetNumSteps(ida_mem, &nst);
  check_retval(&retval, "IDAGetNumSteps", 1);
  retval = IDAGetNumNonlinSolvIters(ida_mem, &nni);
  check_retval(&retval, "IDAGetNumNonlinSolvIters", 1);
  retval = IDAGetNumResEvals(ida_mem, &nre);
  check_retval(&retval, "IDAGetNumResEvals", 1);
  retval = IDAGetNumErrTestFails(ida_mem, &netf);
  check_retval(&retval, "IDAGetNumErrTestFails", 1);
  retval = IDAGetNumNonlinSolvConvFails(ida_mem, &ncfn);
  check_retval(&retval, "IDAGetNumNonlinSolvConvFails", 1);
  retval = IDAGetNumJacEvals(ida_mem, &nje);
  check_retval(&retval, "IDAGetNumJacEvals", 1);
  retval = IDAGetNumLinResEvals(ida_mem, &nreLS);
  check_retval(&retval, "IDAGetNumLinResEvals", 1);

  printf("-----------------------------------------------------------\n");
  printf("Final run statistics: \n\n");
  printf("Number of steps                    = %ld\n", nst);
  printf("Number of residual evaluations     = %ld\n", nre+nreLS);
  printf("Number of Jacobian evaluations     = %ld\n", nje);
  printf("Number of nonlinear iterations     = %ld\n", nni);
  printf("Number of error test failures      = %ld\n", netf);
  printf("Number of nonlinear conv. failures = %ld\n", ncfn);

}

/*
 * Fweb: Rate function for the food-web problem.
 * This routine computes the right-hand sides of the system equations,
 * consisting of the diffusion term and interaction term.
 * The interaction term is computed by the function WebRates.
 */

static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate,
                 UserData webdata)
{
  sunindextype jx, jy, is, idyu, idyl, idxu, idxl;
  realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui;

  /* Loop over grid points, evaluate interaction vector (length ns),
     form diffusion difference terms, and load crate. */

  for (jy = 0; jy < MY; jy++) {
    yy = (webdata->dy) * jy ;
    /* Homogeneous Neumann BC: at the boundary the offset flips sign so
       the one-sided difference mirrors the interior neighbor. */
    idyu = (jy!=MY-1) ? NSMX : -NSMX;
    idyl = (jy!= 0 ) ? NSMX : -NSMX;

    for (jx = 0; jx < MX; jx++) {
      xx = (webdata->dx) * jx;
      idxu = (jx!= MX-1) ? NUM_SPECIES : -NUM_SPECIES;
      idxl = (jx!= 0  ) ? NUM_SPECIES : -NUM_SPECIES;
      cxy = IJ_Vptr(cc,jx,jy);
      ratesxy = IJ_Vptr(webdata->rates,jx,jy);
      cratexy = IJ_Vptr(crate,jx,jy);

      /* Get interaction vector at this grid point. */
      WebRates(xx, yy, cxy, ratesxy, webdata);

      /* Loop over species, do differencing, load crate segment. */
#pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads)
      for (is = 0; is < NUM_SPECIES; is++) {

        /* Differencing in y. */
        dcyli = *(cxy+is) - *(cxy - idyl + is) ;
        dcyui = *(cxy + idyu + is) - *(cxy+is);

        /* Differencing in x. */
        dcxli = *(cxy+is) - *(cxy - idxl + is);
        dcxui = *(cxy + idxu +is) - *(cxy+is);

        /* Compute the crate values at (xx,yy). */
        cratexy[is] = coy[is] * (dcyui - dcyli) +
          cox[is] * (dcxui - dcxli) + ratesxy[is];

      } /* End is loop */
    } /* End of jx loop */
  } /* End of jy loop */

}

/*
 * WebRates: Evaluate reaction rates at a given spatial point.
 * At a given (x,y), evaluate the array of ns reaction terms R.
 */

static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
                     UserData webdata)
{
  int is;
  realtype fac;

  for (is = 0; is < NUM_SPECIES; is++)
    ratesxy[is] = dotprod(NUM_SPECIES, cxy, acoef[is]);

  fac = ONE + ALPHA*xx*yy + BETA*sin(FOURPI*xx)*sin(FOURPI*yy);

  for (is = 0; is < NUM_SPECIES; is++)
    ratesxy[is] = cxy[is]*( bcoef[is]*fac + ratesxy[is] );

}

/*
 * dotprod: dot product routine for realtype arrays, for use by WebRates.
 */

static realtype dotprod(sunindextype size, realtype *x1, realtype *x2)
{
  sunindextype i;
  realtype *xx1, *xx2, temp = ZERO;

  xx1 = x1;
  xx2 = x2;
  for (i = 0; i < size; i++)
    temp += (*xx1++) * (*xx2++);

  return(temp);

}

/*
 * Check function return value...
 *   opt == 0 means SUNDIALS function allocates memory so check if
 *            returned NULL pointer
 *   opt == 1 means SUNDIALS function returns an integer value so check if
 *            retval < 0
 *   opt == 2 means function allocates memory so check if returned
 *            NULL pointer
 */

static int check_retval(void *returnvalue, char *funcname, int opt)
{
  int *retval;

  if (opt == 0 && returnvalue == NULL) {
    /* Check if SUNDIALS function returned NULL pointer - no memory allocated */
    fprintf(stderr,
            "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return(1);
  } else if (opt == 1) {
    /* Check if retval < 0 */
    retval = (int *) returnvalue;
    if (*retval < 0) {
      fprintf(stderr,
              "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
              funcname, *retval);
      return(1);
    }
  } else if (opt == 2 && returnvalue == NULL) {
    /* Check if function returned NULL pointer - no memory allocated */
    fprintf(stderr,
            "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return(1);
  }

  return(0);
}
simd-17.c
/* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
/* { dg-additional-options "-std=c99" } */

/* GCC testsuite check for the OpenMP `linear` clause on simd loops.
   foo() bumps *b by x and adds zero-valued noise terms so the compiler
   cannot trivially fold the updates away.  Exact arithmetic below is
   the test oracle — do not alter values or loop bounds. */

static inline void
foo (int *b, int *i, int *j, int x)
{
  *b = *b + x + (*i - *i) + (*j - *j);
}

int
main ()
{
  int b, c = 0;
  /* linear(b:2): b must equal 7 + 2*i on entry to each iteration. */
  b = 7;
  #pragma omp simd linear(b:2) reduction(+:c)
  for (int i = 0; i < 64; i++)
    {
      c = c + (b != 7 + 2 * i);
      foo (&b, &i, &i, 2);
    }
  if (c || b != 7 + 64 * 2)
    __builtin_abort ();
  /* Non-unit loop step: linear step 3 per logical iteration (i/4). */
  b = 7;
  #pragma omp simd linear(b:3) reduction(+:c)
  for (int i = 0; i < 64; i += 4)
    {
      c = c + (b != 7 + i / 4 * 3);
      foo (&b, &i, &i, 3);
    }
  if (c || b != 7 + 16 * 3)
    __builtin_abort ();
  /* Collapsed nest: linear index is the flattened (i,j) position. */
  b = 7;
  #pragma omp simd collapse (2) linear(b:2) reduction(+:c)
  for (int i = 0; i < 8; i++)
    for (int j = 0; j < 8; j++)
      {
	c = c + (b != 7 + 2 * j + 2 * 8 * i);
	foo (&b, &i, &j, 2);
      }
  if (c || b != 7 + 64 * 2)
    __builtin_abort ();
  /* Same three cases combined with parallel for and static chunking. */
  b = 7;
  #pragma omp parallel for simd schedule (static, 4) linear(b:2) reduction(+:c)
  for (int i = 0; i < 64; i++)
    {
      c = c + (b != 7 + 2 * i);
      foo (&b, &i, &i, 2);
    }
  if (c || b != 7 + 64 * 2)
    __builtin_abort ();
  b = 7;
  #pragma omp parallel for simd schedule (static, 4) linear(b:3) reduction(+:c)
  for (int i = 0; i < 64; i += 4)
    {
      c = c + (b != 7 + i / 4 * 3);
      foo (&b, &i, &i, 3);
    }
  if (c || b != 7 + 16 * 3)
    __builtin_abort ();
  b = 7;
  #pragma omp parallel for simd collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)
  for (int i = 0; i < 8; i++)
    for (int j = 0; j < 8; j++)
      {
	c = c + (b != 7 + 2 * j + 2 * 8 * i);
	foo (&b, &i, &j, 2);
      }
  if (c || b != 7 + 64 * 2)
    __builtin_abort ();
  return 0;
}
array.c
#include <assert.h> #include <malloc.h> #include <stdlib.h> #include "actpol/array.h" #include "actpol/astro.h" #include "actpol/math.h" #include "actpol/state.h" #include "actpol/vec3.h" #include "actpol/util.h" #include "debug.h" void actpol_detector_to_focalplane_rotation(double focalplane_x, double focalplane_y, double pol_angle, Quaternion q) { Quaternion_r3(q, -pol_angle); Quaternion_r2_mul(focalplane_x, q); Quaternion_r1_mul(-focalplane_y, q); Quaternion_unit(q); } void ACTpolFeedhorn_init(ACTpolFeedhorn *feedhorn, double focalplane_x, double focalplane_y, double pol_angle) { actpol_detector_to_focalplane_rotation(focalplane_x, focalplane_y, pol_angle, feedhorn->focalplane_q); } ACTpolArray * ACTpolArray_alloc(int nhorns) { ACTpolArray *array; array = (ACTpolArray *)malloc(sizeof(ACTpolArray)); assert(array); array->nhorns = nhorns; array->horn = (ACTpolFeedhorn *)memalign(32, nhorns*sizeof(ACTpolFeedhorn)); assert(array->horn); return array; } void ACTpolArray_free(ACTpolArray *array) { free(array->horn); free(array); } void ACTpolArray_init(ACTpolArray *array, double freq_GHz, double focalplane_x, double focalplane_y) { array->freq_GHz = freq_GHz; actpol_detector_to_focalplane_rotation(focalplane_x, focalplane_y, 0., array->focalplane_q); } ACTpolFeedhorn * ACTpolArray_get_feedhorn(ACTpolArray *array, int i) { assert(i >= 0 && i < array->nhorns); return array->horn + i; } ACTpolArrayCoords * ACTpolArrayCoords_alloc(const ACTpolArray *array) { ACTpolArrayCoords *coords = (ACTpolArrayCoords *)malloc(sizeof(ACTpolArrayCoords)); coords->array = array; coords->ref = (double *)malloc(sizeof(double) * array->nhorns); coords->horn = (ACTpolFeedhornCoords *)malloc(sizeof(ACTpolFeedhornCoords) * array->nhorns); return coords; } void ACTpolArrayCoords_free(ACTpolArrayCoords *coords) { free(coords->horn); free(coords->ref); free(coords); } void ACTpolArrayCoords_init(ACTpolArrayCoords *coords, enum ACTpolCoordinateSystem coordsys) { coords->coordsys = coordsys; 
    /* Default refraction used until update_refraction runs.
       NOTE(review): assumed to be a typical placeholder (~30 arcsec) --
       confirm against callers. */
    coords->mean_ref = arcsec2rad(30.);
}

/* Bounds-checked accessor for the i-th feedhorn's computed coordinates. */
ACTpolFeedhornCoords *
ACTpolArrayCoords_get_feedhorn_coords(ACTpolArrayCoords *coords, int i)
{
    assert(i >= 0 && i < coords->array->nhorns);
    return coords->horn + i;
}

/* Recompute the per-horn atmospheric refraction corrections for the given
   scan geometry and weather, and cache their mean in coords->mean_ref. */
void
ACTpolArrayCoords_update_refraction(ACTpolArrayCoords *coords,
        const ACTpolScan *scan, const ACTpolWeather *weather)
{
    const ACTpolArray *array = coords->array;

    /* boresight rotation at the mean scan position */
    Quaternion focalplane_to_NWU_q;
    Quaternion_identity(focalplane_to_NWU_q);
    actpol_rotate_focalplane_to_NWU(scan->mean_alt, scan->mean_az,
        focalplane_to_NWU_q);

    coords->mean_ref = 0.;
    for (int i = 0; i != coords->array->nhorns; ++i) {
        Quaternion q;
        Quaternion_mul(q, focalplane_to_NWU_q, array->horn[i].focalplane_q);
        double mat[3][3];
        Quaternion_to_matrix(q, mat);
        /* altitude of this horn's pointing (z component of its direction) */
        double alt = asin(mat[2][2]);
        coords->ref[i] = actpol_refraction(weather, array->freq_GHz, alt);
        /* sanity: refraction must be positive and below a milliradian */
        assert(coords->ref[i] > 0. && coords->ref[i] < 1e-3);
        coords->mean_ref += coords->ref[i];
    }
    coords->mean_ref /= coords->array->nhorns;
    //DEBUG("mean_ref = %g\"\n", rad2arcsec(coords->mean_ref));
}

/* Compose the mean focalplane -> BCRS rotation for the current state:
   refraction-corrected topocentric pointing, then diurnal aberration,
   then NWU -> GCRS, then the annual-aberration correction (appended at
   the start of the next chunk). */
static void
compute_mean_focalplane_to_BCRS(const ACTpolArrayCoords *coords,
    const ACTpolState *state, Quaternion focalplane_to_BCRS)
{
    double r[3];
    Quaternion q;

    // focalplane -> topo
    Quaternion focalplane_to_topo;
    Quaternion_identity(focalplane_to_topo);
    actpol_rotate_focalplane_to_NWU(
        state->boresight_alt - coords->mean_ref,
        state->boresight_az,
        focalplane_to_topo);

    // diurnal aberration
    Quaternion diurnal_aberration, focalplane_to_apparent;
    /* direction of the array center in the topocentric frame */
    Quaternion_mul(q, focalplane_to_topo, coords->array->focalplane_q);
    Quaternion_unit(q);
    Quaternion_to_matrix_col3(q, r);
    actpol_diurnal_aberration(r, diurnal_aberration);
    Quaternion_mul(focalplane_to_apparent, diurnal_aberration, focalplane_to_topo);

    // focalplane -> GCRS
    Quaternion focalplane_to_GCRS;
    Quaternion_mul(focalplane_to_GCRS, state->NWU_to_GCRS_q, focalplane_to_apparent);

    // center of array in GCRS
    Quaternion_mul(q, focalplane_to_GCRS, coords->array->focalplane_q);
    Quaternion_unit(q);
Quaternion_to_matrix_col3(q, r); // annual aberration correction Quaternion GCRS_to_BCRS; actpol_aberration(r, state->earth_orbital_beta, GCRS_to_BCRS); Quaternion_mul(focalplane_to_BCRS, GCRS_to_BCRS, focalplane_to_GCRS); Quaternion_unit(focalplane_to_BCRS); } int ACTpolArrayCoords_update_ra_dec(ACTpolArrayCoords *coords, const ACTpolState *state) { const ACTpolArray *array = coords->array; Quaternion focalplane_to_BCRS; compute_mean_focalplane_to_BCRS(coords, state, focalplane_to_BCRS); #pragma omp parallel for for (int i = 0; i != array->nhorns; ++i) { Quaternion q; Quaternion_mul(q, focalplane_to_BCRS, array->horn[i].focalplane_q); double mat[3][3]; Quaternion_conj(q); // transpose mat Quaternion_to_matrix(q, mat); double *p1 = mat[0]; double *p2 = mat[1]; double *r = mat[2]; ACTpolFeedhornCoords *horn = coords->horn+i; horn->a = atan2(r[1], r[0]); horn->b = atan2(r[2], hypot(r[0],r[1])); // w = r x z double w[3], z[3] = {0, 0, 1}; vec3_cross_product(w, r, z); vec3_unit(w); // n = w x r double n[3]; vec3_cross_product(n, w, r); double sin_g = vec3_dot_product(p1, w); double cos_g = vec3_dot_product(p1, n); horn->sin2gamma = 2*sin_g*cos_g; horn->cos2gamma = 2*cos_g*cos_g - 1; } return 0; } int ACTpolArrayCoords_update_ra_sindec(ACTpolArrayCoords *coords, const ACTpolState *state) { const ACTpolArray *array = coords->array; Quaternion focalplane_to_BCRS; compute_mean_focalplane_to_BCRS(coords, state, focalplane_to_BCRS); Quaternion q0; Quaternion_mul(q0, focalplane_to_BCRS, array->focalplane_q); double r0[3]; Quaternion_to_matrix_col3(q0, r0); double x0 = r0[0]; double y0 = r0[1]; double ra0 = atan2(y0, x0); #pragma omp parallel for for (int i = 0; i != array->nhorns; ++i) { Quaternion q; Quaternion_mul(q, focalplane_to_BCRS, array->horn[i].focalplane_q); double p1[3], r[3]; Quaternion_to_matrix_col1(q, p1); Quaternion_to_matrix_col3(q, r); ACTpolFeedhornCoords *horn = coords->horn+i; /* * Define z = y/x, z0 = y0/x0. 
         *
         * atan(z) - atan(z0) = atan(z) + atan(-z0)
         *                    = atan((z - z0)/(1 + z*z0))
         *
         * Let p = (z - z0)/(1 + z*z0)
         *       = (x0*y - x*y0)/(x*x0 + y*y0).
         *
         * For |p| << 1, atan(p) ~ p/(1 + 0.28*p^2)
         * (Abramowitz & Stegun, 4.4.48)
         *
         * Approximation appears to be good to ~0.01". By contrast,
         * 2nd order Taylor expansion is only good to ~10".
         */
        //double p = (x0*r[1] - r[0]*y0)/(x0*r[0] + y0*r[1]);
        //double ra1 = p/(1. + 0.28*p*p);
        /* same approximation rearranged to need only one division */
        double pn = x0*r[1] - r[0]*y0;
        double pd = x0*r[0] + y0*r[1];
        double ra1 = (pn*pd)/(pd*pd + 0.28*pn*pn);
        horn->a = ra0 + ra1;
        horn->b = r[2];

        // w = r x z
        const double z[3] = {0., 0., 1.};
        double w[3];
        vec3_cross_product(w, r, z);

        // n = w x r
        double n[3];
        vec3_cross_product(n, w, r);

        double sin_g = vec3_dot_product(p1, w);
        double cos_g = vec3_dot_product(p1, n);
        /* w was not normalized above, so divide out |(sin_g, cos_g)|^2
           when forming the double-angle terms */
        double norm2 = sin_g*sin_g + cos_g*cos_g;
        /*
        horn->sin2gamma = 2.*sin_g*cos_g/norm2;
        horn->cos2gamma = 2.*cos_g*cos_g/norm2 - 1.;
        */
        double two_cos_g_div_norm2 = 2.*cos_g/norm2;
        horn->sin2gamma = sin_g*two_cos_g_div_norm2;
        horn->cos2gamma = cos_g*two_cos_g_div_norm2 - 1.;
    }

    return 0;
}

/* Update per-horn topocentric coordinates from the refraction-corrected
   boresight.  horn->a = -atan2(r[1], r[0]) -- NOTE(review): the sign
   differs from the other updaters, presumably converting the NWU frame's
   westward y-axis to an eastward azimuth; confirm intent.  horn->b is
   the altitude.  No polarization terms are computed here. */
int
ACTpolArrayCoords_update_az_alt(ACTpolArrayCoords *coords, const ACTpolState *state)
{
    const ACTpolArray *array = coords->array;

    // focalplane -> topo
    Quaternion focalplane_to_topo;
    Quaternion_identity(focalplane_to_topo);
    actpol_rotate_focalplane_to_NWU(
        state->boresight_alt - coords->mean_ref,
        state->boresight_az,
        focalplane_to_topo);

    for (int i = 0; i != array->nhorns; ++i) {
        Quaternion q;
        Quaternion_mul(q, focalplane_to_topo, array->horn[i].focalplane_q);
        double mat[3][3];
        Quaternion_conj(q); // transpose mat
        Quaternion_to_matrix(q, mat);
        double *r = mat[2];

        ACTpolFeedhornCoords *horn = coords->horn+i;
        horn->a = -atan2(r[1], r[0]);
        horn->b = atan2(r[2], hypot(r[0],r[1]));
    }

    return 0;
}

/* Update per-horn galactic coordinates by appending the ICRS -> galactic
   rotation to the mean focalplane -> BCRS transform. */
int
ACTpolArrayCoords_update_galactic(ACTpolArrayCoords *coords, const ACTpolState *state)
{
    const ACTpolArray *array = coords->array;

    Quaternion focalplane_to_galactic;
    compute_mean_focalplane_to_BCRS(coords, state,
        focalplane_to_galactic);
    actpol_rotate_ICRS_to_galactic(focalplane_to_galactic);

    #pragma omp parallel for
    for (int i = 0; i != array->nhorns; ++i) {
        Quaternion q;
        Quaternion_mul(q, focalplane_to_galactic, array->horn[i].focalplane_q);
        double mat[3][3];
        Quaternion_conj(q); // transpose mat
        Quaternion_to_matrix(q, mat);
        double *p1 = mat[0];
        double *p2 = mat[1]; /* NOTE(review): unused -- kept for symmetry with mat rows */
        double *r = mat[2];

        ACTpolFeedhornCoords *horn = coords->horn+i;
        horn->a = atan2(r[1], r[0]);
        horn->b = atan2(r[2], hypot(r[0],r[1]));

        // w = r x z
        double w[3], z[3] = {0, 0, 1};
        vec3_cross_product(w, r, z);
        vec3_unit(w);

        // n = w x r
        double n[3];
        vec3_cross_product(n, w, r);

        /* project the detector axis p1 onto the (w, n) basis, then use
           double-angle identities for the 2*gamma terms */
        double sin_g = vec3_dot_product(p1, w);
        double cos_g = vec3_dot_product(p1, n);
        horn->sin2gamma = 2*sin_g*cos_g;
        horn->cos2gamma = 2*cos_g*cos_g - 1;
    }

    return 0;
}

/* Dispatch to the updater selected by coords->coordsys and return its
   status.  An unknown coordinate system trips the assert (and returns -1
   in release builds). */
int
ACTpolArrayCoords_update(ACTpolArrayCoords *coords, const ACTpolState *state)
{
    switch (coords->coordsys) {
        case ACTPOL_COORDSYS_RA_DEC:
            return ACTpolArrayCoords_update_ra_dec(coords, state);
        case ACTPOL_COORDSYS_RA_SINDEC:
            return ACTpolArrayCoords_update_ra_sindec(coords, state);
        case ACTPOL_COORDSYS_AZ_ALT:
            return ACTpolArrayCoords_update_az_alt(coords, state);
        case ACTPOL_COORDSYS_GALACTIC:
            return ACTpolArrayCoords_update_galactic(coords, state);
    }
    assert(0);
    return -1;
}
DRB091-threadprivate2-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
A file-scope variable used within a function called by a parallel region.
Use threadprivate to avoid data races.
This is the case for a variable referenced within a construct.
*/
/* NOTE(review): this "-no" DataRaceBench variant is race-free as written:
   the file-scope accumulators below are covered by reduction clauses, so
   each thread works on a private copy that is combined at the join. */
#include <stdio.h>
#include <assert.h>

/* file-scope accumulators shared by the two loops in main() */
int sum0=0, sum1=0;

int main()
{
  int len=1000;
  int i, sum=0;

  {
    /* sum0 is file-scope, but reduction(+:sum0) privatizes it per thread;
       no unsynchronized concurrent writes occur */
#pragma omp parallel for private(i ) reduction(+:sum0)
    for (i=0;i<len;i++) {
      sum0=sum0+i;
    }
  }
  sum= sum+sum0;

  /* reference calculation */
#pragma omp parallel for private(i ) reduction(+:sum1)
  for (i=0;i<len;i++) {
    sum1=sum1+i;
  }

  printf("sum=%d; sum1=%d\n",sum,sum1);
  /* both loops must agree on 0+1+...+999 */
  assert(sum==sum1);
  return 0;
}
GB_unaryop__lnot_int16_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_int16_fp32 // op(A') function: GB_tran__lnot_int16_fp32 // C type: int16_t // A type: float // cast: int16_t cij ; GB_CAST_SIGNED(cij,aij,16) // unaryop: cij = !(aij != 0) #define GB_ATYPE \ float #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, aij) \ int16_t z ; GB_CAST_SIGNED(z,aij,16) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_int16_fp32 ( int16_t *Cx, // Cx and Ax may be aliased float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz 
    ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_int16_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the generic transpose template is instantiated with this file's
    // GB_CAST_OP / GB_OP macros (phase 2 fills in the output values)
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
OMPIRBuilder.h
//===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the OpenMPIRBuilder class and helpers used as a convenient // way to create LLVM instructions for OpenMP directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H #define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/IR/DebugLoc.h" #include "llvm/IR/IRBuilder.h" #include "llvm/Support/Allocator.h" #include <forward_list> namespace llvm { class CanonicalLoopInfo; /// An interface to create LLVM-IR for OpenMP directives. /// /// Each OpenMP directive has a corresponding public generator method. class OpenMPIRBuilder { public: /// Create a new OpenMPIRBuilder operating on the given module \p M. This will /// not have an effect on \p M (see initialize). OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {} ~OpenMPIRBuilder(); /// Initialize the internal state, this will put structures types and /// potentially other helpers into the underlying module. Must be called /// before any other method and only once! void initialize(); /// Finalize the underlying module, e.g., by outlining regions. /// \param Fn The function to be finalized. If not used, /// all functions are finalized. void finalize(Function *Fn = nullptr); /// Add attributes known for \p FnID to \p Fn. void addAttributes(omp::RuntimeFunction FnID, Function &Fn); /// Type used throughout for insertion points. using InsertPointTy = IRBuilder<>::InsertPoint; /// Callback type for variable finalization (think destructors). 
/// /// \param CodeGenIP is the insertion point at which the finalization code /// should be placed. /// /// A finalize callback knows about all objects that need finalization, e.g. /// destruction, when the scope of the currently generated construct is left /// at the time, and location, the callback is invoked. using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>; struct FinalizationInfo { /// The finalization callback provided by the last in-flight invocation of /// createXXXX for the directive of kind DK. FinalizeCallbackTy FiniCB; /// The directive kind of the innermost directive that has an associated /// region which might require finalization when it is left. omp::Directive DK; /// Flag to indicate if the directive is cancellable. bool IsCancellable; }; /// Push a finalization callback on the finalization stack. /// /// NOTE: Temporary solution until Clang CG is gone. void pushFinalizationCB(const FinalizationInfo &FI) { FinalizationStack.push_back(FI); } /// Pop the last finalization callback from the finalization stack. /// /// NOTE: Temporary solution until Clang CG is gone. void popFinalizationCB() { FinalizationStack.pop_back(); } /// Callback type for body (=inner region) code generation /// /// The callback takes code locations as arguments, each describing a /// location at which code might need to be generated or a location that is /// the target of control transfer. /// /// \param AllocaIP is the insertion point at which new alloca instructions /// should be placed. /// \param CodeGenIP is the insertion point at which the body code should be /// placed. /// \param ContinuationBB is the basic block target to leave the body. /// /// Note that all blocks pointed to by the arguments have terminators. 
using BodyGenCallbackTy = function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, BasicBlock &ContinuationBB)>; // This is created primarily for sections construct as llvm::function_ref // (BodyGenCallbackTy) is not storable (as described in the comments of // function_ref class - function_ref contains non-ownable reference // to the callable. using StorableBodyGenCallbackTy = std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, BasicBlock &ContinuationBB)>; /// Callback type for loop body code generation. /// /// \param CodeGenIP is the insertion point where the loop's body code must be /// placed. This will be a dedicated BasicBlock with a /// conditional branch from the loop condition check and /// terminated with an unconditional branch to the loop /// latch. /// \param IndVar is the induction variable usable at the insertion point. using LoopBodyGenCallbackTy = function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>; /// Callback type for variable privatization (think copy & default /// constructor). /// /// \param AllocaIP is the insertion point at which new alloca instructions /// should be placed. /// \param CodeGenIP is the insertion point at which the privatization code /// should be placed. /// \param Original The value being copied/created, should not be used in the /// generated IR. /// \param Inner The equivalent of \p Original that should be used in the /// generated IR; this is equal to \p Original if the value is /// a pointer and can thus be passed directly, otherwise it is /// an equivalent but different value. /// \param ReplVal The replacement value, thus a copy or new created version /// of \p Inner. /// /// \returns The new insertion point where code generation continues and /// \p ReplVal the replacement value. 
  using PrivatizeCallbackTy = function_ref<InsertPointTy(
      InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
      Value &Inner, Value *&ReplVal)>;

  /// Description of an LLVM-IR insertion point (IP) and a debug/source location
  /// (filename, line, column, ...).
  struct LocationDescription {
    template <typename T, typename U>
    LocationDescription(const IRBuilder<T, U> &IRB)
        : IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
    LocationDescription(const InsertPointTy &IP) : IP(IP) {}
    LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
        : IP(IP), DL(DL) {}
    InsertPointTy IP;
    DebugLoc DL;
  };

  /// Emitter methods for OpenMP directives.
  ///
  ///{

  /// Generator for '#omp barrier'
  ///
  /// \param Loc The location where the barrier directive was encountered.
  /// \param DK The kind of directive that caused the barrier.
  /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
  /// \param CheckCancelFlag Flag to indicate a cancel barrier return value
  ///                        should be checked and acted upon.
  ///
  /// \returns The insertion point after the barrier.
  InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK,
                              bool ForceSimpleCall = false,
                              bool CheckCancelFlag = true);

  /// Generator for '#omp cancel'
  ///
  /// \param Loc The location where the directive was encountered.
  /// \param IfCondition The evaluated 'if' clause expression, if any.
  /// \param CanceledDirective The kind of directive that is canceled.
  ///
  /// \returns The insertion point after the cancellation.
  InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition,
                             omp::Directive CanceledDirective);

  /// Generator for '#omp parallel'
  ///
  /// \param Loc The insert and source location description.
  /// \param AllocaIP The insertion points to be used for alloca instructions.
  /// \param BodyGenCB Callback that will generate the region code.
  /// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies. /// \param IfCondition The evaluated 'if' clause expression, if any. /// \param NumThreads The evaluated 'num_threads' clause expression, if any. /// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind). /// \param IsCancellable Flag to indicate a cancellable parallel region. /// /// \returns The insertion position *after* the parallel. IRBuilder<>::InsertPoint createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads, omp::ProcBindKind ProcBind, bool IsCancellable); /// Generator for the control flow structure of an OpenMP canonical loop. /// /// This generator operates on the logical iteration space of the loop, i.e. /// the caller only has to provide a loop trip count of the loop as defined by /// base language semantics. The trip count is interpreted as an unsigned /// integer. The induction variable passed to \p BodyGenCB will be of the same /// type and run from 0 to \p TripCount - 1. It is up to the callback to /// convert the logical iteration variable to the loop counter variable in the /// loop body. /// /// \param Loc The insert and source location description. The insert /// location can be between two instructions or the end of a /// degenerate block (e.g. a BB under construction). /// \param BodyGenCB Callback that will generate the loop body code. /// \param TripCount Number of iterations the loop body is executed. /// \param Name Base name used to derive BB and instruction names. /// /// \returns An object representing the created control flow structure which /// can be used for loop-associated directives. CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *TripCount, const Twine &Name = "loop"); /// Generator for the control flow structure of an OpenMP canonical loop. 
/// /// Instead of a logical iteration space, this allows specifying user-defined /// loop counter values using increment, upper- and lower bounds. To /// disambiguate the terminology when counting downwards, instead of lower /// bounds we use \p Start for the loop counter value in the first body /// iteration. /// /// Consider the following limitations: /// /// * A loop counter space over all integer values of its bit-width cannot be /// represented. E.g using uint8_t, its loop trip count of 256 cannot be /// stored into an 8 bit integer): /// /// DO I = 0, 255, 1 /// /// * Unsigned wrapping is only supported when wrapping only "once"; E.g. /// effectively counting downwards: /// /// for (uint8_t i = 100u; i > 0; i += 127u) /// /// /// TODO: May need to add additional parameters to represent: /// /// * Allow representing downcounting with unsigned integers. /// /// * Sign of the step and the comparison operator might disagree: /// /// for (int i = 0; i < 42; i -= 1u) /// // /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the loop body code. /// \param Start Value of the loop counter for the first iterations. /// \param Stop Loop counter values past this will stop the loop. /// \param Step Loop counter increment after each iteration; negative /// means counting down. /// \param IsSigned Whether Start, Stop and Step are signed integers. /// \param InclusiveStop Whether \p Stop itself is a valid value for the loop /// counter. /// \param ComputeIP Insertion point for instructions computing the trip /// count. Can be used to ensure the trip count is available /// at the outermost loop of a loop nest. If not set, /// defaults to the preheader of the generated loop. /// \param Name Base name used to derive BB and instruction names. /// /// \returns An object representing the created control flow structure which /// can be used for loop-associated directives. 
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop, InsertPointTy ComputeIP = {}, const Twine &Name = "loop"); /// Collapse a loop nest into a single loop. /// /// Merges loops of a loop nest into a single CanonicalLoopNest representation /// that has the same number of innermost loop iterations as the origin loop /// nest. The induction variables of the input loops are derived from the /// collapsed loop's induction variable. This is intended to be used to /// implement OpenMP's collapse clause. Before applying a directive, /// collapseLoops normalizes a loop nest to contain only a single loop and the /// directive's implementation does not need to handle multiple loops itself. /// This does not remove the need to handle all loop nest handling by /// directives, such as the ordered(<n>) clause or the simd schedule-clause /// modifier of the worksharing-loop directive. /// /// Example: /// \code /// for (int i = 0; i < 7; ++i) // Canonical loop "i" /// for (int j = 0; j < 9; ++j) // Canonical loop "j" /// body(i, j); /// \endcode /// /// After collapsing with Loops={i,j}, the loop is changed to /// \code /// for (int ij = 0; ij < 63; ++ij) { /// int i = ij / 9; /// int j = ij % 9; /// body(i, j); /// } /// \endcode /// /// In the current implementation, the following limitations apply: /// /// * All input loops have an induction variable of the same type. /// /// * The collapsed loop will have the same trip count integer type as the /// input loops. Therefore it is possible that the collapsed loop cannot /// represent all iterations of the input loops. For instance, assuming a /// 32 bit integer type, and two input loops both iterating 2^16 times, the /// theoretical trip count of the collapsed loop would be 2^32 iteration, /// which cannot be represented in an 32-bit integer. Behavior is undefined /// in this case. 
/// /// * The trip counts of every input loop must be available at \p ComputeIP. /// Non-rectangular loops are not yet supported. /// /// * At each nest level, code between a surrounding loop and its nested loop /// is hoisted into the loop body, and such code will be executed more /// often than before collapsing (or not at all if any inner loop iteration /// has a trip count of 0). This is permitted by the OpenMP specification. /// /// \param DL Debug location for instructions added for collapsing, /// such as instructions to compute/derive the input loop's /// induction variables. /// \param Loops Loops in the loop nest to collapse. Loops are specified /// from outermost-to-innermost and every control flow of a /// loop's body must pass through its directly nested loop. /// \param ComputeIP Where additional instruction that compute the collapsed /// trip count. If not set, defaults to before the generated /// loop. /// /// \returns The CanonicalLoopInfo object representing the collapsed loop. CanonicalLoopInfo *collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, InsertPointTy ComputeIP); /// Modifies the canonical loop to be a statically-scheduled workshare loop. /// /// This takes a \p LoopInfo representing a canonical loop, such as the one /// created by \p createCanonicalLoop and emits additional instructions to /// turn it into a workshare loop. In particular, it calls to an OpenMP /// runtime function in the preheader to obtain the loop bounds to be used in /// the current thread, updates the relevant instructions in the canonical /// loop and calls to an OpenMP runtime finalization function after the loop. /// /// TODO: Workshare loops with static scheduling may contain up to two loops /// that fulfill the requirements of an OpenMP canonical loop. One for /// iterating over all iterations of a chunk and another one for iterating /// over all chunks that are executed on the same thread. 
Returning
  /// CanonicalLoopInfo objects representing them may eventually be useful for
  /// the apply clause planned in OpenMP 6.0, but currently whether these are
  /// canonical loops is irrelevant.
  ///
  /// \param DL       Debug location for instructions added for the
  ///                 workshare-loop construct itself.
  /// \param CLI      A descriptor of the canonical loop to workshare.
  /// \param AllocaIP An insertion point for Alloca instructions usable in the
  ///                 preheader of the loop.
  /// \param NeedsBarrier Indicates whether a barrier must be inserted after
  ///                     the loop.
  /// \param Chunk    The size of loop chunk considered as a unit when
  ///                 scheduling. If \p nullptr, defaults to 1.
  ///
  /// \returns Point where to insert code after the workshare construct.
  InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
                                         InsertPointTy AllocaIP,
                                         bool NeedsBarrier,
                                         Value *Chunk = nullptr);

  /// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
  ///
  /// This takes a \p LoopInfo representing a canonical loop, such as the one
  /// created by \p createCanonicalLoop and emits additional instructions to
  /// turn it into a workshare loop. In particular, it calls to an OpenMP
  /// runtime function in the preheader to obtain, and then in each iteration
  /// to update the loop counter.
  ///
  /// \param DL       Debug location for instructions added for the
  ///                 workshare-loop construct itself.
  /// \param CLI      A descriptor of the canonical loop to workshare.
  /// \param AllocaIP An insertion point for Alloca instructions usable in the
  ///                 preheader of the loop.
  /// \param SchedType Type of scheduling to be passed to the init function.
  /// \param NeedsBarrier Indicates whether a barrier must be inserted after
  ///                     the loop.
  /// \param Chunk    The size of loop chunk considered as a unit when
  ///                 scheduling. If \p nullptr, defaults to 1.
  ///
  /// \returns Point where to insert code after the workshare construct.
  InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
                                          InsertPointTy AllocaIP,
                                          omp::OMPScheduleType SchedType,
                                          bool NeedsBarrier,
                                          Value *Chunk = nullptr);

  /// Modifies the canonical loop to be a workshare loop.
  ///
  /// This takes a \p LoopInfo representing a canonical loop, such as the one
  /// created by \p createCanonicalLoop and emits additional instructions to
  /// turn it into a workshare loop. In particular, it calls to an OpenMP
  /// runtime function in the preheader to obtain the loop bounds to be used in
  /// the current thread, updates the relevant instructions in the canonical
  /// loop and calls to an OpenMP runtime finalization function after the loop.
  ///
  /// \param DL       Debug location for instructions added for the
  ///                 workshare-loop construct itself.
  /// \param CLI      A descriptor of the canonical loop to workshare.
  /// \param AllocaIP An insertion point for Alloca instructions usable in the
  ///                 preheader of the loop.
  /// \param NeedsBarrier Indicates whether a barrier must be inserted after
  ///                     the loop.
  ///
  /// \returns Point where to insert code after the workshare construct.
  InsertPointTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
                                   InsertPointTy AllocaIP, bool NeedsBarrier);

  /// Tile a loop nest.
  ///
  /// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
  /// \p Loops must be perfectly nested, from outermost to innermost loop
  /// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
  /// of every loop and every tile size must be usable in the outermost
  /// loop's preheader. This implies that the loop nest is rectangular.
/// /// Example: /// \code /// for (int i = 0; i < 15; ++i) // Canonical loop "i" /// for (int j = 0; j < 14; ++j) // Canonical loop "j" /// body(i, j); /// \endcode /// /// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to /// \code /// for (int i1 = 0; i1 < 3; ++i1) /// for (int j1 = 0; j1 < 2; ++j1) /// for (int i2 = 0; i2 < 5; ++i2) /// for (int j2 = 0; j2 < 7; ++j2) /// body(i1*3+i2, j1*3+j2); /// \endcode /// /// The returned vector are the loops {i1,j1,i2,j2}. The loops i1 and j1 are /// referred to the floor, and the loops i2 and j2 are the tiles. Tiling also /// handles non-constant trip counts, non-constant tile sizes and trip counts /// that are not multiples of the tile size. In the latter case the tile loop /// of the last floor-loop iteration will have fewer iterations than specified /// as its tile size. /// /// /// @param DL Debug location for instructions added by tiling, for /// instance the floor- and tile trip count computation. /// @param Loops Loops to tile. The CanonicalLoopInfo objects are /// invalidated by this method, i.e. should not used after /// tiling. /// @param TileSizes For each loop in \p Loops, the tile size for that /// dimensions. /// /// \returns A list of generated loops. Contains twice as many loops as the /// input loop nest; the first half are the floor loops and the /// second half are the tile loops. std::vector<CanonicalLoopInfo *> tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, ArrayRef<Value *> TileSizes); /// Fully unroll a loop. /// /// Instead of unrolling the loop immediately (and duplicating its body /// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop /// metadata. /// /// \param DL Debug location for instructions added by unrolling. /// \param Loop The loop to unroll. The loop will be invalidated. void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop); /// Fully or partially unroll a loop. 
How the loop is unrolled is determined /// using LLVM's LoopUnrollPass. /// /// \param DL Debug location for instructions added by unrolling. /// \param Loop The loop to unroll. The loop will be invalidated. void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop); /// Partially unroll a loop. /// /// The CanonicalLoopInfo of the unrolled loop for use with chained /// loop-associated directive can be requested using \p UnrolledCLI. Not /// needing the CanonicalLoopInfo allows more efficient code generation by /// deferring the actual unrolling to the LoopUnrollPass using loop metadata. /// A loop-associated directive applied to the unrolled loop needs to know the /// new trip count which means that if using a heuristically determined unroll /// factor (\p Factor == 0), that factor must be computed immediately. We are /// using the same logic as the LoopUnrollPass to derived the unroll factor, /// but which assumes that some canonicalization has taken place (e.g. /// Mem2Reg, LICM, GVN, Inlining, etc.). That is, the heuristic will perform /// better when the unrolled loop's CanonicalLoopInfo is not needed. /// /// \param DL Debug location for instructions added by unrolling. /// \param Loop The loop to unroll. The loop will be invalidated. /// \param Factor The factor to unroll the loop by. A factor of 0 /// indicates that a heuristic should be used to determine /// the unroll-factor. /// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the /// partially unrolled loop. Otherwise, uses loop metadata /// to defer unrolling to the LoopUnrollPass. void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor, CanonicalLoopInfo **UnrolledCLI); /// Add metadata to simd-ize a loop. /// /// \param DL Debug location for instructions added by unrolling. /// \param Loop The loop to simd-ize. 
void applySimd(DebugLoc DL, CanonicalLoopInfo *Loop); /// Generator for '#omp flush' /// /// \param Loc The location where the flush directive was encountered void createFlush(const LocationDescription &Loc); /// Generator for '#omp taskwait' /// /// \param Loc The location where the taskwait directive was encountered. void createTaskwait(const LocationDescription &Loc); /// Generator for '#omp taskyield' /// /// \param Loc The location where the taskyield directive was encountered. void createTaskyield(const LocationDescription &Loc); /// Functions used to generate reductions. Such functions take two Values /// representing LHS and RHS of the reduction, respectively, and a reference /// to the value that is updated to refer to the reduction result. using ReductionGenTy = function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>; /// Functions used to generate atomic reductions. Such functions take two /// Values representing pointers to LHS and RHS of the reduction, as well as /// the element type of these pointers. They are expected to atomically /// update the LHS to the reduced value. using AtomicReductionGenTy = function_ref<InsertPointTy(InsertPointTy, Type *, Value *, Value *)>; /// Information about an OpenMP reduction. struct ReductionInfo { ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable, ReductionGenTy ReductionGen, AtomicReductionGenTy AtomicReductionGen) : ElementType(ElementType), Variable(Variable), PrivateVariable(PrivateVariable), ReductionGen(ReductionGen), AtomicReductionGen(AtomicReductionGen) { assert(cast<PointerType>(Variable->getType()) ->isOpaqueOrPointeeTypeMatches(ElementType) && "Invalid elem type"); } /// Reduction element type, must match pointee type of variable. Type *ElementType; /// Reduction variable of pointer type. Value *Variable; /// Thread-private partial reduction variable. Value *PrivateVariable; /// Callback for generating the reduction body. 
The IR produced by this will /// be used to combine two values in a thread-safe context, e.g., under /// lock or within the same thread, and therefore need not be atomic. ReductionGenTy ReductionGen; /// Callback for generating the atomic reduction body, may be null. The IR /// produced by this will be used to atomically combine two values during /// reduction. If null, the implementation will use the non-atomic version /// along with the appropriate synchronization mechanisms. AtomicReductionGenTy AtomicReductionGen; }; // TODO: provide atomic and non-atomic reduction generators for reduction // operators defined by the OpenMP specification. /// Generator for '#omp reduction'. /// /// Emits the IR instructing the runtime to perform the specific kind of /// reductions. Expects reduction variables to have been privatized and /// initialized to reduction-neutral values separately. Emits the calls to /// runtime functions as well as the reduction function and the basic blocks /// performing the reduction atomically and non-atomically. /// /// The code emitted for the following: /// /// \code /// type var_1; /// type var_2; /// #pragma omp <directive> reduction(reduction-op:var_1,var_2) /// /* body */; /// \endcode /// /// corresponds to the following sketch. /// /// \code /// void _outlined_par() { /// // N is the number of different reductions. /// void *red_array[] = {privatized_var_1, privatized_var_2, ...}; /// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array, /// _omp_reduction_func, /// _gomp_critical_user.reduction.var)) { /// case 1: { /// var_1 = var_1 <reduction-op> privatized_var_1; /// var_2 = var_2 <reduction-op> privatized_var_2; /// // ... /// __kmpc_end_reduce(...); /// break; /// } /// case 2: { /// _Atomic<ReductionOp>(var_1, privatized_var_1); /// _Atomic<ReductionOp>(var_2, privatized_var_2); /// // ... 
/// break; /// } /// default: break; /// } /// } /// /// void _omp_reduction_func(void **lhs, void **rhs) { /// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0]; /// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1]; /// // ... /// } /// \endcode /// /// \param Loc The location where the reduction was /// encountered. Must be within the associate /// directive and after the last local access to the /// reduction variables. /// \param AllocaIP An insertion point suitable for allocas usable /// in reductions. /// \param ReductionInfos A list of info on each reduction variable. /// \param IsNoWait A flag set if the reduction is marked as nowait. InsertPointTy createReductions(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef<ReductionInfo> ReductionInfos, bool IsNoWait = false); ///} /// Return the insertion point used by the underlying IRBuilder. InsertPointTy getInsertionPoint() { return Builder.saveIP(); } /// Update the internal location to \p Loc. bool updateToLocation(const LocationDescription &Loc) { Builder.restoreIP(Loc.IP); Builder.SetCurrentDebugLocation(Loc.DL); return Loc.IP.getBlock() != nullptr; } /// Return the function declaration for the runtime function with \p FnID. FunctionCallee getOrCreateRuntimeFunction(Module &M, omp::RuntimeFunction FnID); Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID); /// Return the (LLVM-IR) string describing the source location \p LocStr. Constant *getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize); /// Return the (LLVM-IR) string describing the default source location. Constant *getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize); /// Return the (LLVM-IR) string describing the source location identified by /// the arguments. Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName, unsigned Line, unsigned Column, uint32_t &SrcLocStrSize); /// Return the (LLVM-IR) string describing the DebugLoc \p DL. 
Use \p F as /// fallback if \p DL does not specify the function name. Constant *getOrCreateSrcLocStr(DebugLoc DL, uint32_t &SrcLocStrSize, Function *F = nullptr); /// Return the (LLVM-IR) string describing the source location \p Loc. Constant *getOrCreateSrcLocStr(const LocationDescription &Loc, uint32_t &SrcLocStrSize); /// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags. /// TODO: Create a enum class for the Reserve2Flags Constant *getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize, omp::IdentFlag Flags = omp::IdentFlag(0), unsigned Reserve2Flags = 0); /// Create a hidden global flag \p Name in the module with initial value \p /// Value. GlobalValue *createGlobalFlag(unsigned Value, StringRef Name); /// Generate control flow and cleanup for cancellation. /// /// \param CancelFlag Flag indicating if the cancellation is performed. /// \param CanceledDirective The kind of directive that is cancled. /// \param ExitCB Extra code to be generated in the exit block. void emitCancelationCheckImpl(Value *CancelFlag, omp::Directive CanceledDirective, FinalizeCallbackTy ExitCB = {}); /// Generate a barrier runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. /// \param DK The directive which caused the barrier /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier. /// \param CheckCancelFlag Flag to indicate a cancel barrier return value /// should be checked and acted upon. /// /// \returns The insertion point after the barrier. InsertPointTy emitBarrierImpl(const LocationDescription &Loc, omp::Directive DK, bool ForceSimpleCall, bool CheckCancelFlag); /// Generate a flush runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. 
void emitFlush(const LocationDescription &Loc); /// The finalization stack made up of finalize callbacks currently in-flight, /// wrapped into FinalizationInfo objects that reference also the finalization /// target block and the kind of cancellable directive. SmallVector<FinalizationInfo, 8> FinalizationStack; /// Return true if the last entry in the finalization stack is of kind \p DK /// and cancellable. bool isLastFinalizationInfoCancellable(omp::Directive DK) { return !FinalizationStack.empty() && FinalizationStack.back().IsCancellable && FinalizationStack.back().DK == DK; } /// Generate a taskwait runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. void emitTaskwaitImpl(const LocationDescription &Loc); /// Generate a taskyield runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. void emitTaskyieldImpl(const LocationDescription &Loc); /// Return the current thread ID. /// /// \param Ident The ident (ident_t*) describing the query origin. Value *getOrCreateThreadID(Value *Ident); /// The underlying LLVM-IR module Module &M; /// The LLVM-IR Builder used to create IR. IRBuilder<> Builder; /// Map to remember source location strings StringMap<Constant *> SrcLocStrMap; /// Map to remember existing ident_t*. DenseMap<std::pair<Constant *, uint64_t>, Constant *> IdentMap; /// Helper that contains information about regions we need to outline /// during finalization. struct OutlineInfo { using PostOutlineCBTy = std::function<void(Function &)>; PostOutlineCBTy PostOutlineCB; BasicBlock *EntryBB, *ExitBB; SmallVector<Value *, 2> ExcludeArgsFromAggregate; /// Collect all blocks in between EntryBB and ExitBB in both the given /// vector and set. void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet, SmallVectorImpl<BasicBlock *> &BlockVector); /// Return the function that contains the region to be outlined. 
Function *getFunction() const { return EntryBB->getParent(); } }; /// Collection of regions that need to be outlined during finalization. SmallVector<OutlineInfo, 16> OutlineInfos; /// Collection of owned canonical loop objects that eventually need to be /// free'd. std::forward_list<CanonicalLoopInfo> LoopInfos; /// Add a new region that will be outlined later. void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); } /// An ordered map of auto-generated variables to their unique names. /// It stores variables with the following names: 1) ".gomp_critical_user_" + /// <critical_section_name> + ".var" for "omp critical" directives; 2) /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate /// variables. StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars; /// Create the global variable holding the offload mappings information. GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings, std::string VarName); /// Create the global variable holding the offload names information. GlobalVariable * createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names, std::string VarName); struct MapperAllocas { AllocaInst *ArgsBase = nullptr; AllocaInst *Args = nullptr; AllocaInst *ArgSizes = nullptr; }; /// Create the allocas instruction used in call to mapper functions. void createMapperAllocas(const LocationDescription &Loc, InsertPointTy AllocaIP, unsigned NumOperands, struct MapperAllocas &MapperAllocas); /// Create the call for the target mapper function. /// \param Loc The source location description. /// \param MapperFunc Function to be called. /// \param SrcLocInfo Source location information global. /// \param MaptypesArg The argument types. /// \param MapnamesArg The argument names. /// \param MapperAllocas The AllocaInst used for the call. /// \param DeviceID Device ID for the call. /// \param NumOperands Number of operands in the call. 
void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc, Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg, struct MapperAllocas &MapperAllocas, int64_t DeviceID, unsigned NumOperands); public: /// Generator for __kmpc_copyprivate /// /// \param Loc The source location description. /// \param BufSize Number of elements in the buffer. /// \param CpyBuf List of pointers to data to be copied. /// \param CpyFn function to call for copying data. /// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise. /// /// \return The insertion position *after* the CopyPrivate call. InsertPointTy createCopyPrivate(const LocationDescription &Loc, llvm::Value *BufSize, llvm::Value *CpyBuf, llvm::Value *CpyFn, llvm::Value *DidIt); /// Generator for '#omp single' /// /// \param Loc The source location description. /// \param BodyGenCB Callback that will generate the region code. /// \param FiniCB Callback to finalize variable copies. /// \param DidIt Local variable used as a flag to indicate 'single' thread /// /// \returns The insertion position *after* the single call. InsertPointTy createSingle(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, llvm::Value *DidIt); /// Generator for '#omp master' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region code. /// \param FiniCB Callback to finalize variable copies. /// /// \returns The insertion position *after* the master. InsertPointTy createMaster(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB); /// Generator for '#omp masked' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region code. /// \param FiniCB Callback to finialize variable copies. /// /// \returns The insertion position *after* the masked. 
InsertPointTy createMasked(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, Value *Filter); /// Generator for '#omp critical' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region body code. /// \param FiniCB Callback to finalize variable copies. /// \param CriticalName name of the lock used by the critical directive /// \param HintInst Hint Instruction for hint clause associated with critical /// /// \returns The insertion position *after* the critical. InsertPointTy createCritical(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst); /// Generator for '#omp ordered depend (source | sink)' /// /// \param Loc The insert and source location description. /// \param AllocaIP The insertion point to be used for alloca instructions. /// \param NumLoops The number of loops in depend clause. /// \param StoreValues The value will be stored in vector address. /// \param Name The name of alloca instruction. /// \param IsDependSource If true, depend source; otherwise, depend sink. /// /// \return The insertion position *after* the ordered. InsertPointTy createOrderedDepend(const LocationDescription &Loc, InsertPointTy AllocaIP, unsigned NumLoops, ArrayRef<llvm::Value *> StoreValues, const Twine &Name, bool IsDependSource); /// Generator for '#omp ordered [threads | simd]' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region code. /// \param FiniCB Callback to finalize variable copies. /// \param IsThreads If true, with threads clause or without clause; /// otherwise, with simd clause; /// /// \returns The insertion position *after* the ordered. 
InsertPointTy createOrderedThreadsSimd(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool IsThreads); /// Generator for '#omp sections' /// /// \param Loc The insert and source location description. /// \param AllocaIP The insertion points to be used for alloca instructions. /// \param SectionCBs Callbacks that will generate body of each section. /// \param PrivCB Callback to copy a given variable (think copy constructor). /// \param FiniCB Callback to finalize variable copies. /// \param IsCancellable Flag to indicate a cancellable parallel region. /// \param IsNowait If true, barrier - to ensure all sections are executed /// before moving forward will not be generated. /// \returns The insertion position *after* the sections. InsertPointTy createSections(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait); /// Generator for '#omp section' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region body code. /// \param FiniCB Callback to finalize variable copies. /// \returns The insertion position *after* the section. InsertPointTy createSection(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB); /// Generate conditional branch and relevant BasicBlocks through which private /// threads copy the 'copyin' variables from Master copy to threadprivate /// copies. /// /// \param IP insertion block for copyin conditional /// \param MasterVarPtr a pointer to the master variable /// \param PrivateVarPtr a pointer to the threadprivate variable /// \param IntPtrTy Pointer size type /// \param BranchtoEnd Create a branch between the copyin.not.master blocks // and copy.in.end block /// /// \returns The insertion point where copying operation to be emitted. 
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr, llvm::IntegerType *IntPtrTy, bool BranchtoEnd = true); /// Create a runtime call for kmpc_Alloc /// /// \param Loc The insert and source location description. /// \param Size Size of allocated memory space /// \param Allocator Allocator information instruction /// \param Name Name of call Instruction for OMP_alloc /// /// \returns CallInst to the OMP_Alloc call CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size, Value *Allocator, std::string Name = ""); /// Create a runtime call for kmpc_free /// /// \param Loc The insert and source location description. /// \param Addr Address of memory space to be freed /// \param Allocator Allocator information instruction /// \param Name Name of call Instruction for OMP_Free /// /// \returns CallInst to the OMP_Free call CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr, Value *Allocator, std::string Name = ""); /// Create a runtime call for kmpc_threadprivate_cached /// /// \param Loc The insert and source location description. /// \param Pointer pointer to data to be cached /// \param Size size of data to be cached /// \param Name Name of call Instruction for callinst /// /// \returns CallInst to the thread private cache call. CallInst *createCachedThreadPrivate(const LocationDescription &Loc, llvm::Value *Pointer, llvm::ConstantInt *Size, const llvm::Twine &Name = Twine("")); /// Create a runtime call for __tgt_interop_init /// /// \param Loc The insert and source location description. 
/// \param InteropVar variable to be allocated /// \param InteropType type of interop operation /// \param Device devide to which offloading will occur /// \param NumDependences number of dependence variables /// \param DependenceAddress pointer to dependence variables /// \param HaveNowaitClause does nowait clause exist /// /// \returns CallInst to the __tgt_interop_init call CallInst *createOMPInteropInit(const LocationDescription &Loc, Value *InteropVar, omp::OMPInteropType InteropType, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause); /// Create a runtime call for __tgt_interop_destroy /// /// \param Loc The insert and source location description. /// \param InteropVar variable to be allocated /// \param Device devide to which offloading will occur /// \param NumDependences number of dependence variables /// \param DependenceAddress pointer to dependence variables /// \param HaveNowaitClause does nowait clause exist /// /// \returns CallInst to the __tgt_interop_destroy call CallInst *createOMPInteropDestroy(const LocationDescription &Loc, Value *InteropVar, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause); /// Create a runtime call for __tgt_interop_use /// /// \param Loc The insert and source location description. 
/// \param InteropVar variable to be allocated /// \param Device devide to which offloading will occur /// \param NumDependences number of dependence variables /// \param DependenceAddress pointer to dependence variables /// \param HaveNowaitClause does nowait clause exist /// /// \returns CallInst to the __tgt_interop_use call CallInst *createOMPInteropUse(const LocationDescription &Loc, Value *InteropVar, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause); /// The `omp target` interface /// /// For more information about the usage of this interface, /// \see openmp/libomptarget/deviceRTLs/common/include/target.h /// ///{ /// Create a runtime call for kmpc_target_init /// /// \param Loc The insert and source location description. /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not. /// \param RequiresFullRuntime Indicate if a full device runtime is necessary. InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD, bool RequiresFullRuntime); /// Create a runtime call for kmpc_target_deinit /// /// \param Loc The insert and source location description. /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not. /// \param RequiresFullRuntime Indicate if a full device runtime is necessary. void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD, bool RequiresFullRuntime); ///} /// Declarations for LLVM-IR types (simple, array, function and structure) are /// generated below. Their names are defined and used in OpenMPKinds.def. Here /// we provide the declarations, the initializeTypes function will provide the /// values. /// ///{ #define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr; #define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \ ArrayType *VarName##Ty = nullptr; \ PointerType *VarName##PtrTy = nullptr; #define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) 
\ FunctionType *VarName = nullptr; \ PointerType *VarName##Ptr = nullptr; #define OMP_STRUCT_TYPE(VarName, StrName, ...) \ StructType *VarName = nullptr; \ PointerType *VarName##Ptr = nullptr; #include "llvm/Frontend/OpenMP/OMPKinds.def" ///} private: /// Create all simple and struct types exposed by the runtime and remember /// the llvm::PointerTypes of them for easy access later. void initializeTypes(Module &M); /// Common interface for generating entry calls for OMP Directives. /// if the directive has a region/body, It will set the insertion /// point to the body /// /// \param OMPD Directive to generate entry blocks for /// \param EntryCall Call to the entry OMP Runtime Function /// \param ExitBB block where the region ends. /// \param Conditional indicate if the entry call result will be used /// to evaluate a conditional of whether a thread will execute /// body code or not. /// /// \return The insertion position in exit block InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional = false); /// Common interface to finalize the region /// /// \param OMPD Directive to generate exiting code for /// \param FinIP Insertion point for emitting Finalization code and exit call /// \param ExitCall Call to the ending OMP Runtime Function /// \param HasFinalize indicate if the directive will require finalization /// and has a finalization callback in the stack that /// should be called. /// /// \return The insertion position in exit block InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall, bool HasFinalize = true); /// Common Interface to generate OMP inlined regions /// /// \param OMPD Directive to generate inlined region for /// \param EntryCall Call to the entry OMP Runtime Function /// \param ExitCall Call to the ending OMP Runtime Function /// \param BodyGenCB Body code generation callback. /// \param FiniCB Finalization Callback. 
Will be called when finalizing region /// \param Conditional indicate if the entry call result will be used /// to evaluate a conditional of whether a thread will execute /// body code or not. /// \param HasFinalize indicate if the directive will require finalization /// and has a finalization callback in the stack that /// should be called. /// \param IsCancellable if HasFinalize is set to true, indicate if the /// the directive should be cancellable. /// \return The insertion point after the region InsertPointTy EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall, Instruction *ExitCall, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional = false, bool HasFinalize = true, bool IsCancellable = false); /// Get the platform-specific name separator. /// \param Parts different parts of the final name that needs separation /// \param FirstSeparator First separator used between the initial two /// parts of the name. /// \param Separator separator used between all of the rest consecutive /// parts of the name static std::string getNameWithSeparators(ArrayRef<StringRef> Parts, StringRef FirstSeparator, StringRef Separator); /// Gets (if variable with the given name already exist) or creates /// internal global variable with the specified Name. The created variable has /// linkage CommonLinkage by default and is initialized by null value. /// \param Ty Type of the global variable. If it is exist already the type /// must be the same. /// \param Name Name of the variable. Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name, unsigned AddressSpace = 0); /// Returns corresponding lock object for the specified critical region /// name. If the lock object does not exist it is created, otherwise the /// reference to the existing copy is returned. /// \param CriticalName Name of the critical region. 
/// Value *getOMPCriticalRegionLock(StringRef CriticalName); /// Callback type for Atomic Expression update /// ex: /// \code{.cpp} /// unsigned x = 0; /// #pragma omp atomic update /// x = Expr(x_old); //Expr() is any legal operation /// \endcode /// /// \param XOld the value of the atomic memory address to use for update /// \param IRB reference to the IRBuilder to use /// /// \returns Value to update X to. using AtomicUpdateCallbackTy = const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>; private: enum AtomicKind { Read, Write, Update, Capture }; /// Determine whether to emit flush or not /// /// \param Loc The insert and source location description. /// \param AO The required atomic ordering /// \param AK The OpenMP atomic operation kind used. /// /// \returns wether a flush was emitted or not bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc, AtomicOrdering AO, AtomicKind AK); /// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X) /// Only Scalar data types. /// /// \param AllocIP Instruction to create AllocaInst before. /// \param X The target atomic pointer to be updated /// \param Expr The value to update X with. /// \param AO Atomic ordering of the generated atomic /// instructions. /// \param RMWOp The binary operation used for update. If /// operation is not supported by atomicRMW, /// or belong to {FADD, FSUB, BAD_BINOP}. /// Then a `cmpExch` based atomic will be generated. /// \param UpdateOp Code generator for complex expressions that cannot be /// expressed through atomicrmw instruction. /// \param VolatileX true if \a X volatile? /// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the /// update expression, false otherwise. /// (e.g. true for X = X BinOp Expr) /// /// \returns A pair of the old value of X before the update, and the value /// used for the update. 
std::pair<Value *, Value *> emitAtomicUpdate(Instruction *AllocIP, Value *X, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool VolatileX, bool IsXBinopExpr); /// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 . /// /// \Return The instruction Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2, AtomicRMWInst::BinOp RMWOp); public: /// a struct to pack relevant information while generating atomic Ops struct AtomicOpValue { Value *Var = nullptr; bool IsSigned = false; bool IsVolatile = false; }; /// Emit atomic Read for : V = X --- Only Scalar data types. /// /// \param Loc The insert and source location description. /// \param X The target pointer to be atomically read /// \param V Memory address where to store atomically read /// value /// \param AO Atomic ordering of the generated atomic /// instructions. /// /// \return Insertion point after generated atomic read IR. InsertPointTy createAtomicRead(const LocationDescription &Loc, AtomicOpValue &X, AtomicOpValue &V, AtomicOrdering AO); /// Emit atomic write for : X = Expr --- Only Scalar data types. /// /// \param Loc The insert and source location description. /// \param X The target pointer to be atomically written to /// \param Expr The value to store. /// \param AO Atomic ordering of the generated atomic /// instructions. /// /// \return Insertion point after generated atomic Write IR. InsertPointTy createAtomicWrite(const LocationDescription &Loc, AtomicOpValue &X, Value *Expr, AtomicOrdering AO); /// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X) /// Only Scalar data types. /// /// \param Loc The insert and source location description. /// \param AllocIP Instruction to create AllocaInst before. /// \param X The target atomic pointer to be updated /// \param Expr The value to update X with. 
/// \param AO Atomic ordering of the generated atomic instructions. /// \param RMWOp The binary operation used for update. If operation /// is not supported by atomicRMW, or belong to /// {FADD, FSUB, BAD_BINOP}. Then a `cmpExch` based /// atomic will be generated. /// \param UpdateOp Code generator for complex expressions that cannot be /// expressed through atomicrmw instruction. /// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the /// update expression, false otherwise. /// (e.g. true for X = X BinOp Expr) /// /// \return Insertion point after generated atomic update IR. InsertPointTy createAtomicUpdate(const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr); /// Emit atomic update for constructs: --- Only Scalar data types /// V = X; X = X BinOp Expr , /// X = X BinOp Expr; V = X, /// V = X; X = Expr BinOp X, /// X = Expr BinOp X; V = X, /// V = X; X = UpdateOp(X), /// X = UpdateOp(X); V = X, /// /// \param Loc The insert and source location description. /// \param AllocIP Instruction to create AllocaInst before. /// \param X The target atomic pointer to be updated /// \param V Memory address where to store captured value /// \param Expr The value to update X with. /// \param AO Atomic ordering of the generated atomic instructions /// \param RMWOp The binary operation used for update. If /// operation is not supported by atomicRMW, or belong to /// {FADD, FSUB, BAD_BINOP}. Then a cmpExch based /// atomic will be generated. /// \param UpdateOp Code generator for complex expressions that cannot be /// expressed through atomicrmw instruction. /// \param UpdateExpr true if X is an in place update of the form /// X = X BinOp Expr or X = Expr BinOp X /// \param IsXBinopExpr true if X is Left H.S. in Right H.S. part of the /// update expression, false otherwise. /// (e.g. 
true for X = X BinOp Expr) /// \param IsPostfixUpdate true if original value of 'x' must be stored in /// 'v', not an updated one. /// /// \return Insertion point after generated atomic capture IR. InsertPointTy createAtomicCapture(const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X, AtomicOpValue &V, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr); /// Create the control flow structure of a canonical OpenMP loop. /// /// The emitted loop will be disconnected, i.e. no edge to the loop's /// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's /// IRBuilder location is not preserved. /// /// \param DL DebugLoc used for the instructions in the skeleton. /// \param TripCount Value to be used for the trip count. /// \param F Function in which to insert the BasicBlocks. /// \param PreInsertBefore Where to insert BBs that execute before the body, /// typically the body itself. /// \param PostInsertBefore Where to insert BBs that execute after the body. /// \param Name Base name used to derive BB /// and instruction names. /// /// \returns The CanonicalLoopInfo that represents the emitted loop. CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore, BasicBlock *PostInsertBefore, const Twine &Name = {}); }; /// Class to represented the control flow structure of an OpenMP canonical loop. /// /// The control-flow structure is standardized for easy consumption by /// directives associated with loops. For instance, the worksharing-loop /// construct may change this control flow such that each loop iteration is /// executed on only one thread. The constraints of a canonical loop in brief /// are: /// /// * The number of loop iterations must have been computed before entering the /// loop. /// /// * Has an (unsigned) logical induction variable that starts at zero and /// increments by one. 
///
/// * The loop's CFG itself has no side-effects. The OpenMP specification
/// itself allows side-effects, but the order in which they happen, including
/// how often or whether at all, is unspecified. We expect that the frontend
/// will emit those side-effect instructions somewhere (e.g. before the loop)
/// such that the CanonicalLoopInfo itself can be side-effect free.
///
/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
/// execution of a loop body that satisfies these constraints. It does NOT
/// represent arbitrary SESE regions that happen to contain a loop. Do not use
/// CanonicalLoopInfo for such purposes.
///
/// The control flow can be described as follows:
///
///     Preheader
///           |
///  /-> Header
///  |        |
///  |      Cond---\
///  |        |    |
///  |      Body   |
///  |     | | |   |
///  |    <...>    |
///  |     | | |   |
///  \--Latch      |
///                |
///              Exit
///                |
///              After
///
/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
/// including) and end at AfterIP (at the After's first instruction, excluding).
/// That is, instructions in the Preheader and After blocks (except the
/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
/// side-effects. Typically, the Preheader is used to compute the loop's trip
/// count. The instructions from BodyIP (at the Body block's first instruction,
/// excluding) until the Latch are also considered outside CanonicalLoopInfo's
/// control and thus can have side-effects. The body block is the single entry
/// point into the loop body, which may contain arbitrary control flow as long
/// as all control paths eventually branch to the Latch block.
///
/// TODO: Consider adding another standardized BasicBlock between Body CFG and
/// Latch to guarantee that there is only a single edge to the latch. It would
/// make loop transformations easier by removing the need to consider multiple
/// predecessors of the latch (see redirectAllPredecessorsTo) and would give us
/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
/// executes after each body iteration.
///
/// There must be no loop-carried dependencies through llvm::Values. This is
/// equivalent to the Latch having no PHINode and the Header's only PHINode
/// being for the induction variable.
///
/// All code in Header, Cond, Latch and Exit (plus the terminator of the
/// Preheader) are CanonicalLoopInfo's responsibility and their build-up checked
/// by assertOK(). They are expected to not be modified unless explicitly
/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
/// anymore as its underlying control flow may not exist anymore.
/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
/// may also return a new CanonicalLoopInfo that can be passed to other
/// loop-associated construct implementing methods. These loop-transforming
/// methods may either create a new CanonicalLoopInfo usually using
/// createLoopSkeleton and invalidate the input CanonicalLoopInfo, or reuse and
/// modify one of the input CanonicalLoopInfo and return it as representing the
/// modified loop. What is done is an implementation detail of the
/// transformation-implementing method and callers should always assume that the
/// CanonicalLoopInfo passed to it is invalidated and a new object is returned.
/// Returned CanonicalLoopInfo have the same structure and guarantees as the one
/// created by createCanonicalLoop, such that transforming methods do not have
/// to special case where the CanonicalLoopInfo originated from.
///
/// Generally, methods consuming CanonicalLoopInfo do not need an
/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
/// any InsertPoint in the Preheader, After or Block can still be used after
/// calling such a method.
///
/// TODO: Provide mechanisms for exception handling and cancellation points.
///
/// Defined outside OpenMPIRBuilder because nested classes cannot be
/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
class CanonicalLoopInfo {
  friend class OpenMPIRBuilder;

private:
  // The four standardized control blocks of the canonical loop. All are null
  // until the object is initialized (see isValid()).
  BasicBlock *Header = nullptr;
  BasicBlock *Cond = nullptr;
  BasicBlock *Latch = nullptr;
  BasicBlock *Exit = nullptr;

  /// Add the control blocks of this loop to \p BBs.
  ///
  /// This does not include any block from the body, including the one returned
  /// by getBody().
  ///
  /// FIXME: This currently includes the Preheader and After blocks even though
  /// their content is (mostly) not under CanonicalLoopInfo's control.
  /// Re-evaluate whether this makes sense.
  void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);

public:
  /// Returns whether this object currently represents the IR of a loop. If
  /// returning false, it may have been consumed by a loop transformation or not
  /// been initialized. Do not use in this case.
  bool isValid() const { return Header; }

  /// The preheader ensures that there is only a single edge entering the loop.
  /// Code that must be executed before any loop iteration can be emitted here,
  /// such as computing the loop trip count and begin lifetime markers. Code in
  /// the preheader is not considered part of the canonical loop.
  BasicBlock *getPreheader() const;

  /// The header is the entry for each iteration. In the canonical control flow,
  /// it only contains the PHINode for the induction variable.
  BasicBlock *getHeader() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Header;
  }

  /// The condition block computes whether there is another loop iteration. If
  /// yes, branches to the body; otherwise to the exit block.
  BasicBlock *getCond() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Cond;
  }

  /// The body block is the single entry for a loop iteration and not controlled
  /// by CanonicalLoopInfo. It can contain arbitrary control flow but must
  /// eventually branch to the \p Latch block.
  BasicBlock *getBody() const {
    assert(isValid() && "Requires a valid canonical loop");
    // By construction, the body is the first (taken) successor of the
    // condition's conditional branch.
    return cast<BranchInst>(Cond->getTerminator())->getSuccessor(0);
  }

  /// Reaching the latch indicates the end of the loop body code. In the
  /// canonical control flow, it only contains the increment of the induction
  /// variable.
  BasicBlock *getLatch() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Latch;
  }

  /// Reaching the exit indicates no more iterations are being executed.
  BasicBlock *getExit() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Exit;
  }

  /// The after block is intended for clean-up code such as lifetime end
  /// markers. It is separate from the exit block to ensure, analogous to the
  /// preheader, it having just a single entry edge and being free from PHI
  /// nodes should there be multiple loop exits (such as from break
  /// statements/cancellations).
  BasicBlock *getAfter() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Exit->getSingleSuccessor();
  }

  /// Returns the llvm::Value containing the number of loop iterations. It must
  /// be valid in the preheader and always interpreted as an unsigned integer of
  /// any bit-width.
  Value *getTripCount() const {
    assert(isValid() && "Requires a valid canonical loop");
    // The trip count is the second operand of the IV < TripCount compare that
    // the condition block starts with.
    Instruction *CmpI = &Cond->front();
    assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
    return CmpI->getOperand(1);
  }

  /// Returns the instruction representing the current logical induction
  /// variable. Always unsigned, always starting at 0 with an increment of one.
  Instruction *getIndVar() const {
    assert(isValid() && "Requires a valid canonical loop");
    // The IV is the PHINode that the header block starts with.
    Instruction *IndVarPHI = &Header->front();
    assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
    return IndVarPHI;
  }

  /// Return the type of the induction variable (and the trip count).
  Type *getIndVarType() const {
    assert(isValid() && "Requires a valid canonical loop");
    return getIndVar()->getType();
  }

  /// Return the insertion point for user code before the loop.
  OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    BasicBlock *Preheader = getPreheader();
    // Insert before the preheader's terminator so emitted code still falls
    // through into the loop.
    return {Preheader, std::prev(Preheader->end())};
  };

  /// Return the insertion point for user code in the body.
  OpenMPIRBuilder::InsertPointTy getBodyIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    BasicBlock *Body = getBody();
    return {Body, Body->begin()};
  };

  /// Return the insertion point for user code after the loop.
  OpenMPIRBuilder::InsertPointTy getAfterIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    BasicBlock *After = getAfter();
    return {After, After->begin()};
  };

  /// Return the function that contains this loop.
  Function *getFunction() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Header->getParent();
  }

  /// Consistency self-check.
  void assertOK() const;

  /// Invalidate this loop. That is, the underlying IR does not fulfill the
  /// requirements of an OpenMP canonical loop anymore.
  void invalidate();
};

} // end namespace llvm

#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
GB_binop__islt_int16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// Each function below is a thin, type-specialized wrapper: the actual kernel
// bodies come from the #include'd template files, driven by the GB_* macros
// defined here for the ISLT operator on int16.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__islt_int16)
// A.*B function (eWiseMult):       GB (_AemultB_08__islt_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__islt_int16)
// A.*B function (eWiseMult):       GB (_AemultB_04__islt_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__islt_int16)
// A*D function (colscale):         GB (_AxD__islt_int16)
// D*A function (rowscale):         GB (_DxB__islt_int16)
// C+=B function (dense accum):     GB (_Cdense_accumB__islt_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__islt_int16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__islt_int16)
// C=scalar+B                       GB (_bind1st__islt_int16)
// C=scalar+B'                      GB (_bind1st_tran__islt_int16)
// C=A+scalar                       GB (_bind2nd__islt_int16)
// C=A'+scalar                      GB (_bind2nd_tran__islt_int16)

// C type:     int16_t
// A type:     int16_t
// A pattern?  0
// B type:     int16_t
// B pattern?  0

// BinaryOp:   cij = (aij < bij)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    int16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    int16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x < y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLT || GxB_NO_INT16 || GxB_NO_ISLT_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISLT is none of these, so this variant is compiled out.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__islt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__islt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__islt_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // not reached: the generator emits this redundant fallthrough return
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__islt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__islt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__islt_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta scalars are used only for eWiseUnion (fill values for
    // entries present in just one of A or B)
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__islt_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__islt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__islt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__islt_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__islt_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t   x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__islt_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t   y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x < aij) ;                       \
}

GrB_Info GB (_bind1st_tran__islt_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij < y) ;                       \
}

GrB_Info GB (_bind2nd_tran__islt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pcpaesmctrcaomp.c
/******************************************************************************* * Copyright 2013-2018 Intel Corporation * All Rights Reserved. * * If this software was obtained under the Intel Simplified Software License, * the following terms apply: * * The source code, information and material ("Material") contained herein is * owned by Intel Corporation or its suppliers or licensors, and title to such * Material remains with Intel Corporation or its suppliers or licensors. The * Material contains proprietary information of Intel or its suppliers and * licensors. The Material is protected by worldwide copyright laws and treaty * provisions. No part of the Material may be used, copied, reproduced, * modified, published, uploaded, posted, transmitted, distributed or disclosed * in any way without Intel's prior express written permission. No license under * any patent, copyright or other intellectual property rights in the Material * is granted to or conferred upon you, either expressly, by implication, * inducement, estoppel or otherwise. Any license under such intellectual * property rights must be express and approved by Intel in writing. * * Unless otherwise agreed by Intel in writing, you may not remove or alter this * notice or any other notice embedded in Materials by Intel or Intel's * suppliers or licensors in any way. * * * If this software was obtained under the Apache License, Version 2.0 (the * "License"), the following terms apply: * * You may not use this file except in compliance with the License. You may * obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 * * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. 
*******************************************************************************/ /* // Purpose: // Cryptography Primitive. // AES encryption/decryption (CTR mode) // // Contents: // ippsAESEncryptCTR() // ippsAESDecryptCTR() // // */ #include "owndefs.h" #if defined(_OPENMP) #include "owncp.h" #include "pcpaesm.h" #include "pcptool.h" #include "omp.h" #if (_ALG_AES_SAFE_==_ALG_AES_SAFE_COMPOSITE_GF_) # pragma message("_ALG_AES_SAFE_COMPOSITE_GF_ enabled") #elif (_ALG_AES_SAFE_==_ALG_AES_SAFE_COMPACT_SBOX_) # pragma message("_ALG_AES_SAFE_COMPACT_SBOX_ enabled") # include "pcprijtables.h" #else # pragma message("_ALG_AES_SAFE_ disabled") #endif /* // AES-CTR processing. // // Returns: Reason: // ippStsNullPtrErr pCtx == NULL // pSrc == NULL // pDst == NULL // pCtrValue ==NULL // ippStsContextMatchErr !VALID_AES_ID() // ippStsLengthErr len <1 // ippStsCTRSizeErr 128 < ctrNumBitSize < 1 // ippStsNoErr no errors // // Parameters: // pSrc pointer to the source data buffer // pDst pointer to the target data buffer // dataLen input/output buffer length (in bytes) // pCtx pointer to rge AES context // pCtrValue pointer to the counter block // ctrNumBitSize counter block size (bits) // // Note: // counter will updated on return // */ static void AES_CTR_processing(const Ipp8u* pSrc, Ipp8u* pDst, int nBlocks, const IppsAESSpec* pCtx, Ipp8u* pCtrValue, int ctrNumBitSize) { #if (_IPP>=_IPP_P8) || (_IPP32E>=_IPP32E_Y8) /* use pipelined version if possible */ if(AES_NI_ENABLED==RIJ_AESNI(pCtx)) { /* construct ctr mask */ Ipp8u maskIV[MBS_RIJ128]; int n; int maskPosition = (MBS_RIJ128*8-ctrNumBitSize)/8; Ipp8u maskValue = (Ipp8u)(0xFF >> (MBS_RIJ128*8-ctrNumBitSize)%8 ); for(n=0; n<maskPosition; n++) maskIV[n] = 0; maskIV[maskPosition] = maskValue; for(n=maskPosition+1; n<16; n++) maskIV[n] = 0xFF; EncryptCTR_RIJ128pipe_AES_NI(pSrc, pDst, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), nBlocks*MBS_RIJ128, pCtrValue, (Ipp8u*)maskIV); } else #endif { /* setup encoder method */ RijnCipher encoder = 
/* NOTE(review): this chunk opens mid-function (inside AES_CTR_processing);
   the left-hand side of this assignment -- "RijnCipher encoder =" -- lies
   above this view. */
              RIJ_ENCODER(pCtx);

      /* scratch keystream block (one 128-bit Rijndael block) */
      Ipp32u output[NB(128)];

      /* work on a private copy of the counter value */
      Ipp32u ctr[NB(128)];
      CopyBlock16(pCtrValue, ctr );

      /* block-by-block processing: keystream = E(ctr), ct = pt XOR keystream */
      while(nBlocks) {
         /* encrypt counter block */
         #if (_ALG_AES_SAFE_==_ALG_AES_SAFE_COMPACT_SBOX_)
         encoder((Ipp8u*)ctr, (Ipp8u*)output, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), RijEncSbox/*NULL*/);
         #else
         encoder((Ipp8u*)ctr, (Ipp8u*)output, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), NULL);
         #endif

         /* compute ciphertext block */
         XorBlock16(pSrc, output, pDst);

         /* increment the low ctrNumBitSize bits of the counter block */
         StdIncrement((Ipp8u*)ctr,MBS_RIJ128*8, ctrNumBitSize);

         pSrc += MBS_RIJ128;
         pDst += MBS_RIJ128;
         nBlocks--;
      }

      /* publish the advanced counter back to the caller */
      CopyBlock16(ctr, pCtrValue);
   }
}

/*
// AES_ctr
//
// AES counter-mode processing shared by ippsAESEncryptCTR and
// ippsAESDecryptCTR (in CTR mode encryption and decryption are the same
// operation).  Whole 16-byte blocks are processed either serially or split
// across OpenMP threads when the block count justifies it; any sub-block
// tail is handled serially at ctr_tail.  pCtrValue is updated in place so
// the caller can continue the stream.
//
// Returns ippStsNoErr on success, or a bad-pointer/bad-argument status.
*/
static IppStatus AES_ctr(const Ipp8u* pSrc, Ipp8u* pDst, int srcLen,
                         const IppsAESSpec* pCtx,
                         Ipp8u* pCtrValue, int ctrNumBitSize)
{
   /* test the pointers */
   IPP_BAD_PTR4_RET(pSrc, pDst, pCtx, pCtrValue);
   /* align the context */
   pCtx = (IppsAESSpec*)(IPP_ALIGNED_PTR(pCtx, AES_ALIGNMENT));
   /* test the context ID */
   IPP_BADARG_RET(!VALID_AES_ID( pCtx ), ippStsContextMatchErr);
   /* test the data stream length */
   IPP_BADARG_RET((srcLen<1), ippStsLengthErr);
   /* test the counter block size: 1..128 bits of the block may count */
   IPP_BADARG_RET((128<ctrNumBitSize) || (ctrNumBitSize<1), ippStsCTRSizeErr);

   {
      int nBlocks = srcLen / MBS_RIJ128;
      if(nBlocks) {
         /* pick the thread count from a per-thread minimum block threshold
            (AES-NI hardware path amortizes faster, so its threshold differs) */
         int blk_per_thread = AES_NI_ENABLED==RIJ_AESNI(pCtx)?
                              AESNI128_MIN_BLK_PER_THREAD : RIJ128_MIN_BLK_PER_THREAD;
         int nThreads = IPP_MIN(IPPCP_GET_NUM_THREADS(), IPP_MAX(nBlocks/blk_per_thread, 1));

         if(1==nThreads) {
            /* serial path */
            AES_CTR_processing(pSrc, pDst, nBlocks, pCtx, pCtrValue, ctrNumBitSize);
            goto ctr_tail;
            /* NOTE(review): unreachable -- the goto above always skips this
               return; dead statement kept byte-identical here */
            return ippStsNoErr;
         }

         else {
            int blksThreadReg;
            int blksThreadTail;
            #pragma omp parallel IPPCP_OMP_LIMIT_MAX_NUM_THREADS(nThreads)
            {
               /* master computes the split: every thread owns blksThreadReg
                  blocks; the last thread also takes the remainder */
               #pragma omp master
               {
                  nThreads = omp_get_num_threads();
                  blksThreadReg = nBlocks / nThreads;
                  blksThreadTail = blksThreadReg + nBlocks % nThreads;
               }
               #pragma omp barrier
               {
                  int id = omp_get_thread_num();
                  Ipp8u* pThreadSrc = (Ipp8u*)pSrc + id*blksThreadReg * MBS_RIJ128;
                  Ipp8u* pThreadDst = (Ipp8u*)pDst + id*blksThreadReg * MBS_RIJ128;
                  int blkThread = (id==(nThreads-1))? blksThreadTail : blksThreadReg;

                  /* compute this thread's starting counter: the shared base
                     counter advanced by the blocks owned by lower thread ids */
                  Ipp8u thread_counter[MBS_RIJ128];
                  ompStdIncrement128(pCtrValue, thread_counter, ctrNumBitSize, id*blksThreadReg);

                  AES_CTR_processing(pThreadSrc, pThreadDst, blkThread, pCtx, thread_counter, ctrNumBitSize);
               }
            }

            /* advance the caller's counter past all blocks just processed */
            ompStdIncrement128(pCtrValue, pCtrValue, ctrNumBitSize, nBlocks);
         }
      }

ctr_tail:
      /* process the rest of data block if any (final partial block) */
      srcLen &= MBS_RIJ128-1;
      if(srcLen) {
         Ipp32u counter[NB(128)];
         Ipp32u output[NB(128)];

         /* setup encoder method */
         RijnCipher encoder = RIJ_ENCODER(pCtx);

         /* copy counter */
         CopyBlock16(pCtrValue, counter);

         pSrc += nBlocks*MBS_RIJ128;
         pDst += nBlocks*MBS_RIJ128;

         /* encrypt counter block */
         #if (_ALG_AES_SAFE_==_ALG_AES_SAFE_COMPACT_SBOX_)
         encoder((Ipp8u*)counter, (Ipp8u*)output, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), RijEncSbox/*NULL*/);
         #else
         encoder((Ipp8u*)counter, (Ipp8u*)output, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), NULL);
         #endif

         /* compute ciphertext block: XOR only the srcLen tail bytes */
         XorBlock(pSrc, output, pDst, srcLen);

         /* increment counter block */
         StdIncrement((Ipp8u*)counter, MBS_RIJ128*8, ctrNumBitSize);

         /* copy counter back */
         CopyBlock16(counter, pCtrValue);
      }

      return ippStsNoErr;
   }
}

/* Public CTR-mode encrypt entry point: thin wrapper over AES_ctr. */
IPPFUN(IppStatus, ippsAESEncryptCTR,(const Ipp8u* pSrc, Ipp8u* pDst,
                                     int dataLen,
                                     const IppsAESSpec* pCtx,
                                     Ipp8u* pCtrValue, int ctrNumBitSize ))
{
   return AES_ctr(pSrc, pDst, dataLen, pCtx, pCtrValue, ctrNumBitSize);
}

/* Public CTR-mode decrypt entry point: identical to encryption in CTR mode. */
IPPFUN(IppStatus, ippsAESDecryptCTR,(const Ipp8u* pSrc, Ipp8u* pDst,
                                     int dataLen,
                                     const IppsAESSpec* pCtx,
                                     Ipp8u* pCtrValue, int ctrNumBitSize ))
{
   return AES_ctr(pSrc, pDst, dataLen, pCtx, pCtrValue, ctrNumBitSize);
}

#endif /* _OPENMP */
debug_uv.h
#ifndef DEBUG_UV_H #define DEBUG_UV_H #include "../integrator.h" #include "../textures/imageTexture.h" class DebugUV : public Integrator { private: std::shared_ptr<Texture> uv_img; public: DebugUV(const std::shared_ptr<Camera>& _camera, const std::shared_ptr<Sampler>& _sampler) : Integrator(_camera, _sampler) { uv_img = std::make_shared<ImageTexture>("uv_test.png"); }; RGB Li(const Ray& ray, Scene& scene) const { Hit res; if(scene.intersect(ray, res)) { if(res.u < 0 || res.u > 1.0 || res.v < 0 || res.v > 1.0) { std::cout << res.u << ", " << res.v << std::endl; } return uv_img->getColor(res); } else { return RGB(0); } }; void render(Scene& scene) const { const int width = this->camera->film->width; const int height = this->camera->film->height; #pragma omp parallel for schedule(dynamic, 1) for(int i = 0; i < width; i++) { for(int j = 0; j < height; j++) { float u = (2.0*(i + 0.5f) - width)/height; float v = (2.0*(j + 0.5f) - height)/height; Vec2 uv(u, v); Ray ray; float weight = 1.0f; if(!this->camera->getRay(u, v, *(this->sampler), ray, weight)) { this->camera->film->addSample(uv, RGB(0, 0, 0)); } else { RGB li = weight*this->Li(ray, scene); this->camera->film->addSample(uv, li); } } } this->camera->film->ppm_output("output.ppm"); }; }; #endif
GB_unaryop__identity_uint16_bool.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): generated kernel -- comments below added only; code is
// byte-identical to the generator's output.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint16_bool
// op(A') function:  GB_tran__identity_uint16_bool

// C type:   uint16_t
// A type:   bool
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = aij
// (identity op on a bool input: the kernel is effectively a typecast copy)

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint16_t z = (uint16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise pass over the anz entries of Ax, writing the casted result
// into Cx; parallelized with OpenMP over nthreads threads.  Returns
// GrB_NO_VALUE when the operator/type combination is compiled out.
GrB_Info GB_unop__identity_uint16_bool
(
    uint16_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body is supplied by the shared template GB_unaryop_transpose.c,
// which expands the GB_* macros defined above.
GrB_Info GB_tran__identity_uint16_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__ainv_int32_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): generated kernel -- comments below added only; code is
// byte-identical to the generator's output.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_int32_fp64
// op(A') function:  GB_tran__ainv_int32_fp64

// C type:   int32_t
// A type:   double
// cast:     int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop:  cij = -aij   (additive inverse)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, aij) \
    int32_t z ; GB_CAST_SIGNED(z,aij,32) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT32 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise pass: Cx [p] = -(int32_t) Ax [p] for the anz entries,
// parallelized with OpenMP over nthreads threads.  Returns GrB_NO_VALUE
// when the operator/type combination is compiled out.
GrB_Info GB_unop__ainv_int32_fp64
(
    int32_t *Cx,            // Cx and Ax may be aliased
    double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body is supplied by the shared template GB_unaryop_transpose.c,
// which expands the GB_* macros defined above.
GrB_Info GB_tran__ainv_int32_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
fourier.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF OOO U U RRRR IIIII EEEEE RRRR % % F O O U U R R I E R R % % FFF O O U U RRRR I EEE RRRR % % F O O U U R R I E R R % % F OOO UUU R R IIIII EEEEE R R % % % % % % MagickCore Discrete Fourier Transform Methods % % % % Software Design % % Sean Burke % % Fred Weinhaus % % Cristy % % July 2009 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/cache.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/fourier.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #if defined(MAGICKCORE_FFTW_DELEGATE) #if defined(MAGICKCORE_HAVE_COMPLEX_H) #include <complex.h> #endif #include <fftw3.h> #if !defined(MAGICKCORE_HAVE_CABS) #define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1])) #endif #if !defined(MAGICKCORE_HAVE_CARG) #define carg(z) (atan2(cimag(z),creal(z))) #endif #if !defined(MAGICKCORE_HAVE_CIMAG) #define cimag(z) (z[1]) #endif #if !defined(MAGICKCORE_HAVE_CREAL) #define creal(z) (z[0]) #endif #endif /* Typedef declarations. */ typedef struct _FourierInfo { ChannelType channel; MagickBooleanType modulus; size_t width, height; ssize_t center; } FourierInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p l e x I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ComplexImages() performs complex mathematics on an image sequence. % % The format of the ComplexImages method is: % % MagickBooleanType ComplexImages(Image *images,const ComplexOperator op, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o op: A complex operator. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op, ExceptionInfo *exception) { #define ComplexImageTag "Complex/Image" CacheView *Ai_view, *Ar_view, *Bi_view, *Br_view, *Ci_view, *Cr_view; const char *artifact; const Image *Ai_image, *Ar_image, *Bi_image, *Br_image; double snr; Image *Ci_image, *complex_images, *Cr_image, *image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (images->next == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",images->filename); return((Image *) NULL); } image=CloneImage(images,0,0,MagickTrue,exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass) == MagickFalse) { image=DestroyImageList(image); return(image); } image->depth=32UL; complex_images=NewImageList(); AppendImageToList(&complex_images,image); image=CloneImage(images,0,0,MagickTrue,exception); if (image == (Image *) NULL) { complex_images=DestroyImageList(complex_images); return(complex_images); } AppendImageToList(&complex_images,image); /* Apply complex mathematics to image pixels. 
*/ artifact=GetImageArtifact(image,"complex:snr"); snr=0.0; if (artifact != (const char *) NULL) snr=StringToDouble(artifact,(char **) NULL); Ar_image=images; Ai_image=images->next; Br_image=images; Bi_image=images->next; if ((images->next->next != (Image *) NULL) && (images->next->next->next != (Image *) NULL)) { Br_image=images->next->next; Bi_image=images->next->next->next; } Cr_image=complex_images; Ci_image=complex_images->next; Ar_view=AcquireVirtualCacheView(Ar_image,exception); Ai_view=AcquireVirtualCacheView(Ai_image,exception); Br_view=AcquireVirtualCacheView(Br_image,exception); Bi_view=AcquireVirtualCacheView(Bi_image,exception); Cr_view=AcquireAuthenticCacheView(Cr_image,exception); Ci_view=AcquireAuthenticCacheView(Ci_image,exception); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(images,complex_images,images->rows,1L) #endif for (y=0; y < (ssize_t) images->rows; y++) { register const PixelPacket *magick_restrict Ai, *magick_restrict Ar, *magick_restrict Bi, *magick_restrict Br; register PixelPacket *magick_restrict Ci, *magick_restrict Cr; register ssize_t x; if (status == MagickFalse) continue; Ar=GetCacheViewVirtualPixels(Ar_view,0,y,Ar_image->columns,1,exception); Ai=GetCacheViewVirtualPixels(Ai_view,0,y,Ai_image->columns,1,exception); Br=GetCacheViewVirtualPixels(Br_view,0,y,Br_image->columns,1,exception); Bi=GetCacheViewVirtualPixels(Bi_view,0,y,Bi_image->columns,1,exception); Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,Cr_image->columns,1,exception); Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,Ci_image->columns,1,exception); if ((Ar == (const PixelPacket *) NULL) || (Ai == (const PixelPacket *) NULL) || (Br == (const PixelPacket *) NULL) || (Bi == (const PixelPacket *) NULL) || (Cr == (PixelPacket *) NULL) || (Ci == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) images->columns; x++) { switch (op) 
{ case AddComplexOperator: { Cr->red=Ar->red+Br->red; Ci->red=Ai->red+Bi->red; Cr->green=Ar->green+Br->green; Ci->green=Ai->green+Bi->green; Cr->blue=Ar->blue+Br->blue; Ci->blue=Ai->blue+Bi->blue; if (images->matte != MagickFalse) { Cr->opacity=Ar->opacity+Br->opacity; Ci->opacity=Ai->opacity+Bi->opacity; } break; } case ConjugateComplexOperator: default: { Cr->red=Ar->red; Ci->red=(-Bi->red); Cr->green=Ar->green; Ci->green=(-Bi->green); Cr->blue=Ar->blue; Ci->blue=(-Bi->blue); if (images->matte != MagickFalse) { Cr->opacity=Ar->opacity; Ci->opacity=(-Bi->opacity); } break; } case DivideComplexOperator: { double gamma; gamma=PerceptibleReciprocal(Br->red*Br->red+Bi->red*Bi->red+snr); Cr->red=gamma*(Ar->red*Br->red+Ai->red*Bi->red); Ci->red=gamma*(Ai->red*Br->red-Ar->red*Bi->red); gamma=PerceptibleReciprocal(Br->green*Br->green+Bi->green*Bi->green+ snr); Cr->green=gamma*(Ar->green*Br->green+Ai->green*Bi->green); Ci->green=gamma*(Ai->green*Br->green-Ar->green*Bi->green); gamma=PerceptibleReciprocal(Br->blue*Br->blue+Bi->blue*Bi->blue+snr); Cr->blue=gamma*(Ar->blue*Br->blue+Ai->blue*Bi->blue); Ci->blue=gamma*(Ai->blue*Br->blue-Ar->blue*Bi->blue); if (images->matte != MagickFalse) { gamma=PerceptibleReciprocal(Br->opacity*Br->opacity+Bi->opacity* Bi->opacity+snr); Cr->opacity=gamma*(Ar->opacity*Br->opacity+Ai->opacity* Bi->opacity); Ci->opacity=gamma*(Ai->opacity*Br->opacity-Ar->opacity* Bi->opacity); } break; } case MagnitudePhaseComplexOperator: { Cr->red=sqrt(Ar->red*Ar->red+Ai->red*Ai->red); Ci->red=atan2(Ai->red,Ar->red)/(2.0*MagickPI)+0.5; Cr->green=sqrt(Ar->green*Ar->green+Ai->green*Ai->green); Ci->green=atan2(Ai->green,Ar->green)/(2.0*MagickPI)+0.5; Cr->blue=sqrt(Ar->blue*Ar->blue+Ai->blue*Ai->blue); Ci->blue=atan2(Ai->blue,Ar->blue)/(2.0*MagickPI)+0.5; if (images->matte != MagickFalse) { Cr->opacity=sqrt(Ar->opacity*Ar->opacity+Ai->opacity*Ai->opacity); Ci->opacity=atan2(Ai->opacity,Ar->opacity)/(2.0*MagickPI)+0.5; } break; } case MultiplyComplexOperator: { 
Cr->red=QuantumScale*(Ar->red*Br->red-Ai->red*Bi->red); Ci->red=QuantumScale*(Ai->red*Br->red+Ar->red*Bi->red); Cr->green=QuantumScale*(Ar->green*Br->green-Ai->green*Bi->green); Ci->green=QuantumScale*(Ai->green*Br->green+Ar->green*Bi->green); Cr->blue=QuantumScale*(Ar->blue*Br->blue-Ai->blue*Bi->blue); Ci->blue=QuantumScale*(Ai->blue*Br->blue+Ar->blue*Bi->blue); if (images->matte != MagickFalse) { Cr->opacity=QuantumScale*(Ar->opacity*Br->opacity-Ai->opacity* Bi->opacity); Ci->opacity=QuantumScale*(Ai->opacity*Br->opacity+Ar->opacity* Bi->opacity); } break; } case RealImaginaryComplexOperator: { Cr->red=Ar->red*cos(2.0*MagickPI*(Ai->red-0.5)); Ci->red=Ar->red*sin(2.0*MagickPI*(Ai->red-0.5)); Cr->green=Ar->green*cos(2.0*MagickPI*(Ai->green-0.5)); Ci->green=Ar->green*sin(2.0*MagickPI*(Ai->green-0.5)); Cr->blue=Ar->blue*cos(2.0*MagickPI*(Ai->blue-0.5)); Ci->blue=Ar->blue*sin(2.0*MagickPI*(Ai->blue-0.5)); if (images->matte != MagickFalse) { Cr->opacity=Ar->opacity*cos(2.0*MagickPI*(Ai->opacity-0.5)); Ci->opacity=Ar->opacity*sin(2.0*MagickPI*(Ai->opacity-0.5)); } break; } case SubtractComplexOperator: { Cr->red=Ar->red-Br->red; Ci->red=Ai->red-Bi->red; Cr->green=Ar->green-Br->green; Ci->green=Ai->green-Bi->green; Cr->blue=Ar->blue-Br->blue; Ci->blue=Ai->blue-Bi->blue; if (images->matte != MagickFalse) { Cr->opacity=Ar->opacity-Br->opacity; Ci->opacity=Ai->opacity-Bi->opacity; } break; } } Ar++; Ai++; Br++; Bi++; Cr++; Ci++; } if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse) status=MagickFalse; if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ComplexImages) #endif proceed=SetImageProgress(images,ComplexImageTag,progress++, images->rows); if (proceed == MagickFalse) status=MagickFalse; } } Cr_view=DestroyCacheView(Cr_view); 
Ci_view=DestroyCacheView(Ci_view); Br_view=DestroyCacheView(Br_view); Bi_view=DestroyCacheView(Bi_view); Ar_view=DestroyCacheView(Ar_view); Ai_view=DestroyCacheView(Ai_view); if (status == MagickFalse) complex_images=DestroyImageList(complex_images); return(complex_images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F o r w a r d F o u r i e r T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ForwardFourierTransformImage() implements the discrete Fourier transform % (DFT) of the image either as a magnitude / phase or real / imaginary image % pair. % % The format of the ForwadFourierTransformImage method is: % % Image *ForwardFourierTransformImage(const Image *image, % const MagickBooleanType modulus,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o modulus: if true, return as transform as a magnitude / phase pair % otherwise a real / imaginary image pair. % % o exception: return any errors or warnings in this structure. % */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType RollFourier(const size_t width,const size_t height, const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels) { double *source_pixels; MemoryInfo *source_info; register ssize_t i, x; ssize_t u, v, y; /* Move zero frequency (DC, average color) from (0,0) to (width/2,height/2). */ source_info=AcquireVirtualMemory(width,height*sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) return(MagickFalse); source_pixels=(double *) GetVirtualMemoryBlob(source_info); i=0L; for (y=0L; y < (ssize_t) height; y++) { if (y_offset < 0L) v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset; else v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height : y+y_offset; for (x=0L; x < (ssize_t) width; x++) { if (x_offset < 0L) u=((x+x_offset) < 0L) ? 
x+x_offset+(ssize_t) width : x+x_offset; else u=((x+x_offset) > ((ssize_t) width-1L)) ? x+x_offset-(ssize_t) width : x+x_offset; source_pixels[v*width+u]=roll_pixels[i++]; } } (void) memcpy(roll_pixels,source_pixels,height*width* sizeof(*source_pixels)); source_info=RelinquishVirtualMemory(source_info); return(MagickTrue); } static MagickBooleanType ForwardQuadrantSwap(const size_t width, const size_t height,double *source_pixels,double *forward_pixels) { MagickBooleanType status; register ssize_t x; ssize_t center, y; /* Swap quadrants. */ center=(ssize_t) (width/2L)+1L; status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L, source_pixels); if (status == MagickFalse) return(MagickFalse); for (y=0L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x]; for (y=1; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) forward_pixels[(height-y)*width+width/2L-x-1L]= source_pixels[y*center+x+1L]; for (x=0L; x < (ssize_t) (width/2L); x++) forward_pixels[width/2L-x-1L]=source_pixels[x+1L]; return(MagickTrue); } static void CorrectPhaseLHS(const size_t width,const size_t height, double *fourier_pixels) { register ssize_t x; ssize_t y; for (y=0L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) fourier_pixels[y*width+x]*=(-1.0); } static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info, Image *image,double *magnitude,double *phase,ExceptionInfo *exception) { CacheView *magnitude_view, *phase_view; double *magnitude_pixels, *phase_pixels; Image *magnitude_image, *phase_image; MagickBooleanType status; MemoryInfo *magnitude_info, *phase_info; register IndexPacket *indexes; register PixelPacket *q; register ssize_t x; ssize_t i, y; magnitude_image=GetFirstImageInList(image); phase_image=GetNextImageInList(image); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, 
"ImageSequenceRequired","`%s'",image->filename); return(MagickFalse); } /* Create "Fourier Transform" image from constituent arrays. */ magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*magnitude_pixels)); phase_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*phase_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL)) { if (phase_info != (MemoryInfo *) NULL) phase_info=RelinquishVirtualMemory(phase_info); if (magnitude_info != (MemoryInfo *) NULL) magnitude_info=RelinquishVirtualMemory(magnitude_info); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info); (void) memset(magnitude_pixels,0,fourier_info->width* fourier_info->height*sizeof(*magnitude_pixels)); phase_pixels=(double *) GetVirtualMemoryBlob(phase_info); (void) memset(phase_pixels,0,fourier_info->width* fourier_info->height*sizeof(*phase_pixels)); status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height, magnitude,magnitude_pixels); if (status != MagickFalse) status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase, phase_pixels); CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels); if (fourier_info->modulus != MagickFalse) { i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->width; x++) { phase_pixels[i]/=(2.0*MagickPI); phase_pixels[i]+=0.5; i++; } } magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception); i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) { q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->width,1UL, exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(magnitude_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch 
(fourier_info->channel) { case RedChannel: default: { SetPixelRed(q,ClampToQuantum(QuantumRange*magnitude_pixels[i])); break; } case GreenChannel: { SetPixelGreen(q,ClampToQuantum(QuantumRange*magnitude_pixels[i])); break; } case BlueChannel: { SetPixelBlue(q,ClampToQuantum(QuantumRange*magnitude_pixels[i])); break; } case OpacityChannel: { SetPixelOpacity(q,ClampToQuantum(QuantumRange*magnitude_pixels[i])); break; } case IndexChannel: { SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange* magnitude_pixels[i])); break; } case GrayChannels: { SetPixelGray(q,ClampToQuantum(QuantumRange*magnitude_pixels[i])); break; } } i++; q++; } status=SyncCacheViewAuthenticPixels(magnitude_view,exception); if (status == MagickFalse) break; } magnitude_view=DestroyCacheView(magnitude_view); i=0L; phase_view=AcquireAuthenticCacheView(phase_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->width,1UL, exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(phase_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { SetPixelRed(q,ClampToQuantum(QuantumRange*phase_pixels[i])); break; } case GreenChannel: { SetPixelGreen(q,ClampToQuantum(QuantumRange*phase_pixels[i])); break; } case BlueChannel: { SetPixelBlue(q,ClampToQuantum(QuantumRange*phase_pixels[i])); break; } case OpacityChannel: { SetPixelOpacity(q,ClampToQuantum(QuantumRange*phase_pixels[i])); break; } case IndexChannel: { SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*phase_pixels[i])); break; } case GrayChannels: { SetPixelGray(q,ClampToQuantum(QuantumRange*phase_pixels[i])); break; } } i++; q++; } status=SyncCacheViewAuthenticPixels(phase_view,exception); if (status == MagickFalse) break; } phase_view=DestroyCacheView(phase_view); phase_info=RelinquishVirtualMemory(phase_info); magnitude_info=RelinquishVirtualMemory(magnitude_info); 
return(status); } static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info, const Image *image,double *magnitude_pixels,double *phase_pixels, ExceptionInfo *exception) { CacheView *image_view; const char *value; double *source_pixels; fftw_complex *forward_pixels; fftw_plan fftw_r2c_plan; MemoryInfo *forward_info, *source_info; register const IndexPacket *indexes; register const PixelPacket *p; register ssize_t i, x; ssize_t y; /* Generate the forward Fourier transform. */ source_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } source_pixels=(double *) GetVirtualMemoryBlob(source_info); memset(source_pixels,0,fourier_info->width*fourier_info->height* sizeof(*source_pixels)); i=0L; image_view=AcquireVirtualCacheView(image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL, exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { source_pixels[i]=QuantumScale*GetPixelRed(p); break; } case GreenChannel: { source_pixels[i]=QuantumScale*GetPixelGreen(p); break; } case BlueChannel: { source_pixels[i]=QuantumScale*GetPixelBlue(p); break; } case OpacityChannel: { source_pixels[i]=QuantumScale*GetPixelOpacity(p); break; } case IndexChannel: { source_pixels[i]=QuantumScale*GetPixelIndex(indexes+x); break; } case GrayChannels: { source_pixels[i]=QuantumScale*GetPixelGray(p); break; } } i++; p++; } } image_view=DestroyCacheView(image_view); forward_info=AcquireVirtualMemory((size_t) fourier_info->width, (fourier_info->height/2+1)*sizeof(*forward_pixels)); if (forward_info == 
(MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info); return(MagickFalse); } forward_pixels=(fftw_complex *) GetVirtualMemoryBlob(forward_info); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ForwardFourierTransform) #endif fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height, source_pixels,forward_pixels,FFTW_ESTIMATE); fftw_execute_dft_r2c(fftw_r2c_plan,source_pixels,forward_pixels); fftw_destroy_plan(fftw_r2c_plan); source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info); value=GetImageArtifact(image,"fourier:normalize"); if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0)) { double gamma; /* Normalize Fourier transform. */ i=0L; gamma=PerceptibleReciprocal((double) fourier_info->width* fourier_info->height); for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) forward_pixels[i]*=gamma; #else forward_pixels[i][0]*=gamma; forward_pixels[i][1]*=gamma; #endif i++; } } /* Generate magnitude and phase (or real and imaginary). 
*/ i=0L; if (fourier_info->modulus != MagickFalse) for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { magnitude_pixels[i]=cabs(forward_pixels[i]); phase_pixels[i]=carg(forward_pixels[i]); i++; } else for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { magnitude_pixels[i]=creal(forward_pixels[i]); phase_pixels[i]=cimag(forward_pixels[i]); i++; } forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info); return(MagickTrue); } static MagickBooleanType ForwardFourierTransformChannel(const Image *image, const ChannelType channel,const MagickBooleanType modulus, Image *fourier_image,ExceptionInfo *exception) { double *magnitude_pixels, *phase_pixels; FourierInfo fourier_info; MagickBooleanType status; MemoryInfo *magnitude_info, *phase_info; fourier_info.width=image->columns; fourier_info.height=image->rows; if ((image->columns != image->rows) || ((image->columns % 2) != 0) || ((image->rows % 2) != 0)) { size_t extent=image->columns < image->rows ? image->rows : image->columns; fourier_info.width=(extent & 0x01) == 1 ? 
extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; magnitude_info=AcquireVirtualMemory((size_t) fourier_info.width, (fourier_info.height/2+1)*sizeof(*magnitude_pixels)); phase_info=AcquireVirtualMemory((size_t) fourier_info.width, (fourier_info.height/2+1)*sizeof(*phase_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL)) { if (phase_info != (MemoryInfo *) NULL) phase_info=RelinquishVirtualMemory(phase_info); if (magnitude_info == (MemoryInfo *) NULL) magnitude_info=RelinquishVirtualMemory(magnitude_info); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info); phase_pixels=(double *) GetVirtualMemoryBlob(phase_info); status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels, phase_pixels,exception); if (status != MagickFalse) status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels, phase_pixels,exception); phase_info=RelinquishVirtualMemory(phase_info); magnitude_info=RelinquishVirtualMemory(magnitude_info); return(status); } #endif MagickExport Image *ForwardFourierTransformImage(const Image *image, const MagickBooleanType modulus,ExceptionInfo *exception) { Image *fourier_image; fourier_image=NewImageList(); #if !defined(MAGICKCORE_FFTW_DELEGATE) (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", image->filename); #else { Image *magnitude_image; size_t height, width; width=image->columns; height=image->rows; if ((image->columns != image->rows) || ((image->columns % 2) != 0) || ((image->rows % 2) != 0)) { size_t extent=image->columns < image->rows ? image->rows : image->columns; width=(extent & 0x01) == 1 ? 
extent+1UL : extent; } height=width; magnitude_image=CloneImage(image,width,height,MagickTrue,exception); if (magnitude_image != (Image *) NULL) { Image *phase_image; magnitude_image->storage_class=DirectClass; magnitude_image->depth=32UL; phase_image=CloneImage(image,width,height,MagickTrue,exception); if (phase_image == (Image *) NULL) magnitude_image=DestroyImage(magnitude_image); else { MagickBooleanType is_gray, status; phase_image->storage_class=DirectClass; phase_image->depth=32UL; AppendImageToList(&fourier_image,magnitude_image); AppendImageToList(&fourier_image,phase_image); status=MagickTrue; is_gray=IsGrayImage(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=ForwardFourierTransformChannel(image, GrayChannels,modulus,fourier_image,exception); else thread_status=ForwardFourierTransformChannel(image,RedChannel, modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, GreenChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, BlueChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (image->matte != MagickFalse) thread_status=ForwardFourierTransformChannel(image, OpacityChannel,modulus,fourier_image,exception); 
        if (thread_status == MagickFalse)
          status=thread_status;
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp section
#endif
      {
        MagickBooleanType
          thread_status;

        /* Black channel is only transformed for CMYK images. */
        thread_status=MagickTrue;
        if (image->colorspace == CMYKColorspace)
          thread_status=ForwardFourierTransformChannel(image,
            IndexChannel,modulus,fourier_image,exception);
        if (thread_status == MagickFalse)
          status=thread_status;
      }
    }
    /* On any per-channel failure, discard the partially built image pair. */
    if (status == MagickFalse)
      fourier_image=DestroyImageList(fourier_image);
    fftw_cleanup();
  }
  }
  }
#endif
  return(fourier_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n v e r s e F o u r i e r T r a n s f o r m I m a g e                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InverseFourierTransformImage() implements the inverse discrete Fourier
%  transform (DFT) of the image either as a magnitude / phase or real /
%  imaginary image pair.
%
%  The format of the InverseFourierTransformImage method is:
%
%      Image *InverseFourierTransformImage(const Image *magnitude_image,
%        const Image *phase_image,const MagickBooleanType modulus,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o magnitude_image: the magnitude or real image.
%
%    o phase_image: the phase or imaginary image.
%
%    o modulus: if true, return transform as a magnitude / phase pair
%      otherwise a real / imaginary image pair.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(MAGICKCORE_FFTW_DELEGATE)
/*
  Undo the forward transform's quadrant swap: rebuild the half-plane
  (center x height) layout expected by the complex-to-real transform from
  the full-width source plane, then roll rows by -height/2 to restore the
  origin.
*/
static MagickBooleanType InverseQuadrantSwap(const size_t width,
  const size_t height,const double *source,double *destination)
{
  register ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.
  */
  center=(ssize_t) (width/2L)+1L;
  for (y=1L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L+1L); x++)
      destination[(height-y)*center-x+width/2L]=source[y*width+x];
  for (y=0L; y < (ssize_t) height; y++)
    destination[y*center]=source[y*width+width/2L];
  for (x=0L; x < center; x++)
    destination[x]=source[center-x-1L];
  return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination));
}

/*
  Read the magnitude/real and phase/imaginary image pair back into a single
  complex (half-plane) pixel buffer, undoing the scaling and quadrant
  swapping applied by the forward transform.
*/
static MagickBooleanType InverseFourier(FourierInfo *fourier_info,
  const Image *magnitude_image,const Image *phase_image,
  fftw_complex *fourier_pixels,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *inverse_pixels,
    *magnitude_pixels,
    *phase_pixels;

  MagickBooleanType
    status;

  MemoryInfo
    *inverse_info,
    *magnitude_info,
    *phase_info;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Inverse Fourier - read image and break down into a double array.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*phase_pixels));
  inverse_info=AcquireVirtualMemory((size_t) fourier_info->width,
    (fourier_info->height/2+1)*sizeof(*inverse_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL) ||
      (inverse_info == (MemoryInfo *) NULL))
    {
      /* Release whichever buffers were acquired before reporting failure. */
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (inverse_info != (MemoryInfo *) NULL)
        inverse_info=RelinquishVirtualMemory(inverse_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  inverse_pixels=(double *)
    GetVirtualMemoryBlob(inverse_info);
  /* Gather the magnitude (or real) plane, scaled to [0,1] by QuantumScale. */
  i=0L;
  magnitude_view=AcquireVirtualCacheView(magnitude_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  /* Re-center the magnitude plane into the half-plane layout. */
  status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude_pixels,inverse_pixels);
  (void) memcpy(magnitude_pixels,inverse_pixels,fourier_info->height*
    fourier_info->center*sizeof(*magnitude_pixels));
  /* Gather the phase (or imaginary) plane the same way. */
  i=0L;
  phase_view=AcquireVirtualCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(phase_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          phase_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          phase_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  if (fourier_info->modulus != MagickFalse)
    {
      /* Map phase stored in [0,1] back to [-pi,pi]. */
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_pixels[i]-=0.5;
          phase_pixels[i]*=(2.0*MagickPI);
          i++;
        }
    }
  phase_view=DestroyCacheView(phase_view);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (status != MagickFalse)
    status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
      phase_pixels,inverse_pixels);
  (void) memcpy(phase_pixels,inverse_pixels,fourier_info->height*
    fourier_info->center*sizeof(*phase_pixels));
  inverse_info=RelinquishVirtualMemory(inverse_info);
  /*
    Merge two sets.
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I*
          magnitude_pixels[i]*sin(phase_pixels[i]);
#else
        fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]);
        fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]);
#endif
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i];
#else
        fourier_pixels[i][0]=magnitude_pixels[i];
        fourier_pixels[i][1]=phase_pixels[i];
#endif
        i++;
      }
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  phase_info=RelinquishVirtualMemory(phase_info);
  return(status);
}

/*
  Execute the complex-to-real FFTW plan over the merged spectrum and scatter
  the resulting spatial-domain samples into the selected channel of image.
*/
static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info,
  fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    *source_pixels;

  const char
    *value;

  fftw_plan
    fftw_c2r_plan;

  MemoryInfo
    *source_info;

  register IndexPacket
    *indexes;

  register PixelPacket
    *q;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  source_info=AcquireVirtualMemory((size_t)
    fourier_info->width,
    fourier_info->height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  /* Optional 1/(w*h) normalization requested via the fourier:normalize
     artifact (the forward transform normalizes otherwise). */
  value=GetImageArtifact(image,"fourier:normalize");
  if (LocaleCompare(value,"inverse") == 0)
    {
      double
        gamma;

      /*
        Normalize inverse transform.
      */
      i=0L;
      gamma=PerceptibleReciprocal((double) fourier_info->width*
        fourier_info->height);
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->center; x++)
        {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
          fourier_pixels[i]*=gamma;
#else
          fourier_pixels[i][0]*=gamma;
          fourier_pixels[i][1]*=gamma;
#endif
          i++;
        }
    }
  /* The FFTW planner is not thread safe; serialize plan creation. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_InverseFourierTransform)
#endif
  fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height,
    fourier_pixels,source_pixels,FFTW_ESTIMATE);
  fftw_execute_dft_c2r(fftw_c2r_plan,fourier_pixels,source_pixels);
  fftw_destroy_plan(fftw_c2r_plan);
  i=0L;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    /* The transform extent may exceed the image; clip rows and columns. */
    if (y >= (ssize_t) image->rows)
      break;
    q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width >
      image->columns ? image->columns : fourier_info->width,1UL,exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      if (x < (ssize_t) image->columns)
        switch (fourier_info->channel)
        {
          case RedChannel:
          default:
          {
            SetPixelRed(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case GreenChannel:
          {
            SetPixelGreen(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case BlueChannel:
          {
            SetPixelBlue(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case OpacityChannel:
          {
            SetPixelOpacity(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case IndexChannel:
          {
            SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
              source_pixels[i]));
            break;
          }
          case GrayChannels:
          {
            SetPixelGray(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
        }
      i++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  source_info=RelinquishVirtualMemory(source_info);
  return(MagickTrue);
}

/*
  Inverse-transform a single channel of the magnitude/phase pair into
  fourier_image.
*/
static MagickBooleanType InverseFourierTransformChannel(
  const Image *magnitude_image,const Image *phase_image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  fftw_complex
    *inverse_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *inverse_info;

  fourier_info.width=magnitude_image->columns;
  fourier_info.height=magnitude_image->rows;
  /* Pad to a square, even extent as required by the transform layout. */
  if ((magnitude_image->columns != magnitude_image->rows) ||
      ((magnitude_image->columns % 2) != 0) ||
      ((magnitude_image->rows % 2) != 0))
    {
      size_t extent=magnitude_image->columns < magnitude_image->rows ?
        magnitude_image->rows : magnitude_image->columns;
      fourier_info.width=(extent & 0x01) == 1 ?
extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; inverse_info=AcquireVirtualMemory((size_t) fourier_info.width, (fourier_info.height/2+1)*sizeof(*inverse_pixels)); if (inverse_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } inverse_pixels=(fftw_complex *) GetVirtualMemoryBlob(inverse_info); status=InverseFourier(&fourier_info,magnitude_image,phase_image, inverse_pixels,exception); if (status != MagickFalse) status=InverseFourierTransform(&fourier_info,inverse_pixels,fourier_image, exception); inverse_info=RelinquishVirtualMemory(inverse_info); return(status); } #endif MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image, const Image *phase_image,const MagickBooleanType modulus, ExceptionInfo *exception) { Image *fourier_image; assert(magnitude_image != (Image *) NULL); assert(magnitude_image->signature == MagickCoreSignature); if (magnitude_image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", magnitude_image->filename); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",magnitude_image->filename); return((Image *) NULL); } #if !defined(MAGICKCORE_FFTW_DELEGATE) fourier_image=(Image *) NULL; (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", magnitude_image->filename); #else { fourier_image=CloneImage(magnitude_image,magnitude_image->columns, magnitude_image->rows,MagickTrue,exception); if (fourier_image != (Image *) NULL) { MagickBooleanType is_gray, status; status=MagickTrue; is_gray=IsGrayImage(magnitude_image,exception); if (is_gray != MagickFalse) 
          is_gray=IsGrayImage(phase_image,exception);
        /* Inverse-transform each channel concurrently. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp parallel sections
#endif
        {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            /* Gray images need a single pass; otherwise start with red. */
            if (is_gray != MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,GrayChannels,modulus,fourier_image,exception);
            else
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,RedChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (is_gray == MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,GreenChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (is_gray == MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,BlueChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            /* Opacity is only processed when a matte channel is present. */
            thread_status=MagickTrue;
            if (magnitude_image->matte != MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,OpacityChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            /* Black channel is only meaningful for CMYK images. */
            thread_status=MagickTrue;
            if (magnitude_image->colorspace == CMYKColorspace)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,IndexChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
        }
        /* On any per-channel failure, discard the partial result. */
        if (status == MagickFalse)
          fourier_image=DestroyImage(fourier_image);
      }
    fftw_cleanup();
  }
#endif
  return(fourier_image);
}
// ==== convolution_winograd_transform_packn.h ====
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Winograd F(6x6,3x3) input transform for the packn layout (RISC-V vector).
// Each 8x8 input tile is transformed separably: a vertical pass into the
// per-thread scratch array `tmp`, then a horizontal pass writing the result
// into bottom_blob_tm.
static void conv3x3s1_winograd63_transform_input_packn_rvv(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int packn = csrr_vlenb() / 4; // fp32 lanes per vector register
    const word_type vl = vsetvl_e32m1(packn);

    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;

    // const float itm[8][8] = {
    //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
    //
    //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
    //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
    //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
    //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
    //
    //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
    // };

    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25

    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // NOTE c99 variable length array
        float tmp[8][8][packn]; // scratch: one 8x8 tile of packn-wide rows

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 6) + (j * 6) * packn;

                // vertical pass over the 8 rows of the tile
                for (int m = 0; m < 8; m++)
                {
                    vfloat32m1_t _r00 = vle32_v_f32m1(r0, vl);
                    vfloat32m1_t _r01 = vle32_v_f32m1(r0 + packn, vl);
                    vfloat32m1_t _r02 = vle32_v_f32m1(r0 + packn * 2, vl);
                    vfloat32m1_t _r03 = vle32_v_f32m1(r0 + packn * 3, vl);
                    vfloat32m1_t _r04 = vle32_v_f32m1(r0 + packn * 4, vl);
                    vfloat32m1_t _r05 = vle32_v_f32m1(r0 + packn * 5, vl);
                    vfloat32m1_t _r06 = vle32_v_f32m1(r0 + packn * 6, vl);
                    vfloat32m1_t _r07 = vle32_v_f32m1(r0 + packn * 7, vl);

                    vfloat32m1_t _tmp0m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r00, _r06, vl), 5.25f, vfsub_vv_f32m1(_r04, _r02, vl), vl);
                    vfloat32m1_t _tmp7m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r07, _r01, vl), 5.25f, vfsub_vv_f32m1(_r03, _r05, vl), vl);
                    vse32_v_f32m1(tmp[0][m], _tmp0m, vl);
                    vse32_v_f32m1(tmp[7][m], _tmp7m, vl);

                    vfloat32m1_t _tmp12a = vfmacc_vf_f32m1(vfadd_vv_f32m1(_r02, _r06, vl), -4.25f, _r04, vl);
                    vfloat32m1_t _tmp12b = vfmacc_vf_f32m1(vfadd_vv_f32m1(_r01, _r05, vl), -4.25f, _r03, vl);

                    vfloat32m1_t _tmp1m = vfadd_vv_f32m1(_tmp12a, _tmp12b, vl);
                    vfloat32m1_t _tmp2m = vfsub_vv_f32m1(_tmp12a, _tmp12b, vl);
                    vse32_v_f32m1(tmp[1][m], _tmp1m, vl);
                    vse32_v_f32m1(tmp[2][m], _tmp2m, vl);

                    vfloat32m1_t _tmp34a = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_r06, 0.25f, _r02, vl), -1.25f, _r04, vl);
                    vfloat32m1_t _tmp34b = vfmacc_vf_f32m1(vfmacc_vf_f32m1(vfmul_vf_f32m1(_r01, 0.5f, vl), -2.5f, _r03, vl), 2.f, _r05, vl);

                    vfloat32m1_t _tmp3m = vfadd_vv_f32m1(_tmp34a, _tmp34b, vl);
                    vfloat32m1_t _tmp4m = vfsub_vv_f32m1(_tmp34a, _tmp34b, vl);
                    vse32_v_f32m1(tmp[3][m], _tmp3m, vl);
                    vse32_v_f32m1(tmp[4][m], _tmp4m, vl);

                    vfloat32m1_t _tmp56a = vfmacc_vf_f32m1(_r06, 4.f, vfmacc_vf_f32m1(_r02, -1.25f, _r04, vl), vl);
                    vfloat32m1_t _tmp56b = vfmacc_vf_f32m1(vfmacc_vf_f32m1(vfmul_vf_f32m1(_r01, 2.f, vl), -2.5f, _r03, vl), 0.5f, _r05, vl);

                    vfloat32m1_t _tmp5m = vfadd_vv_f32m1(_tmp56a, _tmp56b, vl);
                    vfloat32m1_t _tmp6m = vfsub_vv_f32m1(_tmp56a, _tmp56b, vl);
                    vse32_v_f32m1(tmp[5][m], _tmp5m, vl);
                    vse32_v_f32m1(tmp[6][m], _tmp6m, vl);

                    r0 += w * packn;
                }

                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * packn;
                float* r0_tm_1 = r0_tm_0 + tiles * packn;
                float* r0_tm_2 = r0_tm_0 + tiles * packn * 2;
                float* r0_tm_3 = r0_tm_0 + tiles * packn * 3;
                float* r0_tm_4 = r0_tm_0 + tiles * packn * 4;
                float* r0_tm_5 = r0_tm_0 + tiles * packn * 5;
                float* r0_tm_6 = r0_tm_0 + tiles * packn * 6;
                float* r0_tm_7 = r0_tm_0 + tiles * packn * 7;

                // horizontal pass over the 8 columns of the scratch tile
                for (int m = 0; m < 8; m++)
                {
                    vfloat32m1_t _tmp00 = vle32_v_f32m1(tmp[m][0], vl);
                    vfloat32m1_t _tmp01 = vle32_v_f32m1(tmp[m][1], vl);
                    vfloat32m1_t _tmp02 = vle32_v_f32m1(tmp[m][2], vl);
                    vfloat32m1_t _tmp03 = vle32_v_f32m1(tmp[m][3], vl);
                    vfloat32m1_t _tmp04 = vle32_v_f32m1(tmp[m][4], vl);
                    vfloat32m1_t _tmp05 = vle32_v_f32m1(tmp[m][5], vl);
                    vfloat32m1_t _tmp06 = vle32_v_f32m1(tmp[m][6], vl);
                    vfloat32m1_t _tmp07 = vle32_v_f32m1(tmp[m][7], vl);

                    vfloat32m1_t _r0tm0 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp00, _tmp06, vl), 5.25f, vfsub_vv_f32m1(_tmp04, _tmp02, vl), vl);
                    vfloat32m1_t _r0tm7 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp07, _tmp01, vl), 5.25f, vfsub_vv_f32m1(_tmp03, _tmp05, vl), vl);

                    vfloat32m1_t _tmp12a = vfmacc_vf_f32m1(vfadd_vv_f32m1(_tmp02, _tmp06, vl), -4.25f, _tmp04, vl);
                    vfloat32m1_t _tmp12b = vfmacc_vf_f32m1(vfadd_vv_f32m1(_tmp01, _tmp05, vl), -4.25f, _tmp03, vl);

                    vfloat32m1_t _r0tm1 = vfadd_vv_f32m1(_tmp12a, _tmp12b, vl);
                    vfloat32m1_t _r0tm2 = vfsub_vv_f32m1(_tmp12a, _tmp12b, vl);

                    vfloat32m1_t _tmp34a = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp06, 0.25f, _tmp02, vl), -1.25f, _tmp04, vl);
                    vfloat32m1_t _tmp34b = vfmacc_vf_f32m1(vfmacc_vf_f32m1(vfmul_vf_f32m1(_tmp01, 0.5f, vl), -2.5f, _tmp03, vl), 2.f, _tmp05, vl);

                    vfloat32m1_t _r0tm3 = vfadd_vv_f32m1(_tmp34a, _tmp34b, vl);
                    vfloat32m1_t _r0tm4 = vfsub_vv_f32m1(_tmp34a, _tmp34b, vl);

                    vfloat32m1_t _tmp56a = vfmacc_vf_f32m1(_tmp06, 4.f, vfmacc_vf_f32m1(_tmp02, -1.25f, _tmp04, vl), vl);
                    vfloat32m1_t _tmp56b = vfmacc_vf_f32m1(vfmacc_vf_f32m1(vfmul_vf_f32m1(_tmp01, 2.f, vl), -2.5f, _tmp03, vl), 0.5f, _tmp05, vl);

                    vfloat32m1_t _r0tm5 = vfadd_vv_f32m1(_tmp56a, _tmp56b, vl);
                    vfloat32m1_t _r0tm6 = vfsub_vv_f32m1(_tmp56a, _tmp56b, vl);

                    vse32_v_f32m1(r0_tm_0, _r0tm0, vl);
                    vse32_v_f32m1(r0_tm_1, _r0tm1, vl);
                    vse32_v_f32m1(r0_tm_2, _r0tm2, vl);
                    vse32_v_f32m1(r0_tm_3, _r0tm3, vl);
                    vse32_v_f32m1(r0_tm_4, _r0tm4, vl);
                    vse32_v_f32m1(r0_tm_5, _r0tm5, vl);
                    vse32_v_f32m1(r0_tm_6, _r0tm6, vl);
                    vse32_v_f32m1(r0_tm_7, _r0tm7, vl);

                    r0_tm_0 += tiles * packn * 8;
                    r0_tm_1 += tiles * packn * 8;
                    r0_tm_2 += tiles * packn * 8;
                    r0_tm_3 += tiles * packn * 8;
                    r0_tm_4 += tiles * packn * 8;
                    r0_tm_5 += tiles * packn * 8;
                    r0_tm_6 += tiles * packn * 8;
                    r0_tm_7 += tiles * packn * 8;
                }
            }
        }
    }
}

// Winograd F(6x6,3x3) output transform for the packn layout (RISC-V vector).
// Folds each transformed 8x8 tile back into a 6x6 output block and adds the
// per-channel bias.
static void conv3x3s1_winograd63_transform_output_packn_rvv(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int packn = csrr_vlenb() / 4; // fp32 lanes per vector register
    const word_type vl = vsetvl_e32m1(packn);

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[6][8] = {
    //     {1.0f, 1.0f,  1.0f,  1.0f,  1.0f, 32.0f, 32.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f,  4.0f,  4.0f,  8.0f,  8.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  8.0f, -8.0f,  4.0f, -4.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 16.0f, 16.0f,  2.0f,  2.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
    // };

    // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
    // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        vfloat32m1_t _bias0 = biasptr ? vle32_v_f32m1(biasptr + p * packn, vl) : vfmv_v_f_f32m1(0.f, vl);

        // NOTE c99 variable length array
        float tmp[6][8][packn]; // scratch for the vertical fold

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * packn;
                const float* output0_tm_1 = output0_tm_0 + tiles * packn;
                const float* output0_tm_2 = output0_tm_0 + tiles * packn * 2;
                const float* output0_tm_3 = output0_tm_0 + tiles * packn * 3;
                const float* output0_tm_4 = output0_tm_0 + tiles * packn * 4;
                const float* output0_tm_5 = output0_tm_0 + tiles * packn * 5;
                const float* output0_tm_6 = output0_tm_0 + tiles * packn * 6;
                const float* output0_tm_7 = output0_tm_0 + tiles * packn * 7;

                float* output0 = out0.row(i * 6) + (j * 6) * packn;

                // vertical fold: 8 rows -> 6 scratch rows
                for (int m = 0; m < 8; m++)
                {
                    vfloat32m1_t _out0tm0 = vle32_v_f32m1(output0_tm_0, vl);
                    vfloat32m1_t _out0tm1 = vle32_v_f32m1(output0_tm_1, vl);
                    vfloat32m1_t _out0tm2 = vle32_v_f32m1(output0_tm_2, vl);
                    vfloat32m1_t _out0tm3 = vle32_v_f32m1(output0_tm_3, vl);
                    vfloat32m1_t _out0tm4 = vle32_v_f32m1(output0_tm_4, vl);
                    vfloat32m1_t _out0tm5 = vle32_v_f32m1(output0_tm_5, vl);
                    vfloat32m1_t _out0tm6 = vle32_v_f32m1(output0_tm_6, vl);
                    vfloat32m1_t _out0tm7 = vle32_v_f32m1(output0_tm_7, vl);

                    vfloat32m1_t _tmp024a = vfadd_vv_f32m1(_out0tm1, _out0tm2, vl);
                    vfloat32m1_t _tmp135a = vfsub_vv_f32m1(_out0tm1, _out0tm2, vl);

                    vfloat32m1_t _tmp024b = vfadd_vv_f32m1(_out0tm3, _out0tm4, vl);
                    vfloat32m1_t _tmp135b = vfsub_vv_f32m1(_out0tm3, _out0tm4, vl);

                    vfloat32m1_t _tmp024c = vfadd_vv_f32m1(_out0tm5, _out0tm6, vl);
                    vfloat32m1_t _tmp135c = vfsub_vv_f32m1(_out0tm5, _out0tm6, vl);

                    vfloat32m1_t _tmp0m = vfadd_vv_f32m1(vfadd_vv_f32m1(_out0tm0, _tmp024a, vl), vfmacc_vf_f32m1(_tmp024b, 32.f, _tmp024c, vl), vl);
                    vfloat32m1_t _tmp2m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl);
                    vfloat32m1_t _tmp4m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl);
                    vse32_v_f32m1(tmp[0][m], _tmp0m, vl);
                    vse32_v_f32m1(tmp[2][m], _tmp2m, vl);
                    vse32_v_f32m1(tmp[4][m], _tmp4m, vl);

                    vfloat32m1_t _tmp1m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl);
                    vfloat32m1_t _tmp3m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl);
                    vfloat32m1_t _tmp5m = vfadd_vv_f32m1(vfadd_vv_f32m1(_out0tm7, _tmp135a, vl), vfmacc_vf_f32m1(_tmp135c, 32.f, _tmp135b, vl), vl);
                    vse32_v_f32m1(tmp[1][m], _tmp1m, vl);
                    vse32_v_f32m1(tmp[3][m], _tmp3m, vl);
                    vse32_v_f32m1(tmp[5][m], _tmp5m, vl);

                    output0_tm_0 += tiles * packn * 8;
                    output0_tm_1 += tiles * packn * 8;
                    output0_tm_2 += tiles * packn * 8;
                    output0_tm_3 += tiles * packn * 8;
                    output0_tm_4 += tiles * packn * 8;
                    output0_tm_5 += tiles * packn * 8;
                    output0_tm_6 += tiles * packn * 8;
                    output0_tm_7 += tiles * packn * 8;
                }

                // horizontal fold: 8 columns -> 6 output columns (+ bias)
                for (int m = 0; m < 6; m++)
                {
                    vfloat32m1_t _tmp00 = vle32_v_f32m1(tmp[m][0], vl);
                    vfloat32m1_t _tmp01 = vle32_v_f32m1(tmp[m][1], vl);
                    vfloat32m1_t _tmp02 = vle32_v_f32m1(tmp[m][2], vl);
                    vfloat32m1_t _tmp03 = vle32_v_f32m1(tmp[m][3], vl);
                    vfloat32m1_t _tmp04 = vle32_v_f32m1(tmp[m][4], vl);
                    vfloat32m1_t _tmp05 = vle32_v_f32m1(tmp[m][5], vl);
                    vfloat32m1_t _tmp06 = vle32_v_f32m1(tmp[m][6], vl);
                    vfloat32m1_t _tmp07 = vle32_v_f32m1(tmp[m][7], vl);

                    vfloat32m1_t _tmp024a = vfadd_vv_f32m1(_tmp01, _tmp02, vl);
                    vfloat32m1_t _tmp135a = vfsub_vv_f32m1(_tmp01, _tmp02, vl);

                    vfloat32m1_t _tmp024b = vfadd_vv_f32m1(_tmp03, _tmp04, vl);
                    vfloat32m1_t _tmp135b = vfsub_vv_f32m1(_tmp03, _tmp04, vl);

                    vfloat32m1_t _tmp024c = vfadd_vv_f32m1(_tmp05, _tmp06, vl);
                    vfloat32m1_t _tmp135c = vfsub_vv_f32m1(_tmp05, _tmp06, vl);

                    vfloat32m1_t _out00 = vfadd_vv_f32m1(_bias0, vfadd_vv_f32m1(vfadd_vv_f32m1(_tmp00, _tmp024a, vl), vfmacc_vf_f32m1(_tmp024b, 32.f, _tmp024c, vl), vl), vl);
                    vfloat32m1_t _out02 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl), vl);
                    vfloat32m1_t _out04 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl), vl);
                    vse32_v_f32m1(output0, _out00, vl);
                    vse32_v_f32m1(output0 + packn * 2, _out02, vl);
                    vse32_v_f32m1(output0 + packn * 4, _out04, vl);

                    vfloat32m1_t _out01 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl), vl);
                    vfloat32m1_t _out03 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl), vl);
                    vfloat32m1_t _out05 = vfadd_vv_f32m1(_bias0, vfadd_vv_f32m1(vfadd_vv_f32m1(_tmp07, _tmp135a, vl), vfmacc_vf_f32m1(_tmp135c, 32.f, _tmp135b, vl), vl), vl);
                    vse32_v_f32m1(output0 + packn, _out01, vl);
                    vse32_v_f32m1(output0 + packn * 3, _out03, vl);
                    vse32_v_f32m1(output0 + packn * 5, _out05, vl);

                    output0 += outw * packn;
                }
            }
        }
    }
}

// Winograd F(4x4,3x3) input transform for the packn layout (RISC-V vector).
// Each 6x6 input tile is transformed separably: vertical pass into `tmp`,
// horizontal pass into bottom_blob_tm.
static void conv3x3s1_winograd43_transform_input_packn_rvv(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int packn = csrr_vlenb() / 4; // fp32 lanes per vector register
    const word_type vl = vsetvl_e32m1(packn);

    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;

    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
    // };

    // 0 = 4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 = 4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 = 2 * (r01 - r03) + r04 - r02
    // 5 = 4 * r01 - 5 * r03 + r05

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // NOTE c99 variable length array
        float tmp[6][6][packn]; // scratch: one 6x6 tile of packn-wide rows

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 4) + (j * 4) * packn;

                // vertical pass over the 6 rows of the tile
                for (int m = 0; m < 6; m++)
                {
                    vfloat32m1_t _r00 = vle32_v_f32m1(r0, vl);
                    vfloat32m1_t _r01 = vle32_v_f32m1(r0 + packn, vl);
                    vfloat32m1_t _r02 = vle32_v_f32m1(r0 + packn * 2, vl);
                    vfloat32m1_t _r03 = vle32_v_f32m1(r0 + packn * 3, vl);
                    vfloat32m1_t _r04 = vle32_v_f32m1(r0 + packn * 4, vl);
                    vfloat32m1_t _r05 = vle32_v_f32m1(r0 + packn * 5, vl);

                    vfloat32m1_t _tmp0m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_r04, 4.f, _r00, vl), -5.f, _r02, vl);
                    vfloat32m1_t _tmp1m = vfmacc_vf_f32m1(vfadd_vv_f32m1(_r04, _r03, vl), -4.f, vfadd_vv_f32m1(_r01, _r02, vl), vl);
                    vfloat32m1_t _tmp2m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r04, _r03, vl), 4.f, vfsub_vv_f32m1(_r01, _r02, vl), vl);
                    vfloat32m1_t _tmp3m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r04, _r02, vl), -2.f, vfsub_vv_f32m1(_r01, _r03, vl), vl);
                    vfloat32m1_t _tmp4m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r04, _r02, vl), 2.f, vfsub_vv_f32m1(_r01, _r03, vl), vl);
                    vfloat32m1_t _tmp5m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_r05, 4.f, _r01, vl), -5.f, _r03, vl);

                    vse32_v_f32m1(tmp[0][m], _tmp0m, vl);
                    vse32_v_f32m1(tmp[1][m], _tmp1m, vl);
                    vse32_v_f32m1(tmp[2][m], _tmp2m, vl);
                    vse32_v_f32m1(tmp[3][m], _tmp3m, vl);
                    vse32_v_f32m1(tmp[4][m], _tmp4m, vl);
                    vse32_v_f32m1(tmp[5][m], _tmp5m, vl);

                    r0 += w * packn;
                }

                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * packn;
                float* r0_tm_1 = r0_tm_0 + tiles * packn;
                float* r0_tm_2 = r0_tm_0 + tiles * packn * 2;
                float* r0_tm_3 = r0_tm_0 + tiles * packn * 3;
                float* r0_tm_4 = r0_tm_0 + tiles * packn * 4;
                float* r0_tm_5 = r0_tm_0 + tiles * packn * 5;

                // horizontal pass over the 6 columns of the scratch tile
                for (int m = 0; m < 6; m++)
                {
// continuation of the input transform: pass 2 applies the same 6-point
// transform along the other axis of the staged 6x6 tile and scatters the
// result into the transformed-input layout (one row of `tiles` per index).
                    vfloat32m1_t _tmp00 = vle32_v_f32m1(tmp[m][0], vl);
                    vfloat32m1_t _tmp01 = vle32_v_f32m1(tmp[m][1], vl);
                    vfloat32m1_t _tmp02 = vle32_v_f32m1(tmp[m][2], vl);
                    vfloat32m1_t _tmp03 = vle32_v_f32m1(tmp[m][3], vl);
                    vfloat32m1_t _tmp04 = vle32_v_f32m1(tmp[m][4], vl);
                    vfloat32m1_t _tmp05 = vle32_v_f32m1(tmp[m][5], vl);

                    // identical coefficients to pass 1 above (rows of B^T)
                    vfloat32m1_t _r0tm0 = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp04, 4.f, _tmp00, vl), -5.f, _tmp02, vl);
                    vfloat32m1_t _r0tm1 = vfmacc_vf_f32m1(vfadd_vv_f32m1(_tmp04, _tmp03, vl), -4.f, vfadd_vv_f32m1(_tmp01, _tmp02, vl), vl);
                    vfloat32m1_t _r0tm2 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp04, _tmp03, vl), 4.f, vfsub_vv_f32m1(_tmp01, _tmp02, vl), vl);
                    vfloat32m1_t _r0tm3 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp04, _tmp02, vl), -2.f, vfsub_vv_f32m1(_tmp01, _tmp03, vl), vl);
                    vfloat32m1_t _r0tm4 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp04, _tmp02, vl), 2.f, vfsub_vv_f32m1(_tmp01, _tmp03, vl), vl);
                    vfloat32m1_t _r0tm5 = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp05, 4.f, _tmp01, vl), -5.f, _tmp03, vl);

                    vse32_v_f32m1(r0_tm_0, _r0tm0, vl);
                    vse32_v_f32m1(r0_tm_1, _r0tm1, vl);
                    vse32_v_f32m1(r0_tm_2, _r0tm2, vl);
                    vse32_v_f32m1(r0_tm_3, _r0tm3, vl);
                    vse32_v_f32m1(r0_tm_4, _r0tm4, vl);
                    vse32_v_f32m1(r0_tm_5, _r0tm5, vl);

                    // advance to the next group of 6 transform rows
                    r0_tm_0 += tiles * packn * 6;
                    r0_tm_1 += tiles * packn * 6;
                    r0_tm_2 += tiles * packn * 6;
                    r0_tm_3 += tiles * packn * 6;
                    r0_tm_4 += tiles * packn * 6;
                    r0_tm_5 += tiles * packn * 6;
                }
            }
        }
    }
}

// Inverse (output) transform of Winograd F(4x4, 3x3): converts each 6x6 tile
// of the transformed output back into a 4x4 spatial block and adds the
// per-channel bias once per output element.
static void conv3x3s1_winograd43_transform_output_packn_rvv(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    // packn = number of fp32 lanes in one vector register group (VLEN/32)
    const int packn = csrr_vlenb() / 4;
    const word_type vl = vsetvl_e32m1(packn);

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[4][6] = {
    //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };

    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 = (r01 - r02) + (r03 - r04) * 2
    // 2 = (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        // bias broadcast for this packed channel; zero vector when no bias
        vfloat32m1_t _bias0 = biasptr ? vle32_v_f32m1(biasptr + p * packn, vl) : vfmv_v_f_f32m1(0.f, vl);

        // NOTE variable length array
        float tmp[4][6][packn];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // six row pointers into the transformed output,
                // one per transform row of this tile
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * packn;
                const float* output0_tm_1 = output0_tm_0 + tiles * packn;
                const float* output0_tm_2 = output0_tm_0 + tiles * packn * 2;
                const float* output0_tm_3 = output0_tm_0 + tiles * packn * 3;
                const float* output0_tm_4 = output0_tm_0 + tiles * packn * 4;
                const float* output0_tm_5 = output0_tm_0 + tiles * packn * 5;

                // top-left of the 4x4 destination block in the output image
                float* output0 = out0.row(i * 4) + (j * 4) * packn;

                // pass 1: transform the 6 columns, staging a 4x6 tile in tmp
                for (int m = 0; m < 6; m++)
                {
                    vfloat32m1_t _out0tm0 = vle32_v_f32m1(output0_tm_0, vl);
                    vfloat32m1_t _out0tm1 = vle32_v_f32m1(output0_tm_1, vl);
                    vfloat32m1_t _out0tm2 = vle32_v_f32m1(output0_tm_2, vl);
                    vfloat32m1_t _out0tm3 = vle32_v_f32m1(output0_tm_3, vl);
                    vfloat32m1_t _out0tm4 = vle32_v_f32m1(output0_tm_4, vl);
                    vfloat32m1_t _out0tm5 = vle32_v_f32m1(output0_tm_5, vl);

                    // shared sub-expressions of the four output rows
                    vfloat32m1_t _tmp02a = vfadd_vv_f32m1(_out0tm1, _out0tm2, vl);
                    vfloat32m1_t _tmp13a = vfsub_vv_f32m1(_out0tm1, _out0tm2, vl);

                    vfloat32m1_t _tmp02b = vfadd_vv_f32m1(_out0tm3, _out0tm4, vl);
                    vfloat32m1_t _tmp13b = vfsub_vv_f32m1(_out0tm3, _out0tm4, vl);

                    vfloat32m1_t _tmp0m = vfadd_vv_f32m1(vfadd_vv_f32m1(_out0tm0, _tmp02a, vl), _tmp02b, vl);
                    vfloat32m1_t _tmp1m = vfmacc_vf_f32m1(_tmp13a, 2.f, _tmp13b, vl);
                    vfloat32m1_t _tmp2m = vfmacc_vf_f32m1(_tmp02a, 4.f, _tmp02b, vl);
                    vfloat32m1_t _tmp3m = vfmacc_vf_f32m1(vfadd_vv_f32m1(_out0tm5, _tmp13a, vl), 8.f, _tmp13b, vl);

                    vse32_v_f32m1(tmp[0][m], _tmp0m, vl);
                    vse32_v_f32m1(tmp[1][m], _tmp1m, vl);
                    vse32_v_f32m1(tmp[2][m], _tmp2m, vl);
                    vse32_v_f32m1(tmp[3][m], _tmp3m, vl);

                    output0_tm_0 += tiles * packn * 6;
                    output0_tm_1 += tiles * packn * 6;
                    output0_tm_2 += tiles * packn * 6;
                    output0_tm_3 += tiles * packn * 6;
                    output0_tm_4 += tiles * packn * 6;
                    output0_tm_5 += tiles * packn * 6;
                }

                // pass 2: transform the 4 staged rows and emit the 4x4 block
                for (int m = 0; m < 4; m++)
                {
                    vfloat32m1_t _tmp00 = vle32_v_f32m1(tmp[m][0], vl);
                    vfloat32m1_t _tmp01 = vle32_v_f32m1(tmp[m][1], vl);
                    vfloat32m1_t _tmp02 = vle32_v_f32m1(tmp[m][2], vl);
                    vfloat32m1_t _tmp03 = vle32_v_f32m1(tmp[m][3], vl);
                    vfloat32m1_t _tmp04 = vle32_v_f32m1(tmp[m][4], vl);
                    vfloat32m1_t _tmp05 = vle32_v_f32m1(tmp[m][5], vl);

                    vfloat32m1_t _tmp02a = vfadd_vv_f32m1(_tmp01, _tmp02, vl);
                    vfloat32m1_t _tmp13a = vfsub_vv_f32m1(_tmp01, _tmp02, vl);

                    vfloat32m1_t _tmp02b = vfadd_vv_f32m1(_tmp03, _tmp04, vl);
                    vfloat32m1_t _tmp13b = vfsub_vv_f32m1(_tmp03, _tmp04, vl);

                    // bias is folded into each of the four final values
                    vfloat32m1_t _out00 = vfadd_vv_f32m1(_bias0, vfadd_vv_f32m1(vfadd_vv_f32m1(_tmp00, _tmp02a, vl), _tmp02b, vl), vl);
                    vfloat32m1_t _out01 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(_tmp13a, 2.f, _tmp13b, vl), vl);
                    vfloat32m1_t _out02 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(_tmp02a, 4.f, _tmp02b, vl), vl);
                    vfloat32m1_t _out03 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfadd_vv_f32m1(_tmp05, _tmp13a, vl), 8.f, _tmp13b, vl), vl);

                    vse32_v_f32m1(output0, _out00, vl);
                    vse32_v_f32m1(output0 + packn, _out01, vl);
                    vse32_v_f32m1(output0 + packn * 2, _out02, vl);
                    vse32_v_f32m1(output0 + packn * 3, _out03, vl);

                    output0 += outw * packn;
                }
            }
        }
    }
}
updater_basemaker-inl.h
/*!
 * Copyright 2014 by Contributors
 * \file updater_basemaker-inl.h
 * \brief implement a common tree constructor
 * \author Tianqi Chen
 */
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_

#include <rabit/rabit.h>
#include <xgboost/base.h>
#include <xgboost/tree_updater.h>
#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>
#include "./param.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"

namespace xgboost {
namespace tree {
/*!
 * \brief base tree maker class that defines common operation
 *  needed in tree making
 */
class BaseMaker: public TreeUpdater {
 public:
  // Parse and record the training parameters for this updater.
  void Init(const std::vector<std::pair<std::string, std::string> >& args) override {
    param_.InitAllowUnknown(args);
  }

 protected:
  // helper to collect and query feature meta information
  struct FMetaHelper {
   public:
    /*! \brief find type of each feature, use column format */
    inline void InitByCol(DMatrix* p_fmat, const RegTree& tree) {
      fminmax_.resize(tree.param.num_feature * 2);
      // fminmax_ stores (-min, max) pairs per feature; seeding every slot
      // with -FLT_MAX lets a plain max() accumulate both bounds below.
      std::fill(fminmax_.begin(), fminmax_.end(),
                -std::numeric_limits<bst_float>::max());
      // start accumulating statistics
      for (const auto &batch : p_fmat->GetSortedColumnBatches()) {
        for (bst_uint fid = 0; fid < batch.Size(); ++fid) {
          auto c = batch[fid];
          if (c.size() != 0) {
            CHECK_LT(fid * 2, fminmax_.size());
            // columns are sorted by value, so c[0] is the minimum and
            // c[c.size()-1] the maximum of this feature in the batch
            fminmax_[fid * 2 + 0] = std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]);
            fminmax_[fid * 2 + 1] = std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]);
          }
        }
      }
    }
    /*!
 \brief synchronize the information */
    inline void SyncInfo() {
      // element-wise max over fminmax_ aggregates both the (-min) and the
      // (max) entries across all distributed workers
      rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size());
    }
    // get feature type, 0:empty 1:binary 2:real
    inline int Type(bst_uint fid) const {
      CHECK_LT(fid * 2 + 1, fminmax_.size())
          << "FeatHelper fid exceed query bound ";
      bst_float a = fminmax_[fid * 2];      // -min of the feature
      bst_float b = fminmax_[fid * 2 + 1];  //  max of the feature
      if (a == -std::numeric_limits<bst_float>::max()) return 0;  // never seen
      if (-a == b) {
        return 1;  // min == max: the feature takes a single value
      } else {
        return 2;
      }
    }
    inline bst_float MaxValue(bst_uint fid) const {
      return fminmax_[fid * 2 + 1];
    }
    // Sample a fraction p of the non-empty features into *p_findex; rank 0's
    // choice is broadcast so all workers agree on the subset.
    inline void SampleCol(float p, std::vector<bst_uint> *p_findex) const {
      std::vector<bst_uint> &findex = *p_findex;
      findex.clear();
      for (size_t i = 0; i < fminmax_.size(); i += 2) {
        const auto fid = static_cast<bst_uint>(i / 2);
        if (this->Type(fid) != 0) findex.push_back(fid);
      }
      auto n = static_cast<unsigned>(p * findex.size());
      std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
      findex.resize(n);
      // sync the findex if it is subsample
      std::string s_cache;
      common::MemoryBufferStream fc(&s_cache);
      dmlc::Stream& fs = fc;
      if (rabit::GetRank() == 0) {
        fs.Write(findex);
      }
      rabit::Broadcast(&s_cache, 0);
      fs.Read(&findex);
    }

   private:
    // per-feature bounds, laid out as [-min(f0), max(f0), -min(f1), max(f1), ...]
    std::vector<bst_float> fminmax_;
  };
  // ------static helper functions ------
  // helper function to get to next level of the tree
  /*! \brief this is helper function for row based data*/
  inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) {
    const RegTree::Node &n = tree[nid];
    bst_uint findex = n.SplitIndex();
    // scan the sparse row for the split feature; if it is missing the
    // instance follows the node's default direction
    for (const auto& ins : inst) {
      if (findex == ins.index) {
        if (ins.fvalue < n.SplitCond()) {
          return n.LeftChild();
        } else {
          return n.RightChild();
        }
      }
    }
    return n.DefaultChild();
  }
  // ------class member helpers---------
  /*!
 \brief initialize temp data structure */
  inline void InitData(const std::vector<GradientPair> &gpair,
                       const DMatrix &fmat,
                       const RegTree &tree) {
    CHECK_EQ(tree.param.num_nodes, tree.param.num_roots)
        << "TreeMaker: can only grow new tree";
    const std::vector<unsigned> &root_index = fmat.Info().root_index_;
    {
      // setup position: each row starts at its root node
      position_.resize(gpair.size());
      if (root_index.size() == 0) {
        std::fill(position_.begin(), position_.end(), 0);
      } else {
        for (size_t i = 0; i < position_.size(); ++i) {
          position_[i] = root_index[i];
          CHECK_LT(root_index[i], (unsigned)tree.param.num_roots)
              << "root index exceed setting";
        }
      }
      // mark delete for the deleted datas
      // (~nid encodes "inactive at node nid"; see Decode/SetEncodePosition)
      for (size_t i = 0; i < position_.size(); ++i) {
        if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i];
      }
      // mark subsample: rows losing the coin flip are also made inactive
      if (param_.subsample < 1.0f) {
        std::bernoulli_distribution coin_flip(param_.subsample);
        auto& rnd = common::GlobalRandom();
        for (size_t i = 0; i < position_.size(); ++i) {
          if (gpair[i].GetHess() < 0.0f) continue;
          if (!coin_flip(rnd)) position_[i] = ~position_[i];
        }
      }
    }
    {
      // expand query: initially all roots are candidates for expansion
      qexpand_.reserve(256);
      qexpand_.clear();
      for (int i = 0; i < tree.param.num_roots; ++i) {
        qexpand_.push_back(i);
      }
      this->UpdateNode2WorkIndex(tree);
    }
  }
  /*! \brief update queue expand add in new leaves */
  inline void UpdateQueueExpand(const RegTree &tree) {
    std::vector<int> newnodes;
    for (int nid : qexpand_) {
      if (!tree[nid].IsLeaf()) {
        // node was split in the last round: enqueue both children
        newnodes.push_back(tree[nid].LeftChild());
        newnodes.push_back(tree[nid].RightChild());
      }
    }
    // use new nodes for qexpand
    qexpand_ = newnodes;
    this->UpdateNode2WorkIndex(tree);
  }
  // return decoded position (strips the "inactive" complement encoding)
  inline int DecodePosition(bst_uint ridx) const {
    const int pid = position_[ridx];
    return pid < 0 ? ~pid : pid;
  }
  // encode the encoded position value for ridx
  // (preserves the active/inactive flag carried in the sign)
  inline void SetEncodePosition(bst_uint ridx, int nid) {
    if (position_[ridx] < 0) {
      position_[ridx] = ~nid;
    } else {
      position_[ridx] = nid;
    }
  }
  /*!
* \brief this is helper function uses column based data structure,
   *  reset the positions to the latest one
   * \param nodes the set of nodes that contains the split to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void ResetPositionCol(const std::vector<int> &nodes, DMatrix *p_fmat, const RegTree &tree) {
    // set the positions in the nondefault branches first, then push
    // everything that did not match a split feature to the default branch
    this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
    this->SetDefaultPostion(p_fmat, tree);
  }
  /*!
   * \brief helper function to set the non-leaf positions to default direction.
   *  This function can be applied multiple times and will get the same result.
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void SetDefaultPostion(DMatrix *p_fmat, const RegTree &tree) {
    // set default direct nodes to default
    // for leaf nodes that are not fresh, mark them to ~nid,
    // so that they are ignored in future statistics collection
    const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
    #pragma omp parallel for schedule(static)
    for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
      const int nid = this->DecodePosition(ridx);
      if (tree[nid].IsLeaf()) {
        // mark finish when it is not a fresh leaf
        // (RightChild() == -1 distinguishes settled leaves from fresh ones)
        if (tree[nid].RightChild() == -1) {
          position_[ridx] = ~nid;
        }
      } else {
        // push to default branch
        if (tree[nid].DefaultLeft()) {
          this->SetEncodePosition(ridx, tree[nid].LeftChild());
        } else {
          this->SetEncodePosition(ridx, tree[nid].RightChild());
        }
      }
    }
  }
  /*!
   * \brief this is helper function uses column based data structure,
   *  to CORRECT the positions of non-default directions that WAS set to default
   *  before calling this function.
   * \param batch The column batch
   * \param sorted_split_set The set of index that contains split solutions.
* \param tree the regression tree structure */
  inline void CorrectNonDefaultPositionByBatch(
      const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set,
      const RegTree &tree) {
    for (size_t fid = 0; fid < batch.Size(); ++fid) {
      auto col = batch[fid];
      // only process columns that are actually used by some split
      auto it = std::lower_bound(sorted_split_set.begin(),
                                 sorted_split_set.end(), fid);
      if (it != sorted_split_set.end() && *it == fid) {
        const auto ndata = static_cast<bst_omp_uint>(col.size());
        #pragma omp parallel for schedule(static)
        for (bst_omp_uint j = 0; j < ndata; ++j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          CHECK(tree[nid].IsLeaf());
          int pid = tree[nid].Parent();
          // go back to parent, correct those who are not default
          if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) {
            if (fvalue < tree[pid].SplitCond()) {
              this->SetEncodePosition(ridx, tree[pid].LeftChild());
            } else {
              this->SetEncodePosition(ridx, tree[pid].RightChild());
            }
          }
        }
      }
    }
  }
  /*!
   * \brief this is helper function uses column based data structure,
   * \param nodes the set of nodes that contains the split to be used
   * \param tree the regression tree structure
   * \param out_split_set The split index set
   */
  inline void GetSplitSet(const std::vector<int> &nodes,
                          const RegTree &tree,
                          std::vector<unsigned>* out_split_set) {
    std::vector<unsigned>& fsplits = *out_split_set;
    fsplits.clear();
    // step 1, classify the non-default data into right places
    for (int nid : nodes) {
      if (!tree[nid].IsLeaf()) {
        fsplits.push_back(tree[nid].SplitIndex());
      }
    }
    // deduplicate the collected feature indices
    std::sort(fsplits.begin(), fsplits.end());
    fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin());
  }
  /*!
* \brief this is helper function uses column based data structure,
   *  update all positions into nondefault branch, if any, ignore the default branch
   * \param nodes the set of nodes that contains the split to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
                                        DMatrix *p_fmat,
                                        const RegTree &tree) {
    // collect the distinct feature indices used by the splits in `nodes`
    std::vector<unsigned> fsplits;
    this->GetSplitSet(nodes, tree, &fsplits);
    for (const auto &batch : p_fmat->GetSortedColumnBatches()) {
      for (auto fid : fsplits) {
        auto col = batch[fid];
        const auto ndata = static_cast<bst_omp_uint>(col.size());
        #pragma omp parallel for schedule(static)
        for (bst_omp_uint j = 0; j < ndata; ++j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          // rows whose current node splits on this feature are routed
          // left/right by the observed value (non-default direction)
          if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) {
            if (fvalue < tree[nid].SplitCond()) {
              this->SetEncodePosition(ridx, tree[nid].LeftChild());
            } else {
              this->SetEncodePosition(ridx, tree[nid].RightChild());
            }
          }
        }
      }
    }
  }
  /*!
 \brief helper function to get statistics from a tree */
  template<typename TStats>
  inline void GetNodeStats(const std::vector<GradientPair> &gpair,
                           const DMatrix &fmat,
                           const RegTree &tree,
                           std::vector< std::vector<TStats> > *p_thread_temp,
                           std::vector<TStats> *p_node_stats) {
    std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
    thread_temp.resize(omp_get_max_threads());
    p_node_stats->resize(tree.param.num_nodes);
    // reset the per-thread accumulators for the nodes being expanded
    #pragma omp parallel
    {
      const int tid = omp_get_thread_num();
      thread_temp[tid].resize(tree.param.num_nodes, TStats());
      for (unsigned int nid : qexpand_) {
        thread_temp[tid][nid] = TStats();
      }
    }
    // setup position: accumulate gradient pairs per (thread, node);
    // negative positions mark inactive rows and are skipped
    const auto ndata = static_cast<bst_omp_uint>(fmat.Info().num_row_);
    #pragma omp parallel for schedule(static)
    for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
      const int nid = position_[ridx];
      const int tid = omp_get_thread_num();
      if (nid >= 0) {
        thread_temp[tid][nid].Add(gpair[ridx]);
      }
    }
    // sum the per thread statistics together
    for (int nid : qexpand_) {
      TStats &s = (*p_node_stats)[nid];
      s = TStats();
      for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
        s.Add(thread_temp[tid][nid]);
      }
    }
  }
  /*! \brief common helper data structure to build sketch */
  struct SketchEntry {
    /*! \brief total sum of amount to be met */
    double sum_total;
    /*! \brief statistics used in the sketch */
    double rmin, wmin;
    /*! \brief last seen feature value */
    bst_float last_fvalue;
    /*! \brief current size of sketch */
    double next_goal;
    // pointer to the sketch to put things in
    common::WXQuantileSketch<bst_float, bst_float> *sketch;
    // initialize the space
    inline void Init(unsigned max_size) {
      next_goal = -1.0f;  // sentinel: no element pushed yet
      rmin = wmin = 0.0f;
      sketch->temp.Reserve(max_size + 1);
      sketch->temp.size = 0;
    }
    /*!
* \brief push a new element to sketch * \param fvalue feature value, comes in sorted ascending order * \param w weight * \param max_size */ inline void Push(bst_float fvalue, bst_float w, unsigned max_size) { if (next_goal == -1.0f) { next_goal = 0.0f; last_fvalue = fvalue; wmin = w; return; } if (last_fvalue != fvalue) { double rmax = rmin + wmin; if (rmax >= next_goal && sketch->temp.size != max_size) { if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) { // push to sketch sketch->temp.data[sketch->temp.size] = common::WXQuantileSketch<bst_float, bst_float>:: Entry(static_cast<bst_float>(rmin), static_cast<bst_float>(rmax), static_cast<bst_float>(wmin), last_fvalue); CHECK_LT(sketch->temp.size, max_size) << "invalid maximum size max_size=" << max_size << ", stemp.size" << sketch->temp.size; ++sketch->temp.size; } if (sketch->temp.size == max_size) { next_goal = sum_total * 2.0f + 1e-5f; } else { next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size); } } else { if (rmax >= next_goal) { LOG(TRACKER) << "INFO: rmax=" << rmax << ", sum_total=" << sum_total << ", naxt_goal=" << next_goal << ", size=" << sketch->temp.size; } } rmin = rmax; wmin = w; last_fvalue = fvalue; } else { wmin += w; } } /*! \brief push final unfinished value to the sketch */ inline void Finalize(unsigned max_size) { double rmax = rmin + wmin; if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) { CHECK_LE(sketch->temp.size, max_size) << "Finalize: invalid maximum size, max_size=" << max_size << ", stemp.size=" << sketch->temp.size; // push to sketch sketch->temp.data[sketch->temp.size] = common::WXQuantileSketch<bst_float, bst_float>:: Entry(static_cast<bst_float>(rmin), static_cast<bst_float>(rmax), static_cast<bst_float>(wmin), last_fvalue); ++sketch->temp.size; } sketch->PushTemp(); } }; /*! \brief training parameter of tree grower */ TrainParam param_; /*! 
\brief queue of nodes to be expanded */ std::vector<int> qexpand_; /*! * \brief map active node to is working index offset in qexpand, * can be -1, which means the node is node actively expanding */ std::vector<int> node2workindex_; /*! * \brief position of each instance in the tree * can be negative, which means this position is no longer expanding * see also Decode/EncodePosition */ std::vector<int> position_; private: inline void UpdateNode2WorkIndex(const RegTree &tree) { // update the node2workindex std::fill(node2workindex_.begin(), node2workindex_.end(), -1); node2workindex_.resize(tree.param.num_nodes); for (size_t i = 0; i < qexpand_.size(); ++i) { node2workindex_[qexpand_[i]] = static_cast<int>(i); } } }; } // namespace tree } // namespace xgboost #endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
GB_bitmap_AxB_saxpy_A_sparse_B_bitmap_template.c
//------------------------------------------------------------------------------ // GB_bitmap_AxB_saxpy_A_sparse_B_bitmap: C<#M>+=A*B, C bitmap, M any format //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ { if (use_coarse_tasks) { //---------------------------------------------------------------------- // C<#M> += A*B using coarse tasks //---------------------------------------------------------------------- // number of columns in the workspace for each task #define GB_PANEL_SIZE 4 //---------------------------------------------------------------------- // allocate workspace for each task //---------------------------------------------------------------------- GB_WERK_PUSH (GH_slice, 2*ntasks, int64_t) ; if (GH_slice == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } int64_t *restrict G_slice = GH_slice ; int64_t *restrict H_slice = GH_slice + ntasks ; int64_t gwork = 0 ; int64_t hwork = 0 ; int tid ; for (tid = 0 ; tid < ntasks ; tid++) { int64_t jstart, jend ; GB_PARTITION (jstart, jend, bvdim, tid, ntasks) ; int64_t jtask = jend - jstart ; int64_t jpanel = GB_IMIN (jtask, GB_PANEL_SIZE) ; G_slice [tid] = gwork ; H_slice [tid] = hwork ; if (jpanel > 1) { // no need to allocate workspace for Gb and Gx if jpanel == 1 gwork += jpanel ; } hwork += jpanel ; } int64_t bvlenx = (B_is_pattern ? 0 : bvlen) * GB_BSIZE ; int64_t cvlenx = (GB_IS_ANY_PAIR_SEMIRING ? 0 : cvlen) * GB_CSIZE ; int64_t bvlenb = (GB_B_IS_BITMAP ? 
bvlen : 0) ; size_t gfspace = gwork * bvlenb ; size_t wfspace = gfspace + hwork * cvlen ; size_t wbxspace = gwork * bvlenx ; size_t wcxspace = hwork * cvlenx ; Wf = GB_MALLOC_WERK (wfspace, int8_t, &Wf_size) ; Wbx = GB_MALLOC_WERK (wbxspace, GB_void, &Wbx_size) ; Wcx = GB_MALLOC_WERK (wcxspace, GB_void, &Wcx_size) ; if (Wf == NULL || Wcx == NULL || Wbx == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } //---------------------------------------------------------------------- // C<#M> += A*B //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:cnvals) for (tid = 0 ; tid < ntasks ; tid++) { //------------------------------------------------------------------ // determine the vectors of B and C for this coarse task //------------------------------------------------------------------ int64_t jstart, jend ; GB_PARTITION (jstart, jend, bvdim, tid, ntasks) ; int64_t jtask = jend - jstart ; int64_t jpanel = GB_IMIN (jtask, GB_PANEL_SIZE) ; int64_t task_cnvals = 0 ; //------------------------------------------------------------------ // get the workspace for this task //------------------------------------------------------------------ // Gb and Gx workspace to load the panel of B int8_t *restrict Gb = Wf + G_slice [tid] * bvlenb ; GB_BTYPE *restrict Gx = (GB_BTYPE *) (Wbx + G_slice [tid] * bvlenx) ; // Hf and Hx workspace to compute the panel of C int8_t *restrict Hf = Wf + (H_slice [tid] * cvlen) + gfspace ; GB_CTYPE *restrict Hx = (GB_CTYPE *) (Wcx + H_slice [tid] * cvlenx) ; #if GB_IS_PLUS_FC32_MONOID float *restrict Hx_real = (float *) Hx ; float *restrict Hx_imag = Hx_real + 1 ; #elif GB_IS_PLUS_FC64_MONOID double *restrict Hx_real = (double *) Hx ; double *restrict Hx_imag = Hx_real + 1 ; #endif //------------------------------------------------------------------ // clear the panel 
//------------------------------------------------------------------ memset (Hf, 0, jpanel * cvlen) ; //------------------------------------------------------------------ // C<#M>(:,jstart:jend-1) += A * B(:,jstart:jend-1) by panel //------------------------------------------------------------------ for (int64_t j1 = jstart ; j1 < jend ; j1 += jpanel) { //-------------------------------------------------------------- // get the panel of np vectors j1:j2-1 //-------------------------------------------------------------- int64_t j2 = GB_IMIN (jend, j1 + jpanel) ; int64_t np = j2 - j1 ; //-------------------------------------------------------------- // load and transpose B(:,j1:j2-1) for one panel //-------------------------------------------------------------- #if GB_B_IS_BITMAP { if (np == 1) { // no need to load a single vector of B Gb = (int8_t *) (Bb + (j1 * bvlen)) ; } else { // load and transpose the bitmap of B(:,j1:j2-1) for (int64_t jj = 0 ; jj < np ; jj++) { int64_t j = j1 + jj ; for (int64_t i = 0 ; i < bvlen ; i++) { Gb [i*np + jj] = Bb [i + j * bvlen] ; } } } } #endif if (!B_is_pattern) { if (np == 1) { // no need to load a single vector of B GB_void *restrict Bx = (GB_void *) (B->x) ; Gx = (GB_BTYPE *) (Bx + (j1 * bvlen) * GB_BSIZE) ; } else { // load and transpose the values of B(:,j1:j2-1) for (int64_t jj = 0 ; jj < np ; jj++) { int64_t j = j1 + jj ; for (int64_t i = 0 ; i < bvlen ; i++) { // G(i,jj) = B(i,j), and change storage order int64_t pG = i*np + jj ; int64_t pB = i + j * bvlen ; GB_LOADB (Gx, pG, Bx, pB) ; } } } } //-------------------------------------------------------------- // H = A*G for one panel //-------------------------------------------------------------- for (int64_t kA = 0 ; kA < anvec ; kA++) { //---------------------------------------------------------- // get A(:,k) //---------------------------------------------------------- int64_t k = GBH (Ah, kA) ; int64_t pA = Ap [kA] ; int64_t pA_end = Ap [kA+1] ; int64_t pG = k * np ; 
#undef GB_MULT_A_ik_G_kjj #if GB_IS_PAIR_MULTIPLIER // t = A(i,k) * G (k,jj) is always equal to 1 #define GB_MULT_A_ik_G_kjj(jj) #else // t = A(i,k) * G (k,jj) GB_CIJ_DECLARE (t) ; #define GB_MULT_A_ik_G_kjj(jj) \ GB_GETB (gkj, Gx, pG+jj) ; \ GB_MULT (t, aik, gkj, i, k, j1 + jj) ; #endif #undef GB_HX_COMPUTE #define GB_HX_COMPUTE(jj) \ { \ /* H (i,jj) += A(i,k)*G(k,jj) */ \ if (!GB_B_IS_BITMAP || Gb [pG+jj]) \ { \ GB_MULT_A_ik_G_kjj (jj) ; \ if (Hf [pH+jj] == 0) \ { \ /* H(i,jj) is a new entry */ \ GB_HX_WRITE (pH+jj, t) ; /* Hx(i,jj)=t */ \ Hf [pH+jj] = 1 ; \ } \ else \ { \ /* H(i,jj) is already present */ \ GB_HX_UPDATE (pH+jj, t) ; /* Hx(i,jj)+=t */ \ } \ } \ } #undef GB_LOAD_A_ij #define GB_LOAD_A_ij \ int64_t i = Ai [pA] ; \ GB_GETA (aik, Ax, pA) ; \ int64_t pH = i * np ; //---------------------------------------------------------- // H += A(:,k)*G(k,:) //---------------------------------------------------------- #if GB_B_IS_BITMAP bool gb = false ; switch (np) { case 4 : gb = Gb [pG+3] ; case 3 : gb |= Gb [pG+2] ; case 2 : gb |= Gb [pG+1] ; case 1 : gb |= Gb [pG ] ; default: ; } if (gb) #endif { switch (np) { case 4 : for ( ; pA < pA_end ; pA++) { GB_LOAD_A_ij ; GB_HX_COMPUTE (0) ; GB_HX_COMPUTE (1) ; GB_HX_COMPUTE (2) ; GB_HX_COMPUTE (3) ; } break ; case 3 : for ( ; pA < pA_end ; pA++) { GB_LOAD_A_ij ; GB_HX_COMPUTE (0) ; GB_HX_COMPUTE (1) ; GB_HX_COMPUTE (2) ; } break ; case 2 : for ( ; pA < pA_end ; pA++) { GB_LOAD_A_ij ; GB_HX_COMPUTE (0) ; GB_HX_COMPUTE (1) ; } break ; case 1 : for ( ; pA < pA_end ; pA++) { GB_LOAD_A_ij ; GB_HX_COMPUTE (0) ; } break ; default:; } } #undef GB_MULT_A_ik_G_kjj #undef GB_HX_COMPUTE #undef GB_LOAD_A_ij } //-------------------------------------------------------------- // C<#M>(:,j1:j2-1) += H //-------------------------------------------------------------- for (int64_t jj = 0 ; jj < np ; jj++) { //---------------------------------------------------------- // C<#M>(:,j) += H (:,jj) 
//---------------------------------------------------------- int64_t j = j1 + jj ; int64_t pC_start = j * cvlen ; // get pointer to C(:,j) for (int64_t i = 0 ; i < cvlen ; i++) { int64_t pC = pC_start + i ; // pointer to C(i,j) int64_t pH = i * np + jj ; // pointer to H(i,jj) if (!Hf [pH]) continue ; Hf [pH] = 0 ; // clear the panel int8_t cb = Cb [pC] ; //------------------------------------------------------ // check M(i,j) //------------------------------------------------------ #if GB_MASK_IS_SPARSE_OR_HYPER // M is sparse or hypersparse bool mij = ((cb & 2) != 0) ^ Mask_comp ; if (!mij) continue ; cb = (cb & 1) ; #elif GB_MASK_IS_BITMAP_OR_FULL // M is bitmap or full GB_GET_M_ij (pC) ; mij = mij ^ Mask_comp ; if (!mij) continue ; #endif //------------------------------------------------------ // C(i,j) += H(i,jj) //------------------------------------------------------ if (cb == 0) { // C(i,j) = H(i,jj) #if GB_IS_ANY_PAIR_SEMIRING Cx [pC] = GB_CTYPE_CAST (1, 0) ; // C(i,j) = 1 #else GB_CIJ_GATHER (pC, pH) ; #endif Cb [pC] = keep ; task_cnvals++ ; } else { // Currently, the matrix C is a newly allocated // matrix, not the C_in input matrix to GrB_mxm. // As a result, this condition is not used. It // will be in the future when this method is // modified to modify C in-place. 
ASSERT (GB_DEAD_CODE) ; // C(i,j) += H(i,jj) GB_CIJ_GATHER_UPDATE (pC, pH) ; } } } } cnvals += task_cnvals ; } #undef GB_PANEL_SIZE } else if (use_atomics) { //---------------------------------------------------------------------- // C<#M> += A*B using fine tasks and atomics //---------------------------------------------------------------------- int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:cnvals) for (tid = 0 ; tid < ntasks ; tid++) { //------------------------------------------------------------------ // determine the vector of B and C for this fine task //------------------------------------------------------------------ // The fine task operates on C(:,j) and B(:,j). Its fine task // id ranges from 0 to nfine_tasks_per_vector-1, and determines // which slice of A to operate on. int64_t j = tid / nfine_tasks_per_vector ; int fine_tid = tid % nfine_tasks_per_vector ; int64_t kfirst = A_slice [fine_tid] ; int64_t klast = A_slice [fine_tid + 1] ; int64_t pB_start = j * bvlen ; // pointer to B(:,j) int64_t pC_start = j * cvlen ; // pointer to C(:,j) GB_GET_T_FOR_SECONDJ ; // t = j or j+1 for SECONDJ* int64_t task_cnvals = 0 ; // for Hx Gustavason workspace: use C(:,j) in-place: GB_CTYPE *restrict Hx = (GB_CTYPE *) (((GB_void *) Cx) + (pC_start * GB_CSIZE)) ; #if GB_IS_PLUS_FC32_MONOID || GB_IS_ANY_FC32_MONOID float *restrict Hx_real = (float *) Hx ; float *restrict Hx_imag = Hx_real + 1 ; #elif GB_IS_PLUS_FC64_MONOID || GB_IS_ANY_FC64_MONOID double *restrict Hx_real = (double *) Hx ; double *restrict Hx_imag = Hx_real + 1 ; #endif //------------------------------------------------------------------ // C<#M>(:,j) += A(:,k1:k2) * B(k1:k2,j) //------------------------------------------------------------------ for (int64_t kk = kfirst ; kk < klast ; kk++) { //-------------------------------------------------------------- // C<#M>(:,j) += A(:,k) * B(k,j) //-------------------------------------------------------------- 
int64_t k = GBH (Ah, kk) ; // k in range k1:k2 int64_t pB = pB_start + k ; // get pointer to B(k,j) if (!GBB (Bb, pB)) continue ; int64_t pA = Ap [kk] ; int64_t pA_end = Ap [kk+1] ; GB_GET_B_kj ; // bkj = B(k,j) for ( ; pA < pA_end ; pA++) { //---------------------------------------------------------- // get A(i,k) and C(i,j) //---------------------------------------------------------- int64_t i = Ai [pA] ; // get A(i,k) index int64_t pC = pC_start + i ; // get C(i,j) pointer int8_t cb ; //---------------------------------------------------------- // C<#M>(i,j) += A(i,k) * B(k,j) //---------------------------------------------------------- #if GB_MASK_IS_SPARSE_OR_HYPER { //------------------------------------------------------ // M is sparse, and scattered into the C bitmap //------------------------------------------------------ // finite-state machine in Cb [pC]: // 0: cij not present, mij zero // 1: cij present, mij zero (keep==1 for !M) // 2: cij not present, mij one // 3: cij present, mij one (keep==3 for M) // 7: cij is locked #if GB_HAS_ATOMIC { // if C(i,j) is already present and can be modified // (cb==keep), and the monoid can be done // atomically, then do the atomic update. No need // to modify Cb [pC]. 
GB_ATOMIC_READ cb = Cb [pC] ; // grab the entry if (cb == keep) { #if !GB_IS_ANY_MONOID GB_MULT_A_ik_B_kj ; // t = A(i,k) * B(k,j) GB_ATOMIC_UPDATE_HX (i, t) ; // C(i,j) += t #endif continue ; // C(i,j) has been updated } } #endif do // lock the entry { // do this atomically: // { cb = Cb [pC] ; Cb [pC] = 7 ; } GB_ATOMIC_CAPTURE_INT8 (cb, Cb [pC], 7) ; } while (cb == 7) ; // lock owner gets 0, 1, 2, or 3 if (cb == keep-1) { // C(i,j) is a new entry GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j) #if GB_IS_ANY_PAIR_SEMIRING GB_ATOMIC_SET_HX_ONE (i) ; // C(i,j) = 1 #else GB_ATOMIC_WRITE_HX (i, t) ; // C(i,j) = t #endif task_cnvals++ ; cb = keep ; // keep the entry } else if (cb == keep) { // C(i,j) is already present #if !GB_IS_ANY_MONOID GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j) GB_ATOMIC_UPDATE_HX (i, t) ; // C(i,j) += t #endif } GB_ATOMIC_WRITE Cb [pC] = cb ; // unlock the entry } #else { //------------------------------------------------------ // M is not present, or bitmap/full //------------------------------------------------------ // finite-state machine in Cb [pC]: // 0: cij not present; can be written // 1: cij present; can be updated // 7: cij is locked #if GB_MASK_IS_BITMAP_OR_FULL { // M is bitmap or full, and not in C bitmap. // Do not modify C(i,j) if not permitted by the mask GB_GET_M_ij (pC) ; mij = mij ^ Mask_comp ; if (!mij) continue ; } #endif //------------------------------------------------------ // C(i,j) += A(i,j) * B(k,j) //------------------------------------------------------ #if GB_HAS_ATOMIC { // if C(i,j) is already present (cb==1), and the // monoid can be done atomically, then do the // atomic update. No need to modify Cb [pC]. 
GB_ATOMIC_READ cb = Cb [pC] ; // grab the entry if (cb == 1) { #if !GB_IS_ANY_MONOID GB_MULT_A_ik_B_kj ; // t = A(i,k) * B(k,j) GB_ATOMIC_UPDATE_HX (i, t) ; // C(i,j) += t #endif continue ; // C(i,j) has been updated } } #endif do // lock the entry { // do this atomically: // { cb = Cb [pC] ; Cb [pC] = 7 ; } GB_ATOMIC_CAPTURE_INT8 (cb, Cb [pC], 7) ; } while (cb == 7) ; // lock owner gets 0 or 1 if (cb == 0) { // C(i,j) is a new entry GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j) #if GB_IS_ANY_PAIR_SEMIRING GB_ATOMIC_SET_HX_ONE (i) ; // C(i,j) = 1 #else GB_ATOMIC_WRITE_HX (i, t) ; // C(i,j) = t #endif task_cnvals++ ; } else // cb == 1 { // C(i,j) is already present #if !GB_IS_ANY_MONOID GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j) GB_ATOMIC_UPDATE_HX (i, t) ; // C(i,j) += t #endif } GB_ATOMIC_WRITE Cb [pC] = 1 ; // unlock the entry } #endif } } cnvals += task_cnvals ; } } else { //---------------------------------------------------------------------- // C<#M> += A*B using fine tasks and workspace, with no atomics //---------------------------------------------------------------------- // Each fine task is given size-cvlen workspace to compute its result // in the first phase, W(:,tid) = A(:,k1:k2) * B(k1:k2,j), where k1:k2 // is defined by the fine_tid of the task. The workspaces are then // summed into C in the second phase. //---------------------------------------------------------------------- // allocate workspace //---------------------------------------------------------------------- size_t workspace = cvlen * ntasks ; size_t cxsize = (GB_IS_ANY_PAIR_SEMIRING) ? 
0 : GB_CSIZE ; Wf = GB_MALLOC_WERK (workspace, int8_t, &Wf_size) ; Wcx = GB_MALLOC_WERK (workspace * cxsize, GB_void, &Wcx_size) ; if (Wf == NULL || Wcx == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } //---------------------------------------------------------------------- // first phase: W (:,tid) = A (:,k1:k2) * B (k2:k2,j) for each fine task //---------------------------------------------------------------------- int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { //------------------------------------------------------------------ // determine the vector of B and C for this fine task //------------------------------------------------------------------ // The fine task operates on C(:,j) and B(:,j). Its fine task // id ranges from 0 to nfine_tasks_per_vector-1, and determines // which slice of A to operate on. int64_t j = tid / nfine_tasks_per_vector ; int fine_tid = tid % nfine_tasks_per_vector ; int64_t kfirst = A_slice [fine_tid] ; int64_t klast = A_slice [fine_tid + 1] ; int64_t pB_start = j * bvlen ; // pointer to B(:,j) int64_t pC_start = j * cvlen ; // pointer to C(:,j), for bitmap int64_t pW_start = tid * cvlen ; // pointer to W(:,tid) GB_GET_T_FOR_SECONDJ ; // t = j or j+1 for SECONDJ* int64_t task_cnvals = 0 ; // for Hf and Hx Gustavason workspace: use W(:,tid): int8_t *restrict Hf = Wf + pW_start ; GB_CTYPE *restrict Hx = (GB_CTYPE *) (Wcx + (pW_start * cxsize)) ; #if GB_IS_PLUS_FC32_MONOID float *restrict Hx_real = (float *) Hx ; float *restrict Hx_imag = Hx_real + 1 ; #elif GB_IS_PLUS_FC64_MONOID double *restrict Hx_real = (double *) Hx ; double *restrict Hx_imag = Hx_real + 1 ; #endif //------------------------------------------------------------------ // clear Hf //------------------------------------------------------------------ memset (Hf, 0, cvlen) ; //------------------------------------------------------------------ // W<#M> = A(:,k1:k2) * B(k1:k2,j) 
//------------------------------------------------------------------ for (int64_t kk = kfirst ; kk < klast ; kk++) { //-------------------------------------------------------------- // W<#M>(:,tid) += A(:,k) * B(k,j) //-------------------------------------------------------------- int64_t k = GBH (Ah, kk) ; // k in range k1:k2 int64_t pB = pB_start + k ; // get pointer to B(k,j) if (!GBB (Bb, pB)) continue ; int64_t pA = Ap [kk] ; int64_t pA_end = Ap [kk+1] ; GB_GET_B_kj ; // bkj = B(k,j) for ( ; pA < pA_end ; pA++) { //---------------------------------------------------------- // get A(i,k) //---------------------------------------------------------- int64_t i = Ai [pA] ; // get A(i,k) index //---------------------------------------------------------- // check M(i,j) //---------------------------------------------------------- #if GB_MASK_IS_SPARSE_OR_HYPER { // M is sparse or hypersparse int64_t pC = pC_start + i ; int8_t cb = Cb [pC] ; bool mij = ((cb & 2) != 0) ^ Mask_comp ; if (!mij) continue ; } #elif GB_MASK_IS_BITMAP_OR_FULL { // M is bitmap or full int64_t pC = pC_start + i ; GB_GET_M_ij (pC) ; mij = mij ^ Mask_comp ; if (!mij) continue ; } #endif //---------------------------------------------------------- // W<#M>(i) += A(i,k) * B(k,j) //---------------------------------------------------------- #if GB_IS_ANY_PAIR_SEMIRING { // Hx is not used; Cx [...] 
= 1 is done below Hf [i] = 1 ; } #else { GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j) if (Hf [i] == 0) { // W(i,j) is a new entry GB_HX_WRITE (i, t) ; // Hx(i) = t Hf [i] = 1 ; } else { // W(i) is already present GB_HX_UPDATE (i, t) ; // Hx(i) += t } } #endif } } } //---------------------------------------------------------------------- // second phase: C<#M> += reduce (W) //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:cnvals) for (tid = 0 ; tid < ntasks ; tid++) { //------------------------------------------------------------------ // determine the W and C for this fine task //------------------------------------------------------------------ // The fine task operates on C(i1:i2,j) and W(i1:i2,w1:w2), where // i1:i2 is defined by the fine task id. Its fine task id ranges // from 0 to nfine_tasks_per_vector-1. // w1:w2 are the updates to C(:,j), where w1:w2 = // [j*nfine_tasks_per_vector : (j+1)*nfine_tasks_per_vector-1]. 
int64_t j = tid / nfine_tasks_per_vector ; int fine_tid = tid % nfine_tasks_per_vector ; int64_t istart, iend ; GB_PARTITION (istart, iend, cvlen, fine_tid, nfine_tasks_per_vector) ; int64_t pC_start = j * cvlen ; // pointer to C(:,j) int64_t wstart = j * nfine_tasks_per_vector ; int64_t wend = (j + 1) * nfine_tasks_per_vector ; int64_t task_cnvals = 0 ; // Hx = (typecasted) Wcx workspace, use Wf as-is GB_CTYPE *restrict Hx = ((GB_CTYPE *) Wcx) ; #if GB_IS_PLUS_FC32_MONOID float *restrict Hx_real = (float *) Hx ; float *restrict Hx_imag = Hx_real + 1 ; #elif GB_IS_PLUS_FC64_MONOID double *restrict Hx_real = (double *) Hx ; double *restrict Hx_imag = Hx_real + 1 ; #endif //------------------------------------------------------------------ // C<#M>(i1:i2,j) += reduce (W (i2:i2, wstart:wend)) //------------------------------------------------------------------ for (int64_t w = wstart ; w < wend ; w++) { //-------------------------------------------------------------- // C<#M>(i1:i2,j) += W (i1:i2,w) //-------------------------------------------------------------- int64_t pW_start = w * cvlen ; // pointer to W (:,w) for (int64_t i = istart ; i < iend ; i++) { //---------------------------------------------------------- // get pointer and bitmap C(i,j) and W(i,w) //---------------------------------------------------------- int64_t pW = pW_start + i ; // pointer to W(i,w) if (Wf [pW] == 0) continue ; // skip if not present int64_t pC = pC_start + i ; // pointer to C(i,j) int8_t cb = Cb [pC] ; // bitmap status of C(i,j) //---------------------------------------------------------- // M(i,j) already checked, but adjust Cb if M is sparse //---------------------------------------------------------- #if GB_MASK_IS_SPARSE_OR_HYPER { // M is sparse or hypersparse cb = (cb & 1) ; } #endif //---------------------------------------------------------- // C(i,j) += W (i,w) //---------------------------------------------------------- if (cb == 0) { // C(i,j) = W(i,w) #if 
GB_IS_ANY_PAIR_SEMIRING Cx [pC] = GB_CTYPE_CAST (1, 0) ; // C(i,j) = 1 #else GB_CIJ_GATHER (pC, pW) ; #endif Cb [pC] = keep ; task_cnvals++ ; } else { // C(i,j) += W(i,w) GB_CIJ_GATHER_UPDATE (pC, pW) ; } } } cnvals += task_cnvals ; } } }
GB_subassign_15.c
//------------------------------------------------------------------------------
// GB_subassign_15: C(I,J)<!M> += scalar ; using S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Method 15: C(I,J)<!M> += scalar ; using S

// M: present
// Mask_comp: true
// C_replace: false
// accum: present
// A: scalar
// S: constructed

// NOTE: nearly all of the working variables used below (nthreads, ntasks,
// kfirst, klast, pS, pS_end, pM, pM_end, Si, Mi, Mx, msize, cast_M,
// task_pending, nzombies, pending_sorted, ...) are declared by the GB_GET_*
// and task-descriptor macros from GB_subassign_methods.h; this file only
// reads sensibly with those macro expansions in mind.

#include "GB_subassign_methods.h"

GrB_Info GB_subassign_15
(
    GrB_Matrix C,               // matrix being modified in place
    // input:
    const GrB_Index *I,         // row index list (interpretation per Ikind)
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,         // column index list (interpretation per Jkind)
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_Matrix M,         // mask, used complemented (<!M>)
    const GrB_BinaryOp accum,   // accumulator applied to existing entries
    const void *scalar,         // scalar to assign/accumulate
    const GrB_Type atype,       // type of the scalar
    const GrB_Matrix S,         // S matrix: pattern of C(I,J), precomputed
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_GET_C ;
    const bool C_is_hyper = C->is_hyper ;
    const int64_t Cnvec = C->nvec ;
    const int64_t *restrict Ch = C->h ;
    const int64_t *restrict Cp = C->p ;
    GB_GET_MASK ;
    const bool M_is_hyper = M->is_hyper ;
    const int64_t Mnvec = M->nvec ;
    GB_GET_S ;
    const int64_t *restrict Sh = S->h ;
    const int64_t Snvec = S->nvec ;
    const bool S_is_hyper = S->is_hyper ;
    GB_GET_ACCUM_SCALAR ;

    //--------------------------------------------------------------------------
    // Method 15: C(I,J)<!M> += scalar ; using S
    //--------------------------------------------------------------------------

    // Time: Close to optimal; must visit all IxJ, so Omega(|I|*|J|) is
    // required.  The sparsity of !M cannot be exploited.

    // Methods 13, 15, 17, and 19 are very similar.

    //--------------------------------------------------------------------------
    // Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19)
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_IXJ_SLICE ;

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    // Phase 1 only touches entries already present in C (found via S): it
    // applies the accumulator to them, and counts (but does not yet insert)
    // the pending tuples for positions not present in C.

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (int taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            GB_GET_jC ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j) and M(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S) ;
            GB_GET_VECTOR_FOR_IXJ (M) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC)<!M> += scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {

                //--------------------------------------------------------------
                // Get the indices at the top of each list.
                //--------------------------------------------------------------

                // INT64_MAX is a sentinel for an exhausted list, so an
                // exhausted S or M never matches the current index i below.
                int64_t iS = (pS < pS_end) ? Si [pS] : INT64_MAX ;
                int64_t iM = (pM < pM_end) ? Mi [pM] : INT64_MAX ;

                //--------------------------------------------------------------
                // find the smallest index of [iS iA iM] (always iA)
                //--------------------------------------------------------------

                int64_t i = iA ;

                //--------------------------------------------------------------
                // get M(i,j)
                //--------------------------------------------------------------

                bool mij ;
                if (i == iM)
                { 
                    // mij = (bool) M [pM]
                    cast_M (&mij, Mx +(pM*msize), 0) ;
                    GB_NEXT (M) ;
                }
                else
                { 
                    // mij not present, implicitly false
                    ASSERT (i < iM) ;
                    mij = false ;
                }

                // complement the mask entry mij since Mask_comp is true
                mij = !mij ;

                //--------------------------------------------------------------
                // accumulate the entry
                //--------------------------------------------------------------

                if (i == iS)
                {
                    ASSERT (i == iA) ;
                    {
                        // both S (i,j) and A (i,j) present
                        if (mij)
                        { 
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =C+A ): apply accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_C_S_LOOKUP ;
                            GB_withaccum_C_A_1_scalar ;
                        }
                        // advance S even if the (complemented) mask is false
                        GB_NEXT (S) ;
                    }
                }
                else
                {
                    ASSERT (i == iA) ;
                    {
                        // S (i,j) is not present, A (i,j) is present
                        if (mij)
                        { 
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            // counted only; actually inserted in phase 2
                            task_pending++ ;
                        }
                    }
                }
            }
        }
        GB_PHASE1_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    // Phase 2 replays the same IxJ traversal (so each task sees the same
    // entries as in phase 1) and inserts the pending tuples counted above.

    GB_PENDING_CUMSUM ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (int taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            GB_GET_jC ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j) and M(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S) ;
            GB_GET_VECTOR_FOR_IXJ (M) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC)<!M> += scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {

                //--------------------------------------------------------------
                // Get the indices at the top of each list.
                //--------------------------------------------------------------

                int64_t iS = (pS < pS_end) ? Si [pS] : INT64_MAX ;
                int64_t iM = (pM < pM_end) ? Mi [pM] : INT64_MAX ;

                //--------------------------------------------------------------
                // find the smallest index of [iS iA iM] (always iA)
                //--------------------------------------------------------------

                int64_t i = iA ;

                //--------------------------------------------------------------
                // get M(i,j)
                //--------------------------------------------------------------

                bool mij ;
                if (i == iM)
                { 
                    // mij = (bool) M [pM]
                    cast_M (&mij, Mx +(pM*msize), 0) ;
                    GB_NEXT (M) ;
                }
                else
                { 
                    // mij not present, implicitly false
                    ASSERT (i < iM) ;
                    mij = false ;
                }

                // complement the mask entry mij since Mask_comp is true
                mij = !mij ;

                //--------------------------------------------------------------
                // accumulate the entry
                //--------------------------------------------------------------

                if (i == iS)
                {
                    ASSERT (i == iA) ;
                    {
                        // entry already present in C: handled in phase 1;
                        // just keep the S list in step
                        GB_NEXT (S) ;
                    }
                }
                else
                {
                    ASSERT (i == iA) ;
                    {
                        // S (i,j) is not present, A (i,j) is present
                        if (mij)
                        { 
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                            GB_PENDING_INSERT (scalar) ;
                        }
                    }
                }
            }
        }
        GB_PHASE2_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
StmtOpenMP.h
//===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// \file /// This file defines OpenMP AST classes for executable directives and /// clauses. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMTOPENMP_H #define LLVM_CLANG_AST_STMTOPENMP_H #include "clang/AST/ASTContext.h" #include "clang/AST/Expr.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" namespace clang { //===----------------------------------------------------------------------===// // AST classes for directives. //===----------------------------------------------------------------------===// /// Representation of an OpenMP canonical loop. /// /// OpenMP 1.0 C/C++, section 2.4.1 for Construct; canonical-shape /// OpenMP 2.0 C/C++, section 2.4.1 for Construct; canonical-shape /// OpenMP 2.5, section 2.5.1 Loop Construct; canonical form /// OpenMP 3.1, section 2.5.1 Loop Construct; canonical form /// OpenMP 4.0, section 2.6 Canonical Loop Form /// OpenMP 4.5, section 2.6 Canonical Loop Form /// OpenMP 5.0, section 2.9.1 Canonical Loop Form /// OpenMP 5.1, section 2.11.1 Canonical Loop Nest Form /// /// An OpenMP canonical loop is a for-statement or range-based for-statement /// with additional requirements that ensure that the number of iterations is /// known before entering the loop and allow skipping to an arbitrary iteration. 
/// The OMPCanonicalLoop AST node wraps a ForStmt or CXXForRangeStmt that is /// known to fulfill OpenMP's canonical loop requirements because of being /// associated to an OMPLoopBasedDirective. That is, the general structure is: /// /// OMPLoopBasedDirective /// [`- CapturedStmt ] /// [ `- CapturedDecl] /// ` OMPCanonicalLoop /// `- ForStmt/CXXForRangeStmt /// `- Stmt /// /// One or multiple CapturedStmt/CapturedDecl pairs may be inserted by some /// directives such as OMPParallelForDirective, but others do not need them /// (such as OMPTileDirective). In The OMPCanonicalLoop and /// ForStmt/CXXForRangeStmt pair is repeated for loop associated with the /// directive. A OMPCanonicalLoop must not appear in the AST unless associated /// with a OMPLoopBasedDirective. In an imperfectly nested loop nest, the /// OMPCanonicalLoop may also be wrapped in a CompoundStmt: /// /// [...] /// ` OMPCanonicalLoop /// `- ForStmt/CXXForRangeStmt /// `- CompoundStmt /// |- Leading in-between code (if any) /// |- OMPCanonicalLoop /// | `- ForStmt/CXXForRangeStmt /// | `- ... /// `- Trailing in-between code (if any) /// /// The leading/trailing in-between code must not itself be a OMPCanonicalLoop /// to avoid confusion which loop belongs to the nesting. /// /// There are three different kinds of iteration variables for different /// purposes: /// * Loop user variable: The user-accessible variable with different value for /// each iteration. /// * Loop iteration variable: The variable used to identify a loop iteration; /// for range-based for-statement, this is the hidden iterator '__begin'. For /// other loops, it is identical to the loop user variable. Must be a /// random-access iterator, pointer or integer type. /// * Logical iteration counter: Normalized loop counter starting at 0 and /// incrementing by one at each iteration. 
Allows abstracting over the type /// of the loop iteration variable and is always an unsigned integer type /// appropriate to represent the range of the loop iteration variable. Its /// value corresponds to the logical iteration number in the OpenMP /// specification. /// /// This AST node provides two captured statements: /// * The distance function which computes the number of iterations. /// * The loop user variable function that computes the loop user variable when /// given a logical iteration number. /// /// These captured statements provide the link between C/C++ semantics and the /// logical iteration counters used by the OpenMPIRBuilder which is /// language-agnostic and therefore does not know e.g. how to advance a /// random-access iterator. The OpenMPIRBuilder will use this information to /// apply simd, workshare-loop, distribute, taskloop and loop directives to the /// loop. For compatibility with the non-OpenMPIRBuilder codegen path, an /// OMPCanonicalLoop can itself also be wrapped into the CapturedStmts of an /// OMPLoopDirective and skipped when searching for the associated syntactical /// loop. /// /// Example: /// <code> /// std::vector<std::string> Container{1,2,3}; /// for (std::string Str : Container) /// Body(Str); /// </code> /// which is syntactic sugar for approximately: /// <code> /// auto &&__range = Container; /// auto __begin = std::begin(__range); /// auto __end = std::end(__range); /// for (; __begin != __end; ++__begin) { /// std::String Str = *__begin; /// Body(Str); /// } /// </code> /// In this example, the loop user variable is `Str`, the loop iteration /// variable is `__begin` of type `std::vector<std::string>::iterator` and the /// logical iteration number type is `size_t` (unsigned version of /// `std::vector<std::string>::iterator::difference_type` aka `ptrdiff_t`). 
/// Therefore, the distance function will be
/// <code>
///   [&](size_t &Result) { Result = __end - __begin; }
/// </code>
/// and the loop variable function is
/// <code>
///   [&,__begin](std::vector<std::string>::iterator &Result, size_t Logical) {
///     Result = __begin + Logical;
///   }
/// </code>
/// The variable `__begin`, aka the loop iteration variable, is captured by
/// value because it is modified in the loop body, but both functions require
/// the initial value. The OpenMP specification explicitly leaves unspecified
/// when the loop expressions are evaluated such that a capture by reference is
/// sufficient.
class OMPCanonicalLoop : public Stmt {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;

  /// Children of this AST node; the enumerators index into SubStmts below.
  enum {
    LOOP_STMT,
    DISTANCE_FUNC,
    LOOPVAR_FUNC,
    LOOPVAR_REF,
    LastSubStmt = LOOPVAR_REF
  };

private:
  /// This AST node's children, indexed by the enum above.
  Stmt *SubStmts[LastSubStmt + 1] = {};

  OMPCanonicalLoop() : Stmt(StmtClass::OMPCanonicalLoopClass) {}

public:
  /// Create a new OMPCanonicalLoop with all four children set.
  static OMPCanonicalLoop *create(const ASTContext &Ctx, Stmt *LoopStmt,
                                  CapturedStmt *DistanceFunc,
                                  CapturedStmt *LoopVarFunc,
                                  DeclRefExpr *LoopVarRef) {
    OMPCanonicalLoop *S = new (Ctx) OMPCanonicalLoop();
    S->setLoopStmt(LoopStmt);
    S->setDistanceFunc(DistanceFunc);
    S->setLoopVarFunc(LoopVarFunc);
    S->setLoopVarRef(LoopVarRef);
    return S;
  }

  /// Create an empty OMPCanonicalLoop for deserialization.
  static OMPCanonicalLoop *createEmpty(const ASTContext &Ctx) {
    return new (Ctx) OMPCanonicalLoop();
  }

  static bool classof(const Stmt *S) {
    return S->getStmtClass() == StmtClass::OMPCanonicalLoopClass;
  }

  // Source range is delegated to the wrapped loop statement.
  SourceLocation getBeginLoc() const { return getLoopStmt()->getBeginLoc(); }
  SourceLocation getEndLoc() const { return getLoopStmt()->getEndLoc(); }

  /// Return this AST node's children.
  /// @{
  child_range children() {
    return child_range(&SubStmts[0], &SubStmts[0] + LastSubStmt + 1);
  }
  const_child_range children() const {
    return const_child_range(&SubStmts[0], &SubStmts[0] + LastSubStmt + 1);
  }
  /// @}

  /// The wrapped syntactic loop statement (ForStmt or CXXForRangeStmt).
  /// @{
  Stmt *getLoopStmt() { return SubStmts[LOOP_STMT]; }
  const Stmt *getLoopStmt() const { return SubStmts[LOOP_STMT]; }
  void setLoopStmt(Stmt *S) {
    assert((isa<ForStmt>(S) || isa<CXXForRangeStmt>(S)) &&
           "Canonical loop must be a for loop (range-based or otherwise)");
    SubStmts[LOOP_STMT] = S;
  }
  /// @}

  /// The function that computes the number of loop iterations. Can be evaluated
  /// before entering the loop but after the syntactical loop's init
  /// statement(s).
  ///
  /// Function signature: void(LogicalTy &Result)
  /// Any values necessary to compute the distance are captures of the closure.
  /// @{
  CapturedStmt *getDistanceFunc() {
    return cast<CapturedStmt>(SubStmts[DISTANCE_FUNC]);
  }
  const CapturedStmt *getDistanceFunc() const {
    return cast<CapturedStmt>(SubStmts[DISTANCE_FUNC]);
  }
  void setDistanceFunc(CapturedStmt *S) {
    assert(S && "Expected non-null captured statement");
    SubStmts[DISTANCE_FUNC] = S;
  }
  /// @}

  /// The function that computes the loop user variable from a logical iteration
  /// counter. Can be evaluated as first statement in the loop.
  ///
  /// Function signature: void(LoopVarTy &Result, LogicalTy Number)
  /// Any other values required to compute the loop user variable (such as start
  /// value, step size) are captured by the closure. In particular, the initial
  /// value of loop iteration variable is captured by value to be unaffected by
  /// previous iterations.
  /// @{
  CapturedStmt *getLoopVarFunc() {
    return cast<CapturedStmt>(SubStmts[LOOPVAR_FUNC]);
  }
  const CapturedStmt *getLoopVarFunc() const {
    return cast<CapturedStmt>(SubStmts[LOOPVAR_FUNC]);
  }
  void setLoopVarFunc(CapturedStmt *S) {
    assert(S && "Expected non-null captured statement");
    SubStmts[LOOPVAR_FUNC] = S;
  }
  /// @}

  /// Reference to the loop user variable as accessed in the loop body.
  /// @{
  DeclRefExpr *getLoopVarRef() {
    return cast<DeclRefExpr>(SubStmts[LOOPVAR_REF]);
  }
  const DeclRefExpr *getLoopVarRef() const {
    return cast<DeclRefExpr>(SubStmts[LOOPVAR_REF]);
  }
  void setLoopVarRef(DeclRefExpr *E) {
    assert(E && "Expected non-null loop variable");
    SubStmts[LOOPVAR_REF] = E;
  }
  /// @}
};

/// This is a basic class for representing single OpenMP executable
/// directive.
///
class OMPExecutableDirective : public Stmt {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;

  /// Kind of the directive.
  OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown;
  /// Starting location of the directive (directive keyword).
  SourceLocation StartLoc;
  /// Ending location of the directive.
  SourceLocation EndLoc;

  /// Get the clauses storage; empty when there is no associated Data.
  MutableArrayRef<OMPClause *> getClauses() {
    if (!Data)
      return llvm::None;
    return Data->getClauses();
  }

protected:
  /// Data, associated with the directive.
  OMPChildren *Data = nullptr;

  /// Build instance of directive of class \a K.
  ///
  /// \param SC Statement class.
  /// \param K Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  ///
  OMPExecutableDirective(StmtClass SC, OpenMPDirectiveKind K,
                         SourceLocation StartLoc, SourceLocation EndLoc)
      : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
        EndLoc(std::move(EndLoc)) {}

  /// Allocate a directive of type T with its OMPChildren storage placed
  /// immediately after the T object in a single allocation, and forward the
  /// remaining arguments to T's constructor.
  template <typename T, typename... Params>
  static T *createDirective(const ASTContext &C, ArrayRef<OMPClause *> Clauses,
                            Stmt *AssociatedStmt, unsigned NumChildren,
                            Params &&... P) {
    void *Mem =
        C.Allocate(sizeof(T) + OMPChildren::size(Clauses.size(),
                                                 AssociatedStmt, NumChildren),
                   alignof(T));
    // OMPChildren lives in the tail storage right after the T object.
    auto *Data = OMPChildren::Create(reinterpret_cast<T *>(Mem) + 1, Clauses,
                                     AssociatedStmt, NumChildren);
    auto *Inst = new (Mem) T(std::forward<Params>(P)...);
    Inst->Data = Data;
    return Inst;
  }

  /// Same allocation scheme as createDirective, but with empty (to be
  /// deserialized) clause/child storage.
  template <typename T, typename... Params>
  static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses,
                                 bool HasAssociatedStmt, unsigned NumChildren,
                                 Params &&... P) {
    void *Mem =
        C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt,
                                                 NumChildren),
                   alignof(T));
    auto *Data =
        OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1, NumClauses,
                                 HasAssociatedStmt, NumChildren);
    auto *Inst = new (Mem) T(std::forward<Params>(P)...);
    Inst->Data = Data;
    return Inst;
  }

  /// Convenience overload: default-construct T with empty child storage.
  template <typename T>
  static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses,
                                 bool HasAssociatedStmt = false,
                                 unsigned NumChildren = 0) {
    void *Mem =
        C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt,
                                                 NumChildren),
                   alignof(T));
    auto *Data =
        OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1, NumClauses,
                                 HasAssociatedStmt, NumChildren);
    auto *Inst = new (Mem) T;
    Inst->Data = Data;
    return Inst;
  }

public:
  /// Iterates over expressions/statements used in the construct.
  class used_clauses_child_iterator
      : public llvm::iterator_adaptor_base<
            used_clauses_child_iterator, ArrayRef<OMPClause *>::iterator,
            std::forward_iterator_tag, Stmt *, ptrdiff_t, Stmt *, Stmt *> {
    ArrayRef<OMPClause *>::iterator End;
    OMPClause::child_iterator ChildI, ChildEnd;

    // Advance the outer clause iterator until a clause with at least one
    // used child is found (or the clause range is exhausted).
    void MoveToNext() {
      if (ChildI != ChildEnd)
        return;
      while (this->I != End) {
        ++this->I;
        if (this->I != End) {
          ChildI = (*this->I)->used_children().begin();
          ChildEnd = (*this->I)->used_children().end();
          if (ChildI != ChildEnd)
            return;
        }
      }
    }

  public:
    explicit used_clauses_child_iterator(ArrayRef<OMPClause *> Clauses)
        : used_clauses_child_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      if (this->I != End) {
        ChildI = (*this->I)->used_children().begin();
        ChildEnd = (*this->I)->used_children().end();
        MoveToNext();
      }
    }
    Stmt *operator*() const { return *ChildI; }
    Stmt *operator->() const { return **this; }

    used_clauses_child_iterator &operator++() {
      ++ChildI;
      if (ChildI != ChildEnd)
        return *this;
      if (this->I != End) {
        ++this->I;
        if (this->I != End) {
          ChildI = (*this->I)->used_children().begin();
          ChildEnd = (*this->I)->used_children().end();
        }
      }
      MoveToNext();
      return *this;
    }
  };

  static llvm::iterator_range<used_clauses_child_iterator>
  used_clauses_children(ArrayRef<OMPClause *> Clauses) {
    return {used_clauses_child_iterator(Clauses),
            used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))};
  }

  /// Iterates over a filtered subrange of clauses applied to a
  /// directive.
  ///
  /// This iterator visits only clauses of type SpecificClause.
  template <typename SpecificClause>
  class specific_clause_iterator
      : public llvm::iterator_adaptor_base<
            specific_clause_iterator<SpecificClause>,
            ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag,
            const SpecificClause *, ptrdiff_t, const SpecificClause *,
            const SpecificClause *> {
    ArrayRef<OMPClause *>::const_iterator End;

    // Skip clauses that are not of type SpecificClause.
    void SkipToNextClause() {
      while (this->I != End && !isa<SpecificClause>(*this->I))
        ++this->I;
    }

  public:
    explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses)
        : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      SkipToNextClause();
    }

    const SpecificClause *operator*() const {
      return cast<SpecificClause>(*this->I);
    }
    const SpecificClause *operator->() const { return **this; }

    specific_clause_iterator &operator++() {
      ++this->I;
      SkipToNextClause();
      return *this;
    }
  };

  template <typename SpecificClause>
  static llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
    return {specific_clause_iterator<SpecificClause>(Clauses),
            specific_clause_iterator<SpecificClause>(
                llvm::makeArrayRef(Clauses.end(), 0))};
  }

  template <typename SpecificClause>
  llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind() const {
    return getClausesOfKind<SpecificClause>(clauses());
  }

  /// Gets a single clause of the specified kind associated with the
  /// current directive iff there is only one clause of this kind (and assertion
  /// is fired if there is more than one clause is associated with the
  /// directive). Returns nullptr if no clause of this kind is associated with
  /// the directive.
  template <typename SpecificClause>
  static const SpecificClause *getSingleClause(ArrayRef<OMPClause *> Clauses) {
    auto ClausesOfKind = getClausesOfKind<SpecificClause>(Clauses);

    if (ClausesOfKind.begin() != ClausesOfKind.end()) {
      assert(std::next(ClausesOfKind.begin()) == ClausesOfKind.end() &&
             "There are at least 2 clauses of the specified kind");
      return *ClausesOfKind.begin();
    }
    return nullptr;
  }

  template <typename SpecificClause>
  const SpecificClause *getSingleClause() const {
    return getSingleClause<SpecificClause>(clauses());
  }

  /// Returns true if the current directive has one or more clauses of a
  /// specific kind.
  template <typename SpecificClause>
  bool hasClausesOfKind() const {
    auto Clauses = getClausesOfKind<SpecificClause>();
    return Clauses.begin() != Clauses.end();
  }

  /// Returns starting location of directive kind.
  SourceLocation getBeginLoc() const { return StartLoc; }
  /// Returns ending location of directive.
  SourceLocation getEndLoc() const { return EndLoc; }

  /// Set starting location of directive kind.
  ///
  /// \param Loc New starting location of directive.
  ///
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// Set ending location of directive.
  ///
  /// \param Loc New ending location of directive.
  ///
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// Get number of clauses.
  unsigned getNumClauses() const {
    if (!Data)
      return 0;
    return Data->getNumClauses();
  }

  /// Returns specified clause.
  ///
  /// \param I Number of clause.
  ///
  OMPClause *getClause(unsigned I) const { return clauses()[I]; }

  /// Returns true if directive has associated statement.
  bool hasAssociatedStmt() const { return Data && Data->hasAssociatedStmt(); }

  /// Returns statement associated with the directive.
  const Stmt *getAssociatedStmt() const {
    return const_cast<OMPExecutableDirective *>(this)->getAssociatedStmt();
  }
  Stmt *getAssociatedStmt() {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    return Data->getAssociatedStmt();
  }

  /// Returns the captured statement associated with the
  /// component region within the (combined) directive.
  ///
  /// \param RegionKind Component region kind.
  const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    return Data->getCapturedStmt(RegionKind, CaptureRegions);
  }

  /// Get innermost captured statement for the construct.
  CapturedStmt *getInnermostCapturedStmt() {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    return Data->getInnermostCapturedStmt(CaptureRegions);
  }

  const CapturedStmt *getInnermostCapturedStmt() const {
    return const_cast<OMPExecutableDirective *>(this)
        ->getInnermostCapturedStmt();
  }

  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

  // Matches any statement class in the OMP executable-directive range.
  static bool classof(const Stmt *S) {
    return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
           S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
  }

  child_range children() {
    if (!Data)
      return child_range(child_iterator(), child_iterator());
    return Data->getAssociatedStmtAsRange();
  }

  const_child_range children() const {
    return const_cast<OMPExecutableDirective *>(this)->children();
  }

  ArrayRef<OMPClause *> clauses() const {
    if (!Data)
      return llvm::None;
    return Data->getClauses();
  }

  /// Returns whether or not this is a Standalone directive.
  ///
  /// Stand-alone directives are executable directives
  /// that have no associated user code.
  bool isStandaloneDirective() const;

  /// Returns the AST node representing OpenMP structured-block of this
  /// OpenMP executable directive,
  /// Prerequisite: Executable Directive must not be Standalone directive.
  const Stmt *getStructuredBlock() const {
    return const_cast<OMPExecutableDirective *>(this)->getStructuredBlock();
  }
  Stmt *getStructuredBlock();

  const Stmt *getRawStmt() const {
    return const_cast<OMPExecutableDirective *>(this)->getRawStmt();
  }
  Stmt *getRawStmt() {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    return Data->getRawStmt();
  }
};

/// This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelDirectiveClass,
                               llvm::omp::OMPD_parallel, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPParallelDirective()
      : OMPExecutableDirective(OMPParallelDirectiveClass,
                               llvm::omp::OMPD_parallel, SourceLocation(),
                               SourceLocation()) {}

  /// Sets special task reduction descriptor.
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement associated with the directive. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[0]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPParallelDirective *>(this)->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelDirectiveClass; } }; /// The base class for all loop-based directives, including loop transformation /// directives. class OMPLoopBasedDirective : public OMPExecutableDirective { friend class ASTStmtReader; protected: /// Number of collapsed loops as specified by 'collapse' clause. unsigned NumAssociatedLoops = 0; /// Build instance of loop directive of class \a Kind. /// /// \param SC Statement class. /// \param Kind Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// \param NumAssociatedLoops Number of loops associated with the construct. 
///
OMPLoopBasedDirective(StmtClass SC, OpenMPDirectiveKind Kind,
                      SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned NumAssociatedLoops)
    : OMPExecutableDirective(SC, Kind, StartLoc, EndLoc),
      NumAssociatedLoops(NumAssociatedLoops) {}

public:
/// The expressions built to support OpenMP loops in combined/composite
/// pragmas (e.g. pragma omp distribute parallel for)
struct DistCombinedHelperExprs {
  /// DistributeLowerBound - used when composing 'omp distribute' with
  /// 'omp for' in a same construct.
  Expr *LB;
  /// DistributeUpperBound - used when composing 'omp distribute' with
  /// 'omp for' in a same construct.
  Expr *UB;
  /// DistributeEnsureUpperBound - used when composing 'omp distribute'
  /// with 'omp for' in a same construct, EUB depends on DistUB
  Expr *EUB;
  /// Distribute loop iteration variable init used when composing 'omp
  /// distribute'
  /// with 'omp for' in a same construct
  Expr *Init;
  /// Distribute Loop condition used when composing 'omp distribute'
  /// with 'omp for' in a same construct
  Expr *Cond;
  /// Update of LowerBound for statically scheduled omp loops for
  /// outer loop in combined constructs (e.g. 'distribute parallel for')
  Expr *NLB;
  /// Update of UpperBound for statically scheduled omp loops for
  /// outer loop in combined constructs (e.g. 'distribute parallel for')
  Expr *NUB;
  /// Distribute Loop condition used when composing 'omp distribute'
  /// with 'omp for' in a same construct when schedule is chunked.
  Expr *DistCond;
  /// 'omp parallel for' loop condition used when composed with
  /// 'omp distribute' in the same construct and when schedule is
  /// chunked and the chunk size is 1.
  Expr *ParForInDistCond;
};

/// The expressions built for the OpenMP loop CodeGen for the
/// whole collapsed loop nest.
struct HelperExprs {
  /// Loop iteration variable.
  Expr *IterationVarRef;
  /// Loop last iteration number.
  Expr *LastIteration;
  /// Loop number of iterations.
  Expr *NumIterations;
  /// Calculation of last iteration.
  Expr *CalcLastIteration;
  /// Loop pre-condition.
  Expr *PreCond;
  /// Loop condition.
  Expr *Cond;
  /// Loop iteration variable init.
  Expr *Init;
  /// Loop increment.
  Expr *Inc;
  /// IsLastIteration - local flag variable passed to runtime.
  Expr *IL;
  /// LowerBound - local variable passed to runtime.
  Expr *LB;
  /// UpperBound - local variable passed to runtime.
  Expr *UB;
  /// Stride - local variable passed to runtime.
  Expr *ST;
  /// EnsureUpperBound -- expression UB = min(UB, NumIterations).
  Expr *EUB;
  /// Update of LowerBound for statically scheduled 'omp for' loops.
  Expr *NLB;
  /// Update of UpperBound for statically scheduled 'omp for' loops.
  Expr *NUB;
  /// PreviousLowerBound - local variable passed to runtime in the
  /// enclosing schedule or null if that does not apply.
  Expr *PrevLB;
  /// PreviousUpperBound - local variable passed to runtime in the
  /// enclosing schedule or null if that does not apply.
  Expr *PrevUB;
  /// DistInc - increment expression for distribute loop when found
  /// combined with a further loop level (e.g. in 'distribute parallel for')
  /// expression IV = IV + ST
  Expr *DistInc;
  /// PrevEUB - expression similar to EUB but to be used when loop
  /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for'
  /// when ensuring that the UB is either the calculated UB by the runtime or
  /// the end of the assigned distribute chunk)
  /// expression UB = min (UB, PrevUB)
  Expr *PrevEUB;
  /// Counters Loop counters.
  SmallVector<Expr *, 4> Counters;
  /// PrivateCounters Loop counters.
  SmallVector<Expr *, 4> PrivateCounters;
  /// Expressions for loop counters inits for CodeGen.
  SmallVector<Expr *, 4> Inits;
  /// Expressions for loop counters update for CodeGen.
  SmallVector<Expr *, 4> Updates;
  /// Final loop counter values for CodeGen.
  SmallVector<Expr *, 4> Finals;
  /// List of counters required for the generation of the non-rectangular
  /// loops.
  SmallVector<Expr *, 4> DependentCounters;
  /// List of initializers required for the generation of the non-rectangular
  /// loops.
  SmallVector<Expr *, 4> DependentInits;
  /// List of final conditions required for the generation of the
  /// non-rectangular loops.
  SmallVector<Expr *, 4> FinalsConditions;
  /// Init statement for all captured expressions.
  Stmt *PreInits;

  /// Expressions used when combining OpenMP loop pragmas
  DistCombinedHelperExprs DistCombinedFields;

  /// Check if all the expressions are built (does not check the
  /// worksharing ones).
  /// NOTE: CalcLastIteration is intentionally not part of this check.
  bool builtAll() {
    return IterationVarRef != nullptr && LastIteration != nullptr &&
           NumIterations != nullptr && PreCond != nullptr &&
           Cond != nullptr && Init != nullptr && Inc != nullptr;
  }

  /// Initialize all the fields to null.
  /// \param Size Number of elements in the
  /// counters/finals/updates/dependent_counters/dependent_inits/finals_conditions
  /// arrays.
  void clear(unsigned Size) {
    IterationVarRef = nullptr;
    LastIteration = nullptr;
    CalcLastIteration = nullptr;
    PreCond = nullptr;
    Cond = nullptr;
    Init = nullptr;
    Inc = nullptr;
    IL = nullptr;
    LB = nullptr;
    UB = nullptr;
    ST = nullptr;
    EUB = nullptr;
    NLB = nullptr;
    NUB = nullptr;
    NumIterations = nullptr;
    PrevLB = nullptr;
    PrevUB = nullptr;
    DistInc = nullptr;
    PrevEUB = nullptr;
    Counters.resize(Size);
    PrivateCounters.resize(Size);
    Inits.resize(Size);
    Updates.resize(Size);
    Finals.resize(Size);
    DependentCounters.resize(Size);
    DependentInits.resize(Size);
    FinalsConditions.resize(Size);
    for (unsigned I = 0; I < Size; ++I) {
      Counters[I] = nullptr;
      PrivateCounters[I] = nullptr;
      Inits[I] = nullptr;
      Updates[I] = nullptr;
      Finals[I] = nullptr;
      DependentCounters[I] = nullptr;
      DependentInits[I] = nullptr;
      FinalsConditions[I] = nullptr;
    }
    PreInits = nullptr;
    DistCombinedFields.LB = nullptr;
    DistCombinedFields.UB = nullptr;
    DistCombinedFields.EUB = nullptr;
    DistCombinedFields.Init = nullptr;
    DistCombinedFields.Cond = nullptr;
    DistCombinedFields.NLB = nullptr;
    DistCombinedFields.NUB = nullptr;
    DistCombinedFields.DistCond = nullptr;
    DistCombinedFields.ParForInDistCond = nullptr;
  }
};

/// Get number of collapsed loops.
unsigned getLoopsNumber() const { return NumAssociatedLoops; }

/// Try to find the next loop sub-statement in the specified statement \p
/// CurStmt.
/// \param TryImperfectlyNestedLoops true, if we need to try to look for the
/// imperfectly nested loop.
static Stmt *tryToFindNextInnerLoop(Stmt *CurStmt,
                                    bool TryImperfectlyNestedLoops);
static const Stmt *tryToFindNextInnerLoop(const Stmt *CurStmt,
                                          bool TryImperfectlyNestedLoops) {
  return tryToFindNextInnerLoop(const_cast<Stmt *>(CurStmt),
                                TryImperfectlyNestedLoops);
}

/// Calls the specified callback function for all the loops in \p CurStmt,
/// from the outermost to the innermost.
static bool doForAllLoops(
    Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
    llvm::function_ref<bool(unsigned, Stmt *)> Callback,
    llvm::function_ref<void(OMPLoopBasedDirective *)>
        OnTransformationCallback);
static bool
doForAllLoops(const Stmt *CurStmt, bool TryImperfectlyNestedLoops,
              unsigned NumLoops,
              llvm::function_ref<bool(unsigned, const Stmt *)> Callback,
              llvm::function_ref<void(const OMPLoopBasedDirective *)>
                  OnTransformationCallback) {
  // Adapt the const callbacks to the non-const entry point above.
  auto &&NewCallback = [Callback](unsigned Cnt, Stmt *CurStmt) {
    return Callback(Cnt, CurStmt);
  };
  auto &&NewTransformCb =
      [OnTransformationCallback](OMPLoopBasedDirective *A) {
        OnTransformationCallback(A);
      };
  return doForAllLoops(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
                       NumLoops, NewCallback, NewTransformCb);
}

/// Calls the specified callback function for all the loops in \p CurStmt,
/// from the outermost to the innermost.
static bool doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops,
                          unsigned NumLoops,
                          llvm::function_ref<bool(unsigned, Stmt *)> Callback) {
  // Convenience overload: ignore loop-transformation directives.
  auto &&TransformCb = [](OMPLoopBasedDirective *) {};
  return doForAllLoops(CurStmt, TryImperfectlyNestedLoops, NumLoops, Callback,
                       TransformCb);
}
static bool
doForAllLoops(const Stmt *CurStmt, bool TryImperfectlyNestedLoops,
              unsigned NumLoops,
              llvm::function_ref<bool(unsigned, const Stmt *)> Callback) {
  // Adapt the const callback to the non-const entry point.
  auto &&NewCallback = [Callback](unsigned Cnt, const Stmt *CurStmt) {
    return Callback(Cnt, CurStmt);
  };
  return doForAllLoops(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
                       NumLoops, NewCallback);
}

/// Calls the specified callback function for all the loop bodies in \p
/// CurStmt, from the outermost loop to the innermost.
static void doForAllLoopsBodies(
    Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
    llvm::function_ref<void(unsigned, Stmt *, Stmt *)> Callback);
static void doForAllLoopsBodies(
    const Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
    llvm::function_ref<void(unsigned, const Stmt *, const Stmt *)> Callback) {
  auto &&NewCallback = [Callback](unsigned Cnt, Stmt *Loop, Stmt *Body) {
    Callback(Cnt, Loop, Body);
  };
  doForAllLoopsBodies(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
                      NumLoops, NewCallback);
}

/// Any executable directive whose kind is an OpenMP loop directive matches.
static bool classof(const Stmt *T) {
  if (auto *D = dyn_cast<OMPExecutableDirective>(T))
    return isOpenMPLoopDirective(D->getDirectiveKind());
  return false;
}
};

/// This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPLoopBasedDirective {
  friend class ASTStmtReader;

  /// Offsets to the stored exprs.
  /// This enumeration contains offsets to all the pointers to children
  /// expressions stored in OMPLoopDirective.
/// The first 9 children are necessary for all the loop directives,
/// the next 8 are specific to the worksharing ones, and the next 11 are
/// used for combined constructs containing two pragmas associated to loops.
/// After the fixed children, three arrays of length NumAssociatedLoops are
/// allocated: loop counters, their updates and final values.
/// PrevLowerBound and PrevUpperBound are used to communicate blocking
/// information in composite constructs which require loop blocking
/// DistInc is used to generate the increment expression for the distribute
/// loop when combined with a further nested loop
/// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the
/// for loop when combined with a previous distribute loop in the same pragma
/// (e.g. 'distribute parallel for')
///
enum {
  IterationVariableOffset = 0,
  LastIterationOffset = 1,
  CalcLastIterationOffset = 2,
  PreConditionOffset = 3,
  CondOffset = 4,
  InitOffset = 5,
  IncOffset = 6,
  PreInitsOffset = 7,
  // The '...End' enumerators do not correspond to child expressions - they
  // specify the offset to the end (and start of the following counters/
  // updates/finals/dependent_counters/dependent_inits/finals_conditions
  // arrays).
  DefaultEnd = 8,
  // The following 8 exprs are used by worksharing and distribute loops only.
  IsLastIterVariableOffset = 8,
  LowerBoundVariableOffset = 9,
  UpperBoundVariableOffset = 10,
  StrideVariableOffset = 11,
  EnsureUpperBoundOffset = 12,
  NextLowerBoundOffset = 13,
  NextUpperBoundOffset = 14,
  NumIterationsOffset = 15,
  // Offset to the end for worksharing loop directives.
  WorksharingEnd = 16,
  PrevLowerBoundVariableOffset = 16,
  PrevUpperBoundVariableOffset = 17,
  DistIncOffset = 18,
  PrevEnsureUpperBoundOffset = 19,
  CombinedLowerBoundVariableOffset = 20,
  CombinedUpperBoundVariableOffset = 21,
  CombinedEnsureUpperBoundOffset = 22,
  CombinedInitOffset = 23,
  CombinedConditionOffset = 24,
  CombinedNextLowerBoundOffset = 25,
  CombinedNextUpperBoundOffset = 26,
  CombinedDistConditionOffset = 27,
  CombinedParForInDistConditionOffset = 28,
  // Offset to the end (and start of the following
  // counters/updates/finals/dependent_counters/dependent_inits/finals_conditions
  // arrays) for combined distribute loop directives.
  CombinedDistributeEnd = 29,
};

/// Get the counters storage.
MutableArrayRef<Expr *> getCounters() {
  auto **Storage = reinterpret_cast<Expr **>(
      &Data->getChildren()[getArraysOffset(getDirectiveKind())]);
  return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}

/// Get the private counters storage.
MutableArrayRef<Expr *> getPrivateCounters() {
  auto **Storage = reinterpret_cast<Expr **>(
      &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                           getLoopsNumber()]);
  return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}

/// Get the updates storage.
MutableArrayRef<Expr *> getInits() {
  auto **Storage = reinterpret_cast<Expr **>(
      &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                           2 * getLoopsNumber()]);
  return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}

/// Get the updates storage.
MutableArrayRef<Expr *> getUpdates() {
  auto **Storage = reinterpret_cast<Expr **>(
      &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                           3 * getLoopsNumber()]);
  return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}

/// Get the final counter updates storage.
MutableArrayRef<Expr *> getFinals() {
  auto **Storage = reinterpret_cast<Expr **>(
      &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                           4 * getLoopsNumber()]);
  return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}

/// Get the dependent counters storage.
MutableArrayRef<Expr *> getDependentCounters() {
  auto **Storage = reinterpret_cast<Expr **>(
      &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                           5 * getLoopsNumber()]);
  return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}

/// Get the dependent inits storage.
MutableArrayRef<Expr *> getDependentInits() {
  auto **Storage = reinterpret_cast<Expr **>(
      &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                           6 * getLoopsNumber()]);
  return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}

/// Get the finals conditions storage.
MutableArrayRef<Expr *> getFinalsConditions() {
  auto **Storage = reinterpret_cast<Expr **>(
      &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                           7 * getLoopsNumber()]);
  return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}

protected:
/// Build instance of loop directive of class \a Kind.
///
/// \param SC Statement class.
/// \param Kind Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
///
OMPLoopDirective(StmtClass SC, OpenMPDirectiveKind Kind,
                 SourceLocation StartLoc, SourceLocation EndLoc,
                 unsigned CollapsedNum)
    : OMPLoopBasedDirective(SC, Kind, StartLoc, EndLoc, CollapsedNum) {}

/// Offset to the start of children expression arrays.
static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
  if (isOpenMPLoopBoundSharingDirective(Kind))
    return CombinedDistributeEnd;
  if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) ||
      isOpenMPDistributeDirective(Kind))
    return WorksharingEnd;
  return DefaultEnd;
}

/// Children number.
static unsigned numLoopChildren(unsigned CollapsedNum,
                                OpenMPDirectiveKind Kind) {
  return getArraysOffset(Kind) +
         8 * CollapsedNum; // Counters, PrivateCounters, Inits,
                           // Updates, Finals, DependentCounters,
                           // DependentInits, FinalsConditions.
}

// Setters for the fixed children slots; each stores into the offset
// declared in the enum above.
void setIterationVariable(Expr *IV) {
  Data->getChildren()[IterationVariableOffset] = IV;
}
void setLastIteration(Expr *LI) {
  Data->getChildren()[LastIterationOffset] = LI;
}
void setCalcLastIteration(Expr *CLI) {
  Data->getChildren()[CalcLastIterationOffset] = CLI;
}
void setPreCond(Expr *PC) { Data->getChildren()[PreConditionOffset] = PC; }
void setCond(Expr *Cond) { Data->getChildren()[CondOffset] = Cond; }
void setInit(Expr *Init) { Data->getChildren()[InitOffset] = Init; }
void setInc(Expr *Inc) { Data->getChildren()[IncOffset] = Inc; }
void setPreInits(Stmt *PreInits) {
  Data->getChildren()[PreInitsOffset] = PreInits;
}
// The following setters are valid only for worksharing/taskloop/distribute
// directives, as asserted.
void setIsLastIterVariable(Expr *IL) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  Data->getChildren()[IsLastIterVariableOffset] = IL;
}
void setLowerBoundVariable(Expr *LB) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  Data->getChildren()[LowerBoundVariableOffset] = LB;
}
void setUpperBoundVariable(Expr *UB) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  Data->getChildren()[UpperBoundVariableOffset] = UB;
}
void setStrideVariable(Expr *ST) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  Data->getChildren()[StrideVariableOffset] = ST;
}
void setEnsureUpperBound(Expr *EUB) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  Data->getChildren()[EnsureUpperBoundOffset] = EUB;
}
void setNextLowerBound(Expr *NLB) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  Data->getChildren()[NextLowerBoundOffset] = NLB;
}
void setNextUpperBound(Expr *NUB) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  Data->getChildren()[NextUpperBoundOffset] = NUB;
}
void setNumIterations(Expr *NI) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  Data->getChildren()[NumIterationsOffset] = NI;
}
// The following setters are valid only for loop-bound-sharing (combined
// distribute) directives, as asserted.
void setPrevLowerBoundVariable(Expr *PrevLB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[PrevLowerBoundVariableOffset] = PrevLB;
}
void setPrevUpperBoundVariable(Expr *PrevUB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[PrevUpperBoundVariableOffset] = PrevUB;
}
void setDistInc(Expr *DistInc) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[DistIncOffset] = DistInc;
}
void setPrevEnsureUpperBound(Expr *PrevEUB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[PrevEnsureUpperBoundOffset] = PrevEUB;
}
void setCombinedLowerBoundVariable(Expr *CombLB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[CombinedLowerBoundVariableOffset] = CombLB;
}
void setCombinedUpperBoundVariable(Expr *CombUB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[CombinedUpperBoundVariableOffset] = CombUB;
}
void setCombinedEnsureUpperBound(Expr *CombEUB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[CombinedEnsureUpperBoundOffset] = CombEUB;
}
void setCombinedInit(Expr *CombInit) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[CombinedInitOffset] = CombInit;
}
void setCombinedCond(Expr *CombCond) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[CombinedConditionOffset] = CombCond;
}
void setCombinedNextLowerBound(Expr *CombNLB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[CombinedNextLowerBoundOffset] = CombNLB;
}
void setCombinedNextUpperBound(Expr *CombNUB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[CombinedNextUpperBoundOffset] = CombNUB;
}
void setCombinedDistCond(Expr *CombDistCond) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound distribute sharing directive");
  Data->getChildren()[CombinedDistConditionOffset] = CombDistCond;
}
void setCombinedParForInDistCond(Expr *CombParForInDistCond) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound distribute sharing directive");
  Data->getChildren()[CombinedParForInDistConditionOffset] =
      CombParForInDistCond;
}

// Bulk setters for the per-loop expression arrays (defined out of line).
void setCounters(ArrayRef<Expr *> A);
void setPrivateCounters(ArrayRef<Expr *> A);
void setInits(ArrayRef<Expr *> A);
void setUpdates(ArrayRef<Expr *> A);
void setFinals(ArrayRef<Expr *> A);
void setDependentCounters(ArrayRef<Expr *> A);
void setDependentInits(ArrayRef<Expr *> A);
void setFinalsConditions(ArrayRef<Expr *> A);

public:
// Accessors for the fixed children slots.
Expr *getIterationVariable() const {
  return cast<Expr>(Data->getChildren()[IterationVariableOffset]);
}
Expr *getLastIteration() const {
  return cast<Expr>(Data->getChildren()[LastIterationOffset]);
}
Expr *getCalcLastIteration() const {
  return cast<Expr>(Data->getChildren()[CalcLastIterationOffset]);
}
Expr *getPreCond() const {
  return cast<Expr>(Data->getChildren()[PreConditionOffset]);
}
Expr *getCond() const { return cast<Expr>(Data->getChildren()[CondOffset]); }
Expr *getInit() const { return cast<Expr>(Data->getChildren()[InitOffset]); }
Expr *getInc() const { return cast<Expr>(Data->getChildren()[IncOffset]); }
const Stmt *getPreInits() const {
  return Data->getChildren()[PreInitsOffset];
}
Stmt *getPreInits() { return Data->getChildren()[PreInitsOffset]; }
// Accessors valid only for worksharing/taskloop/distribute directives.
Expr *getIsLastIterVariable() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return cast<Expr>(Data->getChildren()[IsLastIterVariableOffset]);
}
Expr *getLowerBoundVariable() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return cast<Expr>(Data->getChildren()[LowerBoundVariableOffset]);
}
Expr *getUpperBoundVariable() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return cast<Expr>(Data->getChildren()[UpperBoundVariableOffset]);
}
Expr *getStrideVariable() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return cast<Expr>(Data->getChildren()[StrideVariableOffset]);
}
Expr *getEnsureUpperBound() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return cast<Expr>(Data->getChildren()[EnsureUpperBoundOffset]);
}
Expr *getNextLowerBound() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return cast<Expr>(Data->getChildren()[NextLowerBoundOffset]);
}
Expr *getNextUpperBound() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return cast<Expr>(Data->getChildren()[NextUpperBoundOffset]);
}
Expr *getNumIterations() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return cast<Expr>(Data->getChildren()[NumIterationsOffset]);
}
// Accessors valid only for loop-bound-sharing (combined distribute)
// directives.
Expr *getPrevLowerBoundVariable() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[PrevLowerBoundVariableOffset]);
}
Expr *getPrevUpperBoundVariable() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[PrevUpperBoundVariableOffset]);
}
Expr *getDistInc() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[DistIncOffset]);
}
Expr *getPrevEnsureUpperBound() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[PrevEnsureUpperBoundOffset]);
}
Expr *getCombinedLowerBoundVariable() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[CombinedLowerBoundVariableOffset]);
}
Expr *getCombinedUpperBoundVariable() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[CombinedUpperBoundVariableOffset]);
}
Expr *getCombinedEnsureUpperBound() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[CombinedEnsureUpperBoundOffset]);
}
Expr *getCombinedInit() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[CombinedInitOffset]);
}
Expr *getCombinedCond() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[CombinedConditionOffset]);
}
Expr *getCombinedNextLowerBound() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[CombinedNextLowerBoundOffset]);
}
Expr *getCombinedNextUpperBound() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[CombinedNextUpperBoundOffset]);
}
Expr *getCombinedDistCond() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound distribute sharing directive");
  return cast<Expr>(Data->getChildren()[CombinedDistConditionOffset]);
}
Expr *getCombinedParForInDistCond() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound distribute sharing directive");
  return cast<Expr>(Data->getChildren()[CombinedParForInDistConditionOffset]);
}
Stmt *getBody();
const Stmt *getBody() const {
  return const_cast<OMPLoopDirective *>(this)->getBody();
}

// Read-only views over the per-loop expression arrays.
ArrayRef<Expr *> counters() { return getCounters(); }
ArrayRef<Expr *> counters() const {
  return const_cast<OMPLoopDirective *>(this)->getCounters();
}
ArrayRef<Expr *> private_counters() { return getPrivateCounters(); }
ArrayRef<Expr *> private_counters() const {
  return const_cast<OMPLoopDirective *>(this)->getPrivateCounters();
}
ArrayRef<Expr *> inits() { return getInits(); }
ArrayRef<Expr *> inits() const {
  return const_cast<OMPLoopDirective *>(this)->getInits();
}
ArrayRef<Expr *> updates() { return getUpdates(); }
ArrayRef<Expr *> updates() const {
  return const_cast<OMPLoopDirective *>(this)->getUpdates();
}
ArrayRef<Expr *> finals() { return getFinals(); }
ArrayRef<Expr *> finals() const {
  return const_cast<OMPLoopDirective *>(this)->getFinals();
}
ArrayRef<Expr *> dependent_counters() { return getDependentCounters(); }
ArrayRef<Expr *> dependent_counters() const {
  return const_cast<OMPLoopDirective *>(this)->getDependentCounters();
}
ArrayRef<Expr *> dependent_inits() { return getDependentInits(); }
ArrayRef<Expr *> dependent_inits() const {
  return const_cast<OMPLoopDirective *>(this)->getDependentInits();
}
ArrayRef<Expr *> finals_conditions() { return getFinalsConditions(); }
ArrayRef<Expr *> finals_conditions() const {
  return const_cast<OMPLoopDirective *>(this)->getFinalsConditions();
}
static bool classof(const Stmt *T) {
  return
      // Every concrete directive derived from OMPLoopDirective.
      T->getStmtClass() == OMPSimdDirectiveClass ||
      T->getStmtClass() == OMPForDirectiveClass ||
      T->getStmtClass() == OMPForSimdDirectiveClass ||
      T->getStmtClass() == OMPParallelForDirectiveClass ||
      T->getStmtClass() == OMPParallelForSimdDirectiveClass ||
      T->getStmtClass() == OMPTaskLoopDirectiveClass ||
      T->getStmtClass() == OMPTaskLoopSimdDirectiveClass ||
      T->getStmtClass() == OMPMasterTaskLoopDirectiveClass ||
      T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass ||
      T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass ||
      T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass ||
      T->getStmtClass() == OMPDistributeDirectiveClass ||
      T->getStmtClass() == OMPTargetParallelForDirectiveClass ||
      T->getStmtClass() == OMPDistributeParallelForDirectiveClass ||
      T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass ||
      T->getStmtClass() == OMPDistributeSimdDirectiveClass ||
      T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass ||
      T->getStmtClass() == OMPTargetSimdDirectiveClass ||
      T->getStmtClass() == OMPTeamsDistributeDirectiveClass ||
      T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass ||
      T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass ||
      T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass ||
      T->getStmtClass() ==
          OMPTargetTeamsDistributeParallelForDirectiveClass ||
      T->getStmtClass() ==
          OMPTargetTeamsDistributeParallelForSimdDirectiveClass ||
      T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass ||
      T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
}
};

/// This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
/// class OMPSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass; } }; /// This represents '#pragma omp for' directive. 
/// /// \code /// #pragma omp for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for' has clauses 'private' with the /// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c' /// and 'd'. /// class OMPForDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current directive has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren(getLoopsNumber(), llvm::omp::OMPD_for)] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if current directive has inner cancel directive. 
/// static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_for)]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPForDirective *>(this)->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForDirectiveClass; } }; /// This represents '#pragma omp for simd' directive. /// /// \code /// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPForSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForSimdDirectiveClass; } }; /// This represents '#pragma omp sections' directive. /// /// \code /// #pragma omp sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp sections' has clauses 'private' with /// the variables 'a' and 'b' and 'reduction' with operator '+' and variables /// 'c' and 'd'. 
///
class OMPSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPSectionsDirectiveClass,
                               llvm::omp::OMPD_sections, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPSectionsDirective()
      : OMPExecutableDirective(OMPSectionsDirectiveClass,
                               llvm::omp::OMPD_sections, SourceLocation(),
                               SourceLocation()) {}

  /// Sets special task reduction descriptor.
  /// Stored in the first child slot of this directive.
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPSectionsDirective *>(this)->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionsDirectiveClass;
  }
};

/// This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPSectionDirectiveClass,
                               llvm::omp::OMPD_section, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPSectionDirective()
      : OMPExecutableDirective(OMPSectionDirectiveClass,
                               llvm::omp::OMPD_section, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPSectionDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt, bool HasCancel);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionDirectiveClass;
  }
};

/// This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPSingleDirective()
      : OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSingleDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSingleDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSingleDirectiveClass;
  }
};

/// This represents '#pragma omp master' directive.
/// /// \code /// #pragma omp master /// \endcode /// class OMPMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPMasterDirective() : OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPMasterDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterDirectiveClass; } }; /// This represents '#pragma omp critical' directive. /// /// \code /// #pragma omp critical /// \endcode /// class OMPCriticalDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Name of the directive. DeclarationNameInfo DirName; /// Build directive with the given start and end location. /// /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPCriticalDirectiveClass, llvm::omp::OMPD_critical, StartLoc, EndLoc), DirName(Name) {} /// Build an empty directive. /// explicit OMPCriticalDirective() : OMPExecutableDirective(OMPCriticalDirectiveClass, llvm::omp::OMPD_critical, SourceLocation(), SourceLocation()) {} /// Set name of the directive. /// /// \param Name Name of the directive. /// void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; } public: /// Creates directive. /// /// \param C AST context. /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPCriticalDirective * Create(const ASTContext &C, const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCriticalDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return name of the directive. /// DeclarationNameInfo getDirectiveName() const { return DirName; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCriticalDirectiveClass; } }; /// This represents '#pragma omp parallel for' directive. /// /// \code /// #pragma omp parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current region has inner cancel directive. 
bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForDirectiveClass, llvm::omp::OMPD_parallel_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPParallelForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForDirectiveClass, llvm::omp::OMPD_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren(getLoopsNumber(), llvm::omp::OMPD_parallel_for)] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. 
/// \param NumClauses Number of clauses. /// static OMPParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_parallel_for)]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPParallelForDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForDirectiveClass; } }; /// This represents '#pragma omp parallel for simd' directive. /// /// \code /// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for simd' has clauses /// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j' /// and linear step 's', 'reduction' with operator '+' and variables 'c' and /// 'd'. /// class OMPParallelForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForSimdDirectiveClass, llvm::omp::OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. 
/// explicit OMPParallelForSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForSimdDirectiveClass, llvm::omp::OMPD_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp parallel master' directive. 
///
/// \code
/// #pragma omp parallel master private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel master' has clauses
/// 'private' with the variables 'a' and 'b'
///
class OMPParallelMasterDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPParallelMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelMasterDirectiveClass,
                               llvm::omp::OMPD_parallel_master, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPParallelMasterDirective()
      : OMPExecutableDirective(OMPParallelMasterDirectiveClass,
                               llvm::omp::OMPD_parallel_master,
                               SourceLocation(), SourceLocation()) {}

  /// Sets special task reduction descriptor.
  /// Stored in the first child slot of this directive.
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  ///
  static OMPParallelMasterDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
         Expr *TaskRedRef);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelMasterDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterDirectiveClass;
  }
};

/// This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelSectionsDirectiveClass,
                               llvm::omp::OMPD_parallel_sections, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPParallelSectionsDirective()
      : OMPExecutableDirective(OMPParallelSectionsDirectiveClass,
                               llvm::omp::OMPD_parallel_sections,
                               SourceLocation(), SourceLocation()) {}

  /// Sets special task reduction descriptor.
  /// Stored in the first child slot of this directive.
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPParallelSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelSectionsDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelSectionsDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
  }
};

/// This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if this directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskDirective()
      : OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task,
                               SourceLocation(), SourceLocation()) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true, if current directive has inner cancel directive.
  ///
  static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt, bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskDirective *CreateEmpty(const ASTContext &C,
                                       unsigned NumClauses, EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskDirectiveClass;
  }
};

/// This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
///
class OMPTaskyieldDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskyieldDirectiveClass,
                               llvm::omp::OMPD_taskyield, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskyieldDirective()
      : OMPExecutableDirective(OMPTaskyieldDirectiveClass,
                               llvm::omp::OMPD_taskyield, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPTaskyieldDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskyieldDirectiveClass;
  }
};

/// This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
///
class OMPBarrierDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPBarrierDirectiveClass,
                               llvm::omp::OMPD_barrier, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPBarrierDirective()
      : OMPExecutableDirective(OMPBarrierDirectiveClass,
                               llvm::omp::OMPD_barrier, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPBarrierDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPBarrierDirectiveClass;
  }
};

/// This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
///
class OMPTaskwaitDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskwaitDirectiveClass,
                               llvm::omp::OMPD_taskwait, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskwaitDirective()
      : OMPExecutableDirective(OMPTaskwaitDirectiveClass,
                               llvm::omp::OMPD_taskwait, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPTaskwaitDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskwaitDirectiveClass;
  }
};

/// This represents '#pragma omp taskgroup' directive.
///
/// \code
/// #pragma omp taskgroup
/// \endcode
///
class OMPTaskgroupDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskgroupDirectiveClass,
                               llvm::omp::OMPD_taskgroup, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskgroupDirective()
      : OMPExecutableDirective(OMPTaskgroupDirectiveClass,
                               llvm::omp::OMPD_taskgroup, SourceLocation(),
                               SourceLocation()) {}

  /// Sets the task_reduction return variable.
  /// Stored in the first child slot of this directive.
  void setReductionRef(Expr *RR) { Data->getChildren()[0] = RR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param ReductionRef Reference to the task_reduction return variable.
  ///
  static OMPTaskgroupDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
         Expr *ReductionRef);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C,
                                            unsigned NumClauses, EmptyShell);

  /// Returns reference to the task_reduction return variable.
  const Expr *getReductionRef() const {
    return const_cast<OMPTaskgroupDirective *>(this)->getReductionRef();
  }
  Expr *getReductionRef() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskgroupDirectiveClass;
  }
};

/// This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments- variables 'a'
/// and 'b'.
/// 'omp flush' directive does not have clauses but have an optional list of
/// variables to flush. This list of variables is stored within some fake clause
/// FlushClause.
class OMPFlushDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPFlushDirective() : OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses (only single OMPFlushClause clause is /// allowed). /// static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPFlushDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPFlushDirectiveClass; } }; /// This represents '#pragma omp depobj' directive. /// /// \code /// #pragma omp depobj(a) depend(in:x,y) /// \endcode /// In this example directive '#pragma omp depobj' initializes a depobj object /// 'a' with dependence type 'in' and a list with 'x' and 'y' locators. class OMPDepobjDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPDepobjDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPDepobjDirective() : OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// static OMPDepobjDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPDepobjDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDepobjDirectiveClass; } }; /// This represents '#pragma omp ordered' directive. /// /// \code /// #pragma omp ordered /// \endcode /// class OMPOrderedDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPOrderedDirectiveClass, llvm::omp::OMPD_ordered, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPOrderedDirective() : OMPExecutableDirective(OMPOrderedDirectiveClass, llvm::omp::OMPD_ordered, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. 
/// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPOrderedDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// \param IsStandalone true, if the the standalone directive is created. /// static OMPOrderedDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, bool IsStandalone, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPOrderedDirectiveClass; } }; /// This represents '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has clause 'capture'. /// class OMPAtomicDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Used for 'atomic update' or 'atomic capture' constructs. They may /// have atomic expressions of forms /// \code /// x = x binop expr; /// x = expr binop x; /// \endcode /// This field is true for the first form of the expression and false for the /// second. Required for correct codegen of non-associative operations (like /// << or >>). bool IsXLHSInRHSPart = false; /// Used for 'atomic update' or 'atomic capture' constructs. They may /// have atomic expressions of forms /// \code /// v = x; <update x>; /// <update x>; v = x; /// \endcode /// This field is true for the first(postfix) form of the expression and false /// otherwise. bool IsPostfixUpdate = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPAtomicDirective() : OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic, SourceLocation(), SourceLocation()) {} /// Set 'x' part of the associated expression/statement. void setX(Expr *X) { Data->getChildren()[0] = X; } /// Set helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. void setUpdateExpr(Expr *UE) { Data->getChildren()[1] = UE; } /// Set 'v' part of the associated expression/statement. void setV(Expr *V) { Data->getChildren()[2] = V; } /// Set 'expr' part of the associated expression/statement. void setExpr(Expr *E) { Data->getChildren()[3] = E; } public: /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr' /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for /// detailed description of 'x', 'v' and 'expr'). /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param X 'x' part of the associated expression/statement. /// \param V 'v' part of the associated expression/statement. /// \param E 'expr' part of the associated expression/statement. /// \param UE Helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the /// second. /// \param IsPostfixUpdate true if original value of 'x' must be stored in /// 'v', not an updated one. 
static OMPAtomicDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V, Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPAtomicDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Get 'x' part of the associated expression/statement. Expr *getX() { return cast_or_null<Expr>(Data->getChildren()[0]); } const Expr *getX() const { return cast_or_null<Expr>(Data->getChildren()[0]); } /// Get helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. Expr *getUpdateExpr() { return cast_or_null<Expr>(Data->getChildren()[1]); } const Expr *getUpdateExpr() const { return cast_or_null<Expr>(Data->getChildren()[1]); } /// Return true if helper update expression has form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; } /// Return true if 'v' expression must be updated to original value of /// 'x', false if 'v' must be updated to the new value of 'x'. bool isPostfixUpdate() const { return IsPostfixUpdate; } /// Get 'v' part of the associated expression/statement. Expr *getV() { return cast_or_null<Expr>(Data->getChildren()[2]); } const Expr *getV() const { return cast_or_null<Expr>(Data->getChildren()[2]); } /// Get 'expr' part of the associated expression/statement. 
Expr *getExpr() { return cast_or_null<Expr>(Data->getChildren()[3]); } const Expr *getExpr() const { return cast_or_null<Expr>(Data->getChildren()[3]); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPAtomicDirectiveClass; } }; /// This represents '#pragma omp target' directive. /// /// \code /// #pragma omp target if(a) /// \endcode /// In this example directive '#pragma omp target' has clause 'if' with /// condition 'a'. /// class OMPTargetDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetDirective() : OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDirectiveClass; } }; /// This represents '#pragma omp target data' directive. 
/// /// \code /// #pragma omp target data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target data' has clauses 'device' /// with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetDataDirectiveClass, llvm::omp::OMPD_target_data, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetDataDirective() : OMPExecutableDirective(OMPTargetDataDirectiveClass, llvm::omp::OMPD_target_data, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDataDirectiveClass; } }; /// This represents '#pragma omp target enter data' directive. 
/// /// \code /// #pragma omp target enter data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target enter data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetEnterDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetEnterDataDirectiveClass, llvm::omp::OMPD_target_enter_data, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetEnterDataDirective() : OMPExecutableDirective(OMPTargetEnterDataDirectiveClass, llvm::omp::OMPD_target_enter_data, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetEnterDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetEnterDataDirectiveClass; } }; /// This represents '#pragma omp target exit data' directive. 
/// /// \code /// #pragma omp target exit data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target exit data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetExitDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetExitDataDirectiveClass, llvm::omp::OMPD_target_exit_data, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetExitDataDirective() : OMPExecutableDirective(OMPTargetExitDataDirectiveClass, llvm::omp::OMPD_target_exit_data, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetExitDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetExitDataDirectiveClass; } }; /// This represents '#pragma omp target parallel' directive. /// /// \code /// #pragma omp target parallel if(a) /// \endcode /// In this example directive '#pragma omp target parallel' has clause 'if' with /// condition 'a'. 
/// class OMPTargetParallelDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetParallelDirectiveClass, llvm::omp::OMPD_target_parallel, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetParallelDirective() : OMPExecutableDirective(OMPTargetParallelDirectiveClass, llvm::omp::OMPD_target_parallel, SourceLocation(), SourceLocation()) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTargetParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetParallelDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns special task reduction reference expression. 
Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[0]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPTargetParallelDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelDirectiveClass; } }; /// This represents '#pragma omp target parallel for' directive. /// /// \code /// #pragma omp target parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp target parallel for' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPTargetParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current region has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetParallelForDirectiveClass, llvm::omp::OMPD_target_parallel_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetParallelForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetParallelForDirectiveClass, llvm::omp::OMPD_target_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_target_parallel_for)] = E; } /// Set cancel state. 
void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPTargetParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_target_parallel_for)]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPTargetParallelForDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForDirectiveClass; } }; /// This represents '#pragma omp teams' directive. /// /// \code /// #pragma omp teams if(a) /// \endcode /// In this example directive '#pragma omp teams' has clause 'if' with /// condition 'a'. 
/// class OMPTeamsDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTeamsDirective() : OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDirectiveClass; } }; /// This represents '#pragma omp cancellation point' directive. /// /// \code /// #pragma omp cancellation point for /// \endcode /// /// In this example a cancellation point is created for innermost 'for' region. class OMPCancellationPointDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// statements and child expressions. /// OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPCancellationPointDirectiveClass, llvm::omp::OMPD_cancellation_point, StartLoc, EndLoc) {} /// Build an empty directive. explicit OMPCancellationPointDirective() : OMPExecutableDirective(OMPCancellationPointDirectiveClass, llvm::omp::OMPD_cancellation_point, SourceLocation(), SourceLocation()) {} /// Set cancel region for current cancellation point. /// \param CR Cancellation region. void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPCancellationPointDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Creates an empty directive. /// /// \param C AST context. /// static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Get cancellation region for the current cancellation point. OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancellationPointDirectiveClass; } }; /// This represents '#pragma omp cancel' directive. /// /// \code /// #pragma omp cancel for /// \endcode /// /// In this example a cancel is created for innermost 'for' region. class OMPCancelDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPCancelDirective() : OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel, SourceLocation(), SourceLocation()) {} /// Set cancel region for current cancellation point. /// \param CR Cancellation region. void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// static OMPCancelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCancelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Get cancellation region for the current cancellation point. OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancelDirectiveClass; } }; /// This represents '#pragma omp taskloop' directive. /// /// \code /// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTaskLoopDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopDirectiveClass; } }; /// This represents '#pragma omp taskloop simd' directive. 
/// /// \code /// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop simd' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopSimdDirectiveClass, llvm::omp::OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopSimdDirectiveClass, llvm::omp::OMPD_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp master taskloop' directive. /// /// \code /// #pragma omp master taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp master taskloop' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPMasterTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPMasterTaskLoopDirectiveClass, llvm::omp::OMPD_master_taskloop, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPMasterTaskLoopDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPMasterTaskLoopDirectiveClass, llvm::omp::OMPD_master_taskloop, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. 
/// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPMasterTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPMasterTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterTaskLoopDirectiveClass; } }; /// This represents '#pragma omp master taskloop simd' directive. /// /// \code /// #pragma omp master taskloop simd private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp master taskloop simd' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPMasterTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPMasterTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass, llvm::omp::OMPD_master_taskloop_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. 
/// explicit OMPMasterTaskLoopSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass, llvm::omp::OMPD_master_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \p Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPMasterTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \p NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPMasterTaskLoopSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp parallel master taskloop' directive. /// /// \code /// #pragma omp parallel master taskloop private(a,b) grainsize(val) /// num_tasks(num) /// \endcode /// In this example directive '#pragma omp parallel master taskloop' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPParallelMasterTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. 
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelMasterTaskLoopDirective(SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelMasterTaskLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPParallelMasterTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                                         unsigned NumClauses,
                                                         unsigned CollapsedNum,
                                                         EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  // LLVM-style RTTI hook used by isa<>/dyn_cast<>.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass;
  }
};

/// This represents '#pragma omp parallel master taskloop simd' directive.
///
/// \code
/// #pragma omp parallel master taskloop simd private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop simd' has
/// clauses 'private' with the variables 'a' and 'b', 'grainsize' with
/// expression 'val' and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopSimdDirective : public OMPLoopDirective {
  // These classes may access the private members below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelMasterTaskLoopSimdDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelMasterTaskLoopSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
/// static OMPParallelMasterTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelMasterTaskLoopSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp distribute' directive. /// /// \code /// #pragma omp distribute private(a,b) /// \endcode /// In this example directive '#pragma omp distribute' has clauses 'private' /// with the variables 'a' and 'b' /// class OMPDistributeDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPDistributeDirectiveClass, llvm::omp::OMPD_distribute, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPDistributeDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPDistributeDirectiveClass, llvm::omp::OMPD_distribute, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. 
/// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeDirectiveClass; } }; /// This represents '#pragma omp target update' directive. /// /// \code /// #pragma omp target update to(a) from(b) device(1) /// \endcode /// In this example directive '#pragma omp target update' has clause 'to' with /// argument 'a', clause 'from' with argument 'b' and clause 'device' with /// argument '1'. /// class OMPTargetUpdateDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetUpdateDirectiveClass, llvm::omp::OMPD_target_update, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetUpdateDirective() : OMPExecutableDirective(OMPTargetUpdateDirectiveClass, llvm::omp::OMPD_target_update, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetUpdateDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses The number of clauses. /// static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetUpdateDirectiveClass; } }; /// This represents '#pragma omp distribute parallel for' composite /// directive. /// /// \code /// #pragma omp distribute parallel for private(a,b) /// \endcode /// In this example directive '#pragma omp distribute parallel for' has clause /// 'private' with the variables 'a' and 'b' /// class OMPDistributeParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPDistributeParallelForDirectiveClass, llvm::omp::OMPD_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. 
/// explicit OMPDistributeParallelForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPDistributeParallelForDirectiveClass, llvm::omp::OMPD_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_distribute_parallel_for)] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. 
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_distribute_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    // Delegate to the non-const overload, which does not mutate the node.
    return const_cast<OMPDistributeParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  // LLVM-style RTTI hook used by isa<>/dyn_cast<>.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for simd' has
/// clause 'private' with the variables 'x'
///
class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective {
  // These classes may access the private members below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeParallelForSimdDirective *Create(
      const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
      unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
      Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForSimdDirective *CreateEmpty(
      const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
      EmptyShell);

  // LLVM-style RTTI hook used by isa<>/dyn_cast<>.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass;
  }
};

/// This represents '#pragma omp distribute simd' composite directive.
///
/// \code
/// #pragma omp distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute simd' has clause
/// 'private' with the variables 'x'
///
class OMPDistributeSimdDirective final : public OMPLoopDirective {
  // These classes may access the private members below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
/// explicit OMPDistributeSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPDistributeSimdDirectiveClass, llvm::omp::OMPD_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp target parallel for simd' directive. /// /// \code /// #pragma omp target parallel for simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target parallel for simd' has clauses /// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen' /// with the variable 'c'. /// class OMPTargetParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// OMPTargetParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass, llvm::omp::OMPD_target_parallel_for_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass, llvm::omp::OMPD_target_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target simd' directive. /// /// \code /// #pragma omp target simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target simd' has clauses 'private' /// with the variable 'a', 'map' with the variable 'b' and 'safelen' with /// the variable 'c'. 
/// class OMPTargetSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetSimdDirectiveClass, llvm::omp::OMPD_target_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetSimdDirectiveClass, llvm::omp::OMPD_target_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute' directive. 
///
/// \code
/// #pragma omp teams distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute' has clauses
/// 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_teams_distribute, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_teams_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeDirectiveClass;
  }
};

/// This represents '#pragma omp teams distribute simd'
/// combined directive.
///
/// \code
/// #pragma omp teams distribute simd private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute simd'
/// has clause 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeSimdDirective(SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                      unsigned NumClauses,
                                                      unsigned CollapsedNum,
                                                      EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass;
  }
};

/// This represents '#pragma omp teams distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for simd'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                             SourceLocation EndLoc,
                                             unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTeamsDistributeParallelForSimdDirectiveClass;
  }
};

/// This represents '#pragma omp teams distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTeamsDistributeParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp target teams' directive.
///
/// \code
/// #pragma omp target teams if(a>0)
/// \endcode
/// In this example directive '#pragma omp target teams' has clause 'if' with
/// condition 'a>0'.
///
class OMPTargetTeamsDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetTeamsDirectiveClass,
                               llvm::omp::OMPD_target_teams, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTargetTeamsDirective()
      : OMPExecutableDirective(OMPTargetTeamsDirectiveClass,
                               llvm::omp::OMPD_target_teams, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetTeamsDirective *Create(const ASTContext &C,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         ArrayRef<OMPClause *> Clauses,
                                         Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDirectiveClass;
  }
};

/// This represents '#pragma omp target teams distribute' combined directive.
///
/// \code
/// #pragma omp target teams distribute private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute' has clause
/// 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass;
  }
};

/// This represents '#pragma omp target teams distribute parallel for' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for' has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeParallelForDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                               SourceLocation EndLoc,
                                               unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_parallel_for,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(),
        llvm::omp::OMPD_target_teams_distribute_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(),
        llvm::omp::OMPD_target_teams_distribute_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTargetTeamsDistributeParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTargetTeamsDistributeParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp target teams distribute parallel for simd'
/// combined directive.
///
/// \code
/// #pragma omp target teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for simd' has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                                   SourceLocation EndLoc,
                                                   unsigned CollapsedNum)
      : OMPLoopDirective(
            OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
            llvm::omp::OMPD_target_teams_distribute_parallel_for_simd,
            StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeParallelForSimdDirective(
      unsigned CollapsedNum)
      : OMPLoopDirective(
            OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
            llvm::omp::OMPD_target_teams_distribute_parallel_for_simd,
            SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeParallelForSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTargetTeamsDistributeParallelForSimdDirectiveClass;
  }
};

/// This represents '#pragma omp target teams distribute simd' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute simd'
/// has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
  }
};

/// This represents the '#pragma omp tile' loop transformation directive.
class OMPTileDirective final : public OMPLoopBasedDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Default list of offsets.
  enum {
    PreInitsOffset = 0,
    TransformedStmtOffset,
  };

  explicit OMPTileDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                            unsigned NumLoops)
      : OMPLoopBasedDirective(OMPTileDirectiveClass, llvm::omp::OMPD_tile,
                              StartLoc, EndLoc, NumLoops) {}

  /// Set the pre-init statements.
  void setPreInits(Stmt *PreInits) {
    Data->getChildren()[PreInitsOffset] = PreInits;
  }

  /// Set the de-sugared statement.
  void setTransformedStmt(Stmt *S) {
    Data->getChildren()[TransformedStmtOffset] = S;
  }

public:
  /// Create a new AST node representation for '#pragma omp tile'.
  ///
  /// \param C         Context of the AST.
  /// \param StartLoc  Location of the introducer (e.g. the 'omp' token).
  /// \param EndLoc    Location of the directive's end (e.g. the tok::eod).
  /// \param Clauses   The directive's clauses.
  /// \param NumLoops  Number of associated loops (number of items in the
  ///                  'sizes' clause).
  /// \param AssociatedStmt  The outermost associated loop.
  /// \param TransformedStmt The loop nest after tiling, or nullptr in
  ///                        dependent contexts.
  /// \param PreInits Helper preinits statements for the loop nest.
  static OMPTileDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  unsigned NumLoops, Stmt *AssociatedStmt,
                                  Stmt *TransformedStmt, Stmt *PreInits);

  /// Build an empty '#pragma omp tile' AST node for deserialization.
  ///
  /// \param C          Context of the AST.
  /// \param NumClauses Number of clauses to allocate.
  /// \param NumLoops   Number of associated loops to allocate.
  static OMPTileDirective *CreateEmpty(const ASTContext &C,
                                       unsigned NumClauses, unsigned NumLoops);

  unsigned getNumAssociatedLoops() const { return getLoopsNumber(); }

  /// Gets/sets the associated loops after tiling.
  ///
  /// This is in de-sugared format stored as a CompoundStmt.
  ///
  /// \code
  ///   for (...)
  ///     ...
  /// \endcode
  ///
  /// Note that if the generated loops become associated loops of another
  /// directive, they may need to be hoisted before them.
  Stmt *getTransformedStmt() const {
    return Data->getChildren()[TransformedStmtOffset];
  }

  /// Return preinits statement.
  Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTileDirectiveClass;
  }
};

/// This represents the '#pragma omp unroll' loop transformation directive.
///
/// \code
/// #pragma omp unroll
/// for (int i = 0; i < 64; ++i)
/// \endcode
class OMPUnrollDirective final : public OMPLoopBasedDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Default list of offsets.
  enum {
    PreInitsOffset = 0,
    TransformedStmtOffset,
  };

  explicit OMPUnrollDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPLoopBasedDirective(OMPUnrollDirectiveClass, llvm::omp::OMPD_unroll,
                              StartLoc, EndLoc, 1) {}

  /// Set the pre-init statements.
  void setPreInits(Stmt *PreInits) {
    Data->getChildren()[PreInitsOffset] = PreInits;
  }

  /// Set the de-sugared statement.
  void setTransformedStmt(Stmt *S) {
    Data->getChildren()[TransformedStmtOffset] = S;
  }

public:
  /// Create a new AST node representation for '#pragma omp unroll'.
  ///
  /// \param C         Context of the AST.
  /// \param StartLoc  Location of the introducer (e.g. the 'omp' token).
  /// \param EndLoc    Location of the directive's end (e.g. the tok::eod).
  /// \param Clauses   The directive's clauses.
  /// \param AssociatedStmt The outermost associated loop.
  /// \param TransformedStmt The loop nest after unrolling, or nullptr in
  ///                        dependent contexts.
  /// \param PreInits Helper preinits statements for the loop nest.
  static OMPUnrollDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
         Stmt *TransformedStmt, Stmt *PreInits);

  /// Build an empty '#pragma omp unroll' AST node for deserialization.
  ///
  /// \param C          Context of the AST.
  /// \param NumClauses Number of clauses to allocate.
  static OMPUnrollDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses);

  /// Get the de-sugared associated loops after unrolling.
  ///
  /// This is only used if the unrolled loop becomes an associated loop of
  /// another directive, otherwise the loop is emitted directly using loop
  /// transformation metadata. When the unrolled loop cannot be used by another
  /// directive (e.g. because of the full clause), the transformed stmt can also
  /// be nullptr.
  Stmt *getTransformedStmt() const {
    return Data->getChildren()[TransformedStmtOffset];
  }

  /// Return the pre-init statements.
  Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPUnrollDirectiveClass;
  }
};

/// This represents '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan inclusive(a)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'inclusive' with
/// list item 'a'.
class OMPScanDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPScanDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPScanDirective()
      : OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses (only single OMPFlushClause clause is
  /// allowed).
  ///
  static OMPScanDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPScanDirective *CreateEmpty(const ASTContext &C,
                                       unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPScanDirectiveClass;
  }
};

/// This represents '#pragma omp interop' directive.
///
/// \code
/// #pragma omp interop init(target:obj) device(x) depend(inout:y) nowait
/// \endcode
/// In this example directive '#pragma omp interop' has
/// clauses 'init', 'device', 'depend' and 'nowait'.
///
class OMPInteropDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPInteropDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPInteropDirectiveClass,
                               llvm::omp::OMPD_interop, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPInteropDirective()
      : OMPExecutableDirective(OMPInteropDirectiveClass,
                               llvm::omp::OMPD_interop, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses The directive's clauses.
  ///
  static OMPInteropDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses to allocate (deserialization).
  ///
  static OMPInteropDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPInteropDirectiveClass;
  }
};

/// This represents '#pragma omp dispatch' directive.
///
/// \code
/// #pragma omp dispatch device(dnum)
/// \endcode
/// This example shows a directive '#pragma omp dispatch' with a
/// device clause with variable 'dnum'.
///
class OMPDispatchDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// The location of the target-call.
  SourceLocation TargetCallLoc;

  /// Set the location of the target-call.
  void setTargetCallLoc(SourceLocation Loc) { TargetCallLoc = Loc; }

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPDispatchDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPDispatchDirectiveClass,
                               llvm::omp::OMPD_dispatch, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPDispatchDirective()
      : OMPExecutableDirective(OMPDispatchDirectiveClass,
                               llvm::omp::OMPD_dispatch, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TargetCallLoc Location of the target-call.
  ///
  static OMPDispatchDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
         SourceLocation TargetCallLoc);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDispatchDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// Return location of target-call.
  SourceLocation getTargetCallLoc() const { return TargetCallLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDispatchDirectiveClass;
  }
};

/// This represents '#pragma omp masked' directive.
/// \code
/// #pragma omp masked filter(tid)
/// \endcode
/// This example shows a directive '#pragma omp masked' with a filter clause
/// with variable 'tid'.
///
class OMPMaskedDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPMaskedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPMaskedDirectiveClass,
                               llvm::omp::OMPD_masked, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPMaskedDirective()
      : OMPExecutableDirective(OMPMaskedDirectiveClass,
                               llvm::omp::OMPD_masked, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPMaskedDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses to allocate (deserialization).
  ///
  static OMPMaskedDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMaskedDirectiveClass;
  }
};

} // end namespace clang

#endif
GB_to_hyper.c
//------------------------------------------------------------------------------
// GB_to_hyper: convert a matrix to hypersparse
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// On input, the matrix may have shallow A->p content; it is safely removed.
// On output, the matrix is always hypersparse (even if out of memory).  If the
// input matrix is non-hypersparse, it is given new A->p and A->h that are not
// shallow.  If the input matrix is already hypersparse, nothing is changed
// (and in that case A->p and A->h remain shallow on output if shallow on
// input).  The A->x and A->i content is not changed; it remains in whatever
// shallow/non-shallow state that it had on input.

// If an out-of-memory condition occurs, all content of the matrix is cleared.

// The input matrix may be jumbled; this is not an error condition.
#include "GB.h"

GrB_Info GB_to_hyper        // convert a matrix to hypersparse
(
    GrB_Matrix A,           // matrix to convert to hypersparse
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    // The input may be jumbled, and may contain zombies; neither is an error.
    ASSERT_MATRIX_OK_OR_JUMBLED (A, "A converting to hypersparse", GB0) ;
    // anz is saved now so the new Ap can be terminated with it at the end.
    int64_t anz = GB_NNZ (A) ;
    ASSERT (GB_ZOMBIES_OK (A)) ;

    //--------------------------------------------------------------------------
    // convert A to hypersparse form
    //--------------------------------------------------------------------------

    // If A is already hypersparse, this is a no-op: A->p and A->h are left
    // as-is (still shallow if they were shallow on input).
    if (!A->is_hyper)
    {

        //----------------------------------------------------------------------
        // determine the number of threads to use
        //----------------------------------------------------------------------

        int64_t n = A->vdim ;   // number of vectors in the non-hypersparse A

        GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
        int nthreads = GB_nthreads (n, chunk, nthreads_max) ;
        // oversubscribe tasks (8 per thread) for load balance with the
        // dynamic schedule, but never more tasks than vectors, and at least 1
        int ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ;
        ntasks = GB_IMIN (ntasks, n) ;
        ntasks = GB_IMAX (ntasks, 1) ;

        //----------------------------------------------------------------------
        // count the number of non-empty vectors in A in each slice
        //----------------------------------------------------------------------

        A->is_hyper = true ;    // A becomes hypersparse
        ASSERT (A->h == NULL) ;
        ASSERT (A->nvec == A->plen && A->plen == n) ;

        // Keep a handle on the old vector-pointer array: it is still needed
        // below to build the new Ap/Ah, after A->p has been replaced.
        const int64_t *GB_RESTRICT Ap_old = A->p ;
        bool Ap_old_shallow = A->p_shallow ;

        // Count [tid] = number of non-empty vectors in task tid's slice;
        // after the cumulative sum below it becomes each task's offset into
        // the new Ap/Ah arrays.
        int64_t *GB_RESTRICT Count = GB_MALLOC (ntasks+1, int64_t) ;
        if (Count == NULL)
        {
            // out of memory: all content of A is cleared (see file header)
            GB_PHIX_FREE (A) ;
            return (GB_OUT_OF_MEMORY) ;
        }

        int tid ;
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            int64_t jstart, jend, my_nvec_nonempty = 0 ; ;
            GB_PARTITION (jstart, jend, n, tid, ntasks) ;
            for (int64_t j = jstart ; j < jend ; j++)
            {
                // vector j is non-empty iff its pointer range is non-empty
                if (Ap_old [j] < Ap_old [j+1]) my_nvec_nonempty++ ;
            }
            Count [tid] = my_nvec_nonempty ;
        }

        //----------------------------------------------------------------------
        // compute cumulative sum of Counts and nvec_nonempty
        //----------------------------------------------------------------------

        // After this, Count [tid] is where task tid starts writing in the new
        // Ap/Ah, and Count [ntasks] is the total number of non-empty vectors.
        GB_cumsum (Count, ntasks, NULL, 1) ;
        int64_t nvec_nonempty = Count [ntasks] ;
        A->nvec_nonempty = nvec_nonempty ;

        //----------------------------------------------------------------------
        // allocate the new A->p and A->h
        //----------------------------------------------------------------------

        int64_t *GB_RESTRICT Ap_new = GB_MALLOC (nvec_nonempty+1, int64_t) ;
        int64_t *GB_RESTRICT Ah_new = GB_MALLOC (nvec_nonempty  , int64_t) ;
        if (Ap_new == NULL || Ah_new == NULL)
        {
            // out of memory: free the partial results and clear A
            GB_FREE (Count) ;
            GB_FREE (Ap_new) ;
            GB_FREE (Ah_new) ;
            GB_PHIX_FREE (A) ;
            return (GB_OUT_OF_MEMORY) ;
        }

        //----------------------------------------------------------------------
        // transplant the new A->p and A->h into the matrix
        //----------------------------------------------------------------------

        // The new arrays are owned by A (not shallow).  The old A->p is still
        // reachable via Ap_old and freed below (unless it was shallow).
        A->plen = nvec_nonempty ;
        A->nvec = nvec_nonempty ;
        A->p = Ap_new ;
        A->h = Ah_new ;
        A->p_shallow = false ;
        A->h_shallow = false ;

        //----------------------------------------------------------------------
        // construct the new hyperlist in the new A->p and A->h
        //----------------------------------------------------------------------

        // Same partition as the counting pass, so each task writes the
        // disjoint range Count [tid] .. Count [tid+1]-1 with no races.
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            int64_t jstart, jend, k = Count [tid] ;
            GB_PARTITION (jstart, jend, n, tid, ntasks) ;
            for (int64_t j = jstart ; j < jend ; j++)
            {
                if (Ap_old [j] < Ap_old [j+1])
                {
                    // vector index j is the kth vector in the new Ah
                    Ap_new [k] = Ap_old [j] ;
                    Ah_new [k] = j ;
                    k++ ;
                }
            }
            ASSERT (k == Count [tid+1]) ;
        }

        // terminate the new vector-pointer array with the total entry count
        Ap_new [nvec_nonempty] = anz ;
        A->magic = GB_MAGIC ;
        ASSERT (A->nvec_nonempty == GB_nvec_nonempty (A, Context)) ;

        //----------------------------------------------------------------------
        // free workspace, and free the old A->p unless it's shallow
        //----------------------------------------------------------------------

        GB_FREE (Count) ;
        if (!Ap_old_shallow)
        {
            GB_FREE (Ap_old) ;
        }
    }

    //--------------------------------------------------------------------------
    // A is now in hypersparse form
    //--------------------------------------------------------------------------

    // The conversion only repackaged the vector pointers; no entries were
    // added or deleted.
    ASSERT (anz == GB_NNZ (A)) ;
    ASSERT_MATRIX_OK_OR_JUMBLED (A, "A converted to hypersparse", GB0) ;
    ASSERT (A->is_hyper) ;
    return (GrB_SUCCESS) ;
}
BitEncode.h
// --------------------------------------------------------------------------
//  Binary Brain  -- binary neural net framework
//
//                                Copyright (C) 2018-2020 by Ryuji Fuchikami
//                                https://github.com/ryuz
//                                ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------

#pragma once

#include "bb/Manager.h"
#include "bb/Model.h"

namespace bb {

// BitEncode: expands each real-valued input node into m_bit_size binary
// output nodes.  Forward() scales each input by (2^bit_size - 1), truncates
// to int, and emits one output node per bit (BB_BINARY_HI/LO).  Backward()
// passes no gradient (returns zeros of the input shape).
template<typename BinType=float, typename RealType=float>
class BitEncode : public Model
{
    using _super = Model;

public:
    static inline std::string ModelName(void) { return "BitEncode"; }
    static inline std::string ObjectName(void){ return ModelName() + "_" + DataType<BinType>::Name() + "_" + DataType<RealType>::Name(); }

    std::string GetModelName(void)  const override { return ModelName(); }
    std::string GetObjectName(void) const override { return ObjectName(); }

protected:
    bool        m_host_only = false;    // true: force the CPU path even when CUDA is available

    index_t     m_bit_size = 0;         // number of bits emitted per input node
    indices_t   m_input_shape;
    indices_t   m_output_shape;

public:
    // creation parameters
    struct create_t
    {
        index_t     bit_size = 1;       // bits per input node
        indices_t   output_shape;       // optional; derived from input shape if empty/mismatched
    };

protected:
    BitEncode() {}

    BitEncode(create_t const &create)
    {
        m_bit_size     = create.bit_size;
        m_output_shape = create.output_shape;
    }

    /**
     * @brief  command processing
     * @detail handles runtime commands sent to the model
     * @param  args  command tokens; "host_only <bool>" toggles the CPU-only mode
     */
    void CommandProc(std::vector<std::string> args) override
    {
        // set HostOnly mode
        if (args.size() == 2 && args[0] == "host_only")
        {
            m_host_only = EvalBool(args[1]);
        }
    }

    void PrintInfoText(std::ostream& os, std::string indent, int columns, int nest, int depth) const override
    {
        _super::PrintInfoText(os, indent, columns, nest, depth);
//      os << indent << " input  shape : " << GetInputShape();
//      os << indent << " output shape : " << GetOutputShape();
        os << indent << " bit_size : " << m_bit_size << std::endl;
    }

public:
    ~BitEncode() {}

    static std::shared_ptr<BitEncode> Create(create_t const &create)
    {
        return std::shared_ptr<BitEncode>(new BitEncode(create));
    }

    static std::shared_ptr<BitEncode> Create(index_t bit_size, indices_t output_shape=indices_t())
    {
        create_t create;
        create.bit_size     = bit_size;
        create.output_shape = output_shape;
        return Create(create);
    }

    static std::shared_ptr<BitEncode> Create(void)
    {
        return Create(create_t());
    }

#ifdef BB_PYBIND11
    // pybind11-facing factory (same behavior as the two-argument Create)
    static std::shared_ptr<BitEncode> CreatePy(index_t bit_size, indices_t output_shape=indices_t())
    {
        create_t create;
        create.bit_size     = bit_size;
        create.output_shape = output_shape;
        return Create(create);
    }
#endif

    /**
     * @brief  set input shape
     * @detail Sets the input shape and (re)derives the output shape so that
     *         GetOutputShape() is valid afterwards.  Internal state is reset
     *         even if the same shape is passed again.  If no output shape was
     *         given, or its size disagrees with input_size * bit_size, the
     *         output shape is the input shape with dimension 0 multiplied by
     *         bit_size.
     * @param  shape  shape of the nodes making up one frame
     * @return the resulting output shape
     */
    indices_t SetInputShape(indices_t shape)
    {
        m_input_shape = shape;
        if ( m_output_shape.empty() || CalcShapeSize(shape)*m_bit_size != CalcShapeSize(m_output_shape) ) {
            m_output_shape = m_input_shape;
            m_output_shape[0] *= m_bit_size;
        }
        BB_ASSERT(CalcShapeSize(m_output_shape) % m_bit_size == 0);
        BB_ASSERT(CalcShapeSize(m_output_shape) / m_bit_size == CalcShapeSize(m_input_shape));
        return m_output_shape;
    }

    /**
     * @brief  get input shape
     * @return the input shape
     */
    indices_t GetInputShape(void) const
    {
        return m_input_shape;
    }

    /**
     * @brief  get output shape
     * @return the output shape
     */
    indices_t GetOutputShape(void) const
    {
        return m_output_shape;
    }

    /**
     * @brief  forward pass
     * @detail Encodes each input node into bit_size binary output nodes.
     *         Bit b of node n is written to output node (node_size*b + n).
     * @param  x_buf  input data
     * @param  train  true during training (unused here)
     * @return encoded output buffer
     */
    inline FrameBuffer Forward(FrameBuffer x_buf, bool train = true)
    {
        // allocate the return buffer
        FrameBuffer y_buf( x_buf.GetFrameSize(), m_output_shape, DataType<BinType>::type);

#ifdef BB_WITH_CUDA
        if ( !m_host_only && DataType<BinType>::type == BB_TYPE_BIT && DataType<RealType>::type == BB_TYPE_FP32
                && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
            // CUDA version (only for BinType=Bit, RealType=fp32)
            auto x_ptr = x_buf.LockDeviceMemoryConst();
            auto y_ptr = y_buf.LockDeviceMemory(true);
            bbcu_bit_BitEncode<RealType>(
                    (RealType const *)x_ptr.GetAddr(),
                    (int           *)y_ptr.GetAddr(),
                    (unsigned int   )m_bit_size,
                    (RealType       )0,
                    (RealType       )1,
                    (RealType       )((1 << m_bit_size) - 1),
                    (RealType       )0,
                    (unsigned int   )GetInputNodeSize(),
                    (unsigned int   )x_buf.GetFrameSize(),
                    (unsigned int   )(x_buf.GetFrameStride() / sizeof(RealType)),
                    (unsigned int   )(y_buf.GetFrameStride() / sizeof(int))
                );
            return y_buf;
        }
#endif

        {
            // generic (CPU) version
            index_t frame_size = x_buf.GetFrameSize();
            index_t node_size  = x_buf.GetNodeSize();

            auto x_ptr = x_buf.LockConst<RealType>();
            auto y_ptr = y_buf.Lock<BinType>();

            #pragma omp parallel for
            for ( index_t node = 0; node < node_size; ++node ) {
                for ( index_t frame = 0; frame < frame_size; ++frame ) {
                    // scale [0,1] input to an integer in [0, 2^bit_size - 1]
                    // (truncating cast; NOTE(review): inputs outside [0,1]
                    // are not clamped here — confirm callers normalize)
                    int x = (int)(x_ptr.Get(frame, node) * ((1 << m_bit_size) - 1));
                    for ( int bit = 0; bit < m_bit_size; ++bit ) {
                        if ( x & (1 << bit) ) {
                            y_ptr.Set(frame, node_size*bit + node, (BinType)BB_BINARY_HI);
                        }
                        else {
                            y_ptr.Set(frame, node_size*bit + node, (BinType)BB_BINARY_LO);
                        }
                    }
                }
            }

            return y_buf;
        }
    }

    /**
     * @brief  backward pass
     * @detail The encoding is not differentiable; returns an all-zero
     *         gradient of the input shape (or the empty buffer unchanged).
     * @return backward result (zeros)
     */
    inline FrameBuffer Backward(FrameBuffer dy_buf)
    {
        if (dy_buf.Empty()) {
            return dy_buf;
        }

        // allocate the return buffer
        FrameBuffer dx_buf(dy_buf.GetFrameSize(), m_input_shape, DataType<RealType>::type);
        dx_buf.FillZero();
        return dx_buf;
    }

    // serialization
protected:
    void DumpObjectData(std::ostream &os) const override
    {
        // version
        std::int64_t ver = 1;
        bb::SaveValue(os, ver);

        // parent class
        _super::DumpObjectData(os);

        // members
        bb::SaveValue(os, m_bit_size);
        bb::SaveValue(os, m_input_shape);
        bb::SaveValue(os, m_output_shape);
    }

    void LoadObjectData(std::istream &is) override
    {
        // version
        std::int64_t ver;
        bb::LoadValue(is, ver);
        BB_ASSERT(ver == 1);

        // parent class
        _super::LoadObjectData(is);

        // members
        bb::LoadValue(is, m_bit_size);
        bb::LoadValue(is, m_input_shape);
        bb::LoadValue(is, m_output_shape);

        // rebuild derived state (legacy dumps may lack an output shape)
        if ( m_output_shape.empty() && !m_input_shape.empty() ) {
            m_output_shape = m_input_shape;
            m_output_shape[0] *= m_bit_size;
            BB_ASSERT(m_bit_size != 0);
            BB_ASSERT(CalcShapeSize(m_output_shape) % m_bit_size == 0);
            BB_ASSERT(CalcShapeSize(m_output_shape) / m_bit_size == CalcShapeSize(m_input_shape));
        }
    }
};

}

// end of file
squareddifference_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"

#include <math.h>

/* Elementwise helper: dst[i] = (a[i] - b[i])^2 for count elements.
 * d*d is used instead of powf(d, 2) — exact same value, cheaper. */
static void squared_difference_kernel(const float* a, const float* b, float* dst, int count)
{
    for (int i = 0; i < count; i++)
    {
        float d = a[i] - b[i];
        dst[i] = d * d;
    }
}

/* Compute squared difference into a float buffer, dispatching on rank.
 * Returns 0 on success, -1 for unsupported dims. */
static int squared_difference_dispatch(const struct ir_tensor* input_tensor_0, const struct ir_tensor* output_tensor,
                                       const float* input0, const float* input1, float* output, int num_thread)
{
    /* dims size = 1, 2 or 3: flat elementwise loop */
    if (input_tensor_0->dim_num < 4)
    {
        squared_difference_kernel(input0, input1, output, output_tensor->elem_num);
        return 0;
    }
    /* dims size = 4: parallelize over channels (NCHW) */
    else if (output_tensor->dim_num == 4)
    {
        int w = output_tensor->dims[3];
        int h = output_tensor->dims[2];
        int channels = output_tensor->dims[1];
        int size = h * w;
        int c_step = h * w;

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            squared_difference_kernel(input0 + c_step * q, input1 + c_step * q, output + c_step * q, size);
        }
        return 0;
    }

    return -1;
}

/* fp32 reference kernel: output = (input0 - input1)^2.
 * Returns 0 on success, -1 for unsupported dims. */
int ref_squareddifference_fp32(struct ir_tensor* input_tensor_0, struct ir_tensor* input_tensor_1,
                               struct ir_tensor* output_tensor, int num_thread)
{
    float* input0 = input_tensor_0->data;
    float* input1 = input_tensor_1->data;
    float* output = output_tensor->data;

    return squared_difference_dispatch(input_tensor_0, output_tensor, input0, input1, output, num_thread);
}

/* uint8 reference kernel: dequantize inputs to float, compute
 * (input0 - input1)^2, then requantize into the output tensor.
 *
 * BUGFIX: the original returned 0 straight after the float computation,
 * so the uint8 output was never written, the requantization loop only ran
 * on the unsupported-dims error path (on uninitialized data), and all
 * three temporary buffers leaked.  Now the compute branches fall through
 * to the quantization step, and the buffers are always freed.
 *
 * Returns 0 on success, -1 on allocation failure or unsupported dims. */
int ref_squareddifference_uint8(struct ir_tensor* input_tensor_0, struct ir_tensor* input_tensor_1,
                                struct ir_tensor* output_tensor, int num_thread)
{
    uint8_t* input0_uint8 = input_tensor_0->data;
    uint8_t* input1_uint8 = input_tensor_1->data;
    uint8_t* output_uint8 = output_tensor->data;
    float input0_scale = input_tensor_0->scale;
    float input1_scale = input_tensor_1->scale;
    float output_scale = output_tensor->scale;
    int32_t input0_zero = input_tensor_0->zero_point;
    int32_t input1_zero = input_tensor_1->zero_point;
    int32_t output_zero = output_tensor->zero_point;
    int input0_size = input_tensor_0->elem_num;
    int input1_size = input_tensor_1->elem_num;
    int output_size = output_tensor->elem_num;

    /* temporary float workspaces for dequantized data and the float result */
    float* input0 = ( float* )sys_malloc(input0_size * sizeof(float));
    float* input1 = ( float* )sys_malloc(input1_size * sizeof(float));
    float* output = ( float* )sys_malloc(output_size * sizeof(float));
    if (input0 == NULL || input1 == NULL || output == NULL)
    {
        sys_free(input0);
        sys_free(input1);
        sys_free(output);
        return -1;
    }

    /* dequant */
    for (int i = 0; i < input0_size; i++)
    {
        input0[i] = (( float )input0_uint8[i] - ( float )input0_zero) * input0_scale;
    }
    for (int i = 0; i < input1_size; i++)
    {
        input1[i] = (( float )input1_uint8[i] - ( float )input1_zero) * input1_scale;
    }

    int ret = squared_difference_dispatch(input_tensor_0, output_tensor, input0, input1, output, num_thread);

    if (ret == 0)
    {
        /* quant: round, clamp to [0, 255], write back to the uint8 tensor */
        for (int i = 0; i < output_size; i++)
        {
            int udata = round(output[i] / output_scale + output_zero);
            if (udata > 255)
                udata = 255;
            else if (udata < 0)
                udata = 0;
            output_uint8[i] = udata;
        }
    }

    sys_free(input0);
    sys_free(input1);
    sys_free(output);

    return ret;
}

/* node lifecycle hooks: nothing to allocate for this reference op */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* run: fetch the node's tensors and dispatch on the input data type */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor_0;
    struct ir_tensor* input_tensor_1;
    struct ir_tensor* output_tensor;
    int layout = ir_graph->graph_layout;

    input_tensor_0 = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    input_tensor_1 = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int ret = -1;
    if (input_tensor_0->data_type == TENGINE_DT_FP32)
        ret = ref_squareddifference_fp32(input_tensor_0, input_tensor_1, output_tensor, exec_graph->num_thread);
    else if (input_tensor_0->data_type == TENGINE_DT_UINT8)
        ret = ref_squareddifference_uint8(input_tensor_0, input_tensor_1, output_tensor, exec_graph->num_thread);

    return ret;
}

static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    return OPS_SCORE_CANDO;
}

static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

static int reg_squareddifference_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_SQUAREDDIFFERENCE, &hcl_node_ops);
}

static int unreg_squareddifference_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_SQUAREDDIFFERENCE, &hcl_node_ops);
}

AUTO_REGISTER_OPS(reg_squareddifference_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_squareddifference_hcl_ops);
mrcore.c
/*************************************************************************** * Copyright 2013 CertiVox UK Ltd. * * This file is part of CertiVox MIRACL Crypto SDK. * * The CertiVox MIRACL Crypto SDK provides developers with an * extensive and efficient set of cryptographic functions. * For further information about its features and functionalities please * refer to http://www.certivox.com * * * The CertiVox MIRACL Crypto SDK is free software: you can * redistribute it and/or modify it under the terms of the * GNU Affero General Public License as published by the * Free Software Foundation, either version 3 of the License, * or (at your option) any later version. * * * The CertiVox MIRACL Crypto SDK is distributed in the hope * that it will be useful, but WITHOUT ANY WARRANTY; without even the * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU Affero General Public License for more details. * * * You should have received a copy of the GNU Affero General Public * License along with CertiVox MIRACL Crypto SDK. * If not, see <http://www.gnu.org/licenses/>. * * You can be released from the requirements of the license by purchasing * a commercial license. Buying such a license is mandatory as soon as you * develop commercial activities involving the CertiVox MIRACL Crypto SDK * without disclosing the source code of your own applications, or shipping * the CertiVox MIRACL Crypto SDK with a closed source product. * * ***************************************************************************/ /* * * MIRACL Core module - contains initialisation code and general purpose * utilities * mrcore.c * * Space can be saved by removing unneeded functions (mr_and ?) 
* */ #include "miracl.h" #include <stdlib.h> #include <string.h> #ifdef MR_FP #include <math.h> #endif /*** Multi-Threaded Support ***/ #ifndef MR_GENERIC_MT #ifdef MR_OPENMP_MT #include <omp.h> #define MR_MIP_EXISTS miracl *mr_mip; #pragma omp threadprivate(mr_mip) miracl *get_mip() { return mr_mip; } void mr_init_threading() { } void mr_end_threading() { } #endif #ifdef MR_WINDOWS_MT #include <windows.h> DWORD mr_key; miracl *get_mip() { return (miracl *)TlsGetValue(mr_key); } void mr_init_threading() { mr_key=TlsAlloc(); } void mr_end_threading() { TlsFree(mr_key); } #endif #ifdef MR_UNIX_MT #include <pthread.h> pthread_key_t mr_key; miracl *get_mip() { return (miracl *)pthread_getspecific(mr_key); } void mr_init_threading() { pthread_key_create(&mr_key,(void(*)(void *))NULL); } void mr_end_threading() { pthread_key_delete(mr_key); } #endif #ifndef MR_WINDOWS_MT #ifndef MR_UNIX_MT #ifndef MR_OPENMP_MT #ifdef MR_STATIC miracl mip; miracl *mr_mip=&mip; #else miracl *mr_mip=NULL; /* MIRACL's one and only global variable */ #endif #define MR_MIP_EXISTS miracl *get_mip() { return (miracl *)mr_mip; } #endif #endif #endif #ifdef MR_MIP_EXISTS void set_mip(miracl *mip) { mr_mip=mip; } #endif #endif /* See Advanced Windows by Jeffrey Richter, Chapter 12 for methods for creating different instances of this global for each executing thread when using Windows '95/NT */ #ifdef MR_STATIC #if MIRACL==8 static const int mr_small_primes[]= {2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103, 107,109,113,127,0}; #else static const int mr_small_primes[]= {2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103, 107,109,113,127,131,137,139,149,151,157,163,167,173,179,181,191,193,197,199,211, 223,227,229,233,239,241,251,257,263,269,271,277,281,283,293,307,311,313,317,331, 337,347,349,353,359,367,373,379,383,389,397,401,409,419,421,431,433,439,443,449, 457,461,463,467,479,487,491,499,503,509,521,523,541,547,557,563,569,571,577,587, 
593,599,601,607,613,617,619,631,641,643,647,653,659,661,673,677,683,691,701,709, 719,727,733,739,743,751,757,761,769,773,787,797,809,811,821,823,827,829,839,853, 857,859,863,877,881,883,887,907,911,919,929,937,941,947,953,967,971,977,983,991, 997,0}; #endif #endif #ifndef MR_STRIPPED_DOWN #ifndef MR_NO_STANDARD_IO static char *names[] = {(char *)"your program",(char *)"innum",(char *)"otnum",(char *)"jack",(char *)"normalise", (char *)"multiply",(char *)"divide",(char *)"incr",(char *)"decr",(char *)"premult", (char *)"subdiv",(char *)"fdsize",(char *)"egcd",(char *)"cbase", (char *)"cinnum",(char *)"cotnum",(char *)"nroot",(char *)"power", (char *)"powmod",(char *)"bigdig",(char *)"bigrand",(char *)"nxprime",(char *)"isprime", (char *)"mirvar",(char *)"mad",(char *)"multi_inverse",(char *)"putdig", (char *)"add",(char *)"subtract",(char *)"mirsys",(char *)"xgcd", (char *)"fpack",(char *)"dconv",(char *)"mr_shift",(char *)"mround",(char *)"fmul", (char *)"fdiv",(char *)"fadd",(char *)"fsub",(char *)"fcomp",(char *)"fconv", (char *)"frecip",(char *)"fpmul",(char *)"fincr",(char *)"",(char *)"ftrunc", (char *)"frand",(char *)"sftbit",(char *)"build",(char *)"logb2",(char *)"expint", (char *)"fpower",(char *)"froot",(char *)"fpi",(char *)"fexp",(char *)"flog",(char *)"fpowf", (char *)"ftan",(char *)"fatan",(char *)"fsin",(char *)"fasin",(char *)"fcos",(char *)"facos", (char *)"ftanh",(char *)"fatanh",(char *)"fsinh",(char *)"fasinh",(char *)"fcosh", (char *)"facosh",(char *)"flop",(char *)"gprime",(char *)"powltr",(char *)"fft_mult", (char *)"crt_init",(char *)"crt",(char *)"otstr",(char *)"instr",(char *)"cotstr",(char *)"cinstr",(char *)"powmod2", (char *)"prepare_monty",(char *)"nres",(char *)"redc",(char *)"nres_modmult",(char *)"nres_powmod", (char *)"nres_moddiv",(char *)"nres_powltr",(char *)"divisible",(char *)"remain", (char *)"fmodulo",(char *)"nres_modadd",(char *)"nres_modsub",(char *)"nres_negate", (char *)"ecurve_init",(char *)"ecurve_add",(char 
*)"ecurve_mult", (char *)"epoint_init",(char *)"epoint_set",(char *)"epoint_get",(char *)"nres_powmod2", (char *)"nres_sqroot",(char *)"sqroot",(char *)"nres_premult",(char *)"ecurve_mult2", (char *)"ecurve_sub",(char *)"trial_division",(char *)"nxsafeprime",(char *)"nres_lucas",(char *)"lucas", (char *)"brick_init",(char *)"pow_brick",(char *)"set_user_function", (char *)"nres_powmodn",(char *)"powmodn",(char *)"ecurve_multn", (char *)"ebrick_init",(char *)"mul_brick",(char *)"epoint_norm",(char *)"nres_multi_inverse",(char *)"", (char *)"nres_dotprod",(char *)"epoint_negate",(char *)"ecurve_multi_add", (char *)"ecurve2_init",(char *)"",(char *)"epoint2_set",(char *)"epoint2_norm",(char *)"epoint2_get", (char *)"epoint2_comp",(char *)"ecurve2_add",(char *)"epoint2_negate",(char *)"ecurve2_sub", (char *)"ecurve2_multi_add",(char *)"ecurve2_mult",(char *)"ecurve2_multn",(char *)"ecurve2_mult2", (char *)"ebrick2_init",(char *)"mul2_brick",(char *)"prepare_basis",(char *)"strong_bigrand", (char *)"bytes_to_big",(char *)"big_to_bytes",(char *)"set_io_buffer_size", (char *)"epoint_getxyz",(char *)"epoint_double_add",(char *)"nres_double_inverse", (char *)"double_inverse",(char *)"epoint_x",(char *)"hamming",(char *)"expb2",(char *)"bigbits", (char *)"nres_lazy",(char *)"zzn2_imul",(char *)"nres_double_modadd",(char *)"nres_double_modsub", /*155*/(char *)"",(char *)"zzn2_from_int",(char *)"zzn2_negate",(char *)"zzn2_conj",(char *)"zzn2_add", (char *)"zzn2_sub",(char *)"zzn2_smul",(char *)"zzn2_mul",(char *)"zzn2_inv",(char *)"zzn2_timesi",(char *)"zzn2_powl", (char *)"zzn2_from_bigs",(char *)"zzn2_from_big",(char *)"zzn2_from_ints", (char *)"zzn2_sadd",(char *)"zzn2_ssub",(char *)"zzn2_times_irp",(char *)"zzn2_div2", (char *)"zzn3_from_int",(char *)"zzn3_from_ints",(char *)"zzn3_from_bigs", (char *)"zzn3_from_big",(char *)"zzn3_negate",(char *)"zzn3_powq",(char *)"zzn3_init", (char *)"zzn3_add",(char *)"zzn3_sadd",(char *)"zzn3_sub",(char *)"zzn3_ssub",(char 
*)"zzn3_smul", (char *)"zzn3_imul",(char *)"zzn3_mul",(char *)"zzn3_inv",(char *)"zzn3_div2",(char *)"zzn3_timesi", (char *)"epoint_multi_norm",(char *)"mr_jsf",(char *)"epoint2_multi_norm", (char *)"ecn2_compare",(char *)"ecn2_norm",(char *)"ecn2_set",(char *)"zzn2_txx", (char *)"zzn2_txd",(char *)"nres_div2",(char *)"nres_div3",(char *)"zzn2_div3", (char *)"ecn2_setx",(char *)"ecn2_rhs",(char *)"zzn2_qr",(char *)"zzn2_sqrt",(char *)"ecn2_add",(char *)"ecn2_mul2_jsf",(char *)"ecn2_mul", (char *)"nres_div5",(char *)"zzn2_div5",(char *)"zzn2_sqr",(char *)"ecn2_add_sub",(char *)"ecn2_psi",(char *)"invmodp", (char *)"zzn2_multi_inverse",(char *)"ecn2_multi_norm",(char *)"ecn2_precomp",(char *)"ecn2_mul4_gls_v", (char *)"ecn2_mul2",(char *)"ecn2_precomp_gls",(char *)"ecn2_mul2_gls", (char *)"ecn2_brick_init",(char *)"ecn2_mul_brick_gls",(char *)"ecn2_multn",(char *)"zzn3_timesi2", (char *)"nres_complex",(char *)"zzn4_from_int",(char *)"zzn4_negate",(char *)"zzn4_conj",(char *)"zzn4_add",(char *)"zzn4_sadd",(char *)"zzn4_sub",(char *)"zzn4_ssub",(char *)"zzn4_smul",(char *)"zzn4_sqr", (char *)"zzn4_mul",(char *)"zzn4_inv",(char *)"zzn4_div2",(char *)"zzn4_powq",(char *)"zzn4_tx",(char *)"zzn4_imul",(char *)"zzn4_lmul",(char *)"zzn4_from_big", (char *)"ecn2_mult4"}; /* 0 - 243 (244 in all) */ #endif #endif #ifdef MR_NOASM /* C only versions of muldiv/muldvd/muldvd2/muldvm */ /* Note that mr_large should be twice the size of mr_small */ mr_small muldiv(mr_small a,mr_small b,mr_small c,mr_small m,mr_small *rp) { mr_small q; mr_large ldres,p=(mr_large)a*b+c; q=(mr_small)(MR_LROUND(p/m)); *rp=(mr_small)(p-(mr_large)q*m); return q; } #ifdef MR_FP_ROUNDING mr_small imuldiv(mr_small a,mr_small b,mr_small c,mr_small m,mr_large im,mr_small *rp) { mr_small q; mr_large ldres,p=(mr_large)a*b+c; q=(mr_small)MR_LROUND(p*im); *rp=(mr_small)(p-(mr_large)q*m); return q; } #endif #ifndef MR_NOFULLWIDTH mr_small muldvm(mr_small a,mr_small c,mr_small m,mr_small *rp) { mr_small q; union 
doubleword dble; dble.h[MR_BOT]=c; dble.h[MR_TOP]=a; q=(mr_small)(dble.d/m); *rp=(mr_small)(dble.d-(mr_large)q*m); return q; } mr_small muldvd(mr_small a,mr_small b,mr_small c,mr_small *rp) { union doubleword dble; dble.d=(mr_large)a*b+c; *rp=dble.h[MR_BOT]; return dble.h[MR_TOP]; } void muldvd2(mr_small a,mr_small b,mr_small *c,mr_small *rp) { union doubleword dble; dble.d=(mr_large)a*b+*c+*rp; *rp=dble.h[MR_BOT]; *c=dble.h[MR_TOP]; } #endif #endif #ifdef MR_NOFULLWIDTH /* no FULLWIDTH working, so supply dummies */ /* mr_small muldvd(mr_small a,mr_small b,mr_small c,mr_small *rp) { return (mr_small)0; } mr_small muldvm(mr_small a,mr_small c,mr_small m,mr_small *rp) { return (mr_small)0; } void muldvd2(mr_small a,mr_small b,mr_small *c,mr_small *rp) { } */ #endif #ifndef MR_NO_STANDARD_IO static void mputs(char *s) { /* output a string */ int i=0; while (s[i]!=0) fputc((int)s[i++],stdout); } #endif void mr_berror(_MIPD_ int nerr) { /* Big number error routine */ #ifndef MR_STRIPPED_DOWN int i; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERCON) { mr_mip->ERNUM=nerr; return; } #ifndef MR_NO_STANDARD_IO #ifndef MR_STRIPPED_DOWN mputs((char *)"\nMIRACL error from routine "); if (mr_mip->depth<MR_MAXDEPTH) mputs(names[mr_mip->trace[mr_mip->depth]]); else mputs((char *)"???"); fputc('\n',stdout); for (i=mr_mip->depth-1;i>=0;i--) { mputs((char *)" called from "); if (i<MR_MAXDEPTH) mputs(names[mr_mip->trace[i]]); else mputs((char *)"???"); fputc('\n',stdout); } switch (nerr) { case 1 : mputs((char *)"Number base too big for representation\n"); break; case 2 : mputs((char *)"Division by zero attempted\n"); break; case 3 : mputs((char *)"Overflow - Number too big\n"); break; case 4 : mputs((char *)"Internal result is negative\n"); break; case 5 : mputs((char *)"Input format error\n"); break; case 6 : mputs((char *)"Illegal number base\n"); break; case 7 : mputs((char *)"Illegal parameter usage\n"); break; case 8 : mputs((char *)"Out of 
space\n"); break; case 9 : mputs((char *)"Even root of a negative number\n"); break; case 10: mputs((char *)"Raising integer to negative power\n"); break; case 11: mputs((char *)"Attempt to take illegal root\n"); break; case 12: mputs((char *)"Integer operation attempted on Flash number\n"); break; case 13: mputs((char *)"Flash overflow\n"); break; case 14: mputs((char *)"Numbers too big\n"); break; case 15: mputs((char *)"Log of a non-positive number\n"); break; case 16: mputs((char *)"Flash to double conversion failure\n"); break; case 17: mputs((char *)"I/O buffer overflow\n"); break; case 18: mputs((char *)"MIRACL not initialised - no call to mirsys()\n"); break; case 19: mputs((char *)"Illegal modulus \n"); break; case 20: mputs((char *)"No modulus defined\n"); break; case 21: mputs((char *)"Exponent too big\n"); break; case 22: mputs((char *)"Unsupported Feature - check mirdef.h\n"); break; case 23: mputs((char *)"Specified double length type isn't double length\n"); break; case 24: mputs((char *)"Specified basis is NOT irreducible\n"); break; case 25: mputs((char *)"Unable to control Floating-point rounding\n"); break; case 26: mputs((char *)"Base must be binary (MR_ALWAYS_BINARY defined in mirdef.h ?)\n"); break; case 27: mputs((char *)"No irreducible basis defined\n"); break; case 28: mputs((char *)"Composite modulus\n"); break; case 29: mputs((char *)"Input/output error when reading from RNG device node\n"); break; default: mputs((char *)"Undefined error\n"); break; } exit(0); #else mputs((char *)"MIRACL error\n"); exit(0); #endif #endif } #ifndef MR_STRIPPED_DOWN void mr_track(_MIPDO_ ) { /* track course of program execution * * through the MIRACL routines */ #ifndef MR_NO_STANDARD_IO int i; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif for (i=0;i<mr_mip->depth;i++) fputc('-',stdout); fputc('>',stdout); mputs(names[mr_mip->trace[mr_mip->depth]]); fputc('\n',stdout); #endif } #endif #ifndef MR_NO_RAND mr_small brand(_MIPDO_ ) { /* Marsaglia & 
Zaman random number generator */ int i,k; mr_unsign32 pdiff,t; mr_small r; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->lg2b>32) { /* underlying type is > 32 bits. Assume <= 64 bits */ mr_mip->rndptr+=2; if (mr_mip->rndptr<NK-1) { r=(mr_small)mr_mip->ira[mr_mip->rndptr]; r=mr_shiftbits(r,mr_mip->lg2b-32); r+=(mr_small)mr_mip->ira[mr_mip->rndptr+1]; return r; } } else { mr_mip->rndptr++; if (mr_mip->rndptr<NK) return (mr_small)mr_mip->ira[mr_mip->rndptr]; } mr_mip->rndptr=0; for (i=0,k=NK-NJ;i<NK;i++,k++) { /* calculate next NK values */ if (k==NK) k=0; t=mr_mip->ira[k]; pdiff=t - mr_mip->ira[i] - mr_mip->borrow; if (pdiff<t) mr_mip->borrow=0; if (pdiff>t) mr_mip->borrow=1; mr_mip->ira[i]=pdiff; } if (mr_mip->lg2b>32) { /* double up */ r=(mr_small)mr_mip->ira[0]; r=mr_shiftbits(r,mr_mip->lg2b-32); r+=(mr_small)mr_mip->ira[1]; return r; } else return (mr_small)(mr_mip->ira[0]); } void irand(_MIPD_ mr_unsign32 seed) { /* initialise random number system */ int i,in; mr_unsign32 t,m=1L; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif mr_mip->borrow=0L; mr_mip->rndptr=0; mr_mip->ira[0]=seed; for (i=1;i<NK;i++) { /* fill initialisation vector */ in=(NV*i)%NK; mr_mip->ira[in]=m; t=m; m=seed-m; seed=t; } for (i=0;i<1000;i++) brand(_MIPPO_ ); /* "warm-up" & stir the generator */ } #endif mr_small mr_shiftbits(mr_small x,int n) { #ifdef MR_FP int i; mr_small dres; if (n==0) return x; if (n>0) { for (i=0;i<n;i++) x=x+x; return x; } n=-n; for (i=0;i<n;i++) x=MR_DIV(x,2.0); return x; #else if (n==0) return x; if (n>0) x<<=n; else x>>=(-n); return x; #endif } mr_small mr_setbase(_MIPD_ mr_small nb) { /* set base. 
Pack as many digits as * * possible into each computer word */ mr_small temp; #ifdef MR_FP mr_small dres; #endif #ifndef MR_NOFULLWIDTH BOOL fits; int bits; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif fits=FALSE; bits=MIRACL; while (bits>1) { bits/=2; temp=((mr_small)1<<bits); if (temp==nb) { fits=TRUE; break; } if (temp<nb || (bits%2)!=0) break; } if (fits) { mr_mip->apbase=nb; mr_mip->pack=MIRACL/bits; mr_mip->base=0; return 0; } #endif mr_mip->apbase=nb; mr_mip->pack=1; mr_mip->base=nb; #ifdef MR_SIMPLE_BASE return 0; #else if (mr_mip->base==0) return 0; temp=MR_DIV(MAXBASE,nb); while (temp>=nb) { temp=MR_DIV(temp,nb); mr_mip->base*=nb; mr_mip->pack++; } #ifdef MR_FP_ROUNDING mr_mip->inverse_base=mr_invert(mr_mip->base); return mr_mip->inverse_base; #else return 0; #endif #endif } #ifdef MR_FLASH BOOL fit(big x,big y,int f) { /* returns TRUE if x/y would fit flash format of length f */ int n,d; n=(int)(x->len&(MR_OBITS)); d=(int)(y->len&(MR_OBITS)); if (n==1 && x->w[0]==1) n=0; if (d==1 && y->w[0]==1) d=0; if (n+d<=f) return TRUE; return FALSE; } #endif int mr_lent(flash x) { /* return length of big or flash in words */ mr_lentype lx; lx=(x->len&(MR_OBITS)); #ifdef MR_FLASH return (int)((lx&(MR_MSK))+((lx>>(MR_BTS))&(MR_MSK))); #else return (int)lx; #endif } void zero(flash x) { /* set big/flash number to zero */ int i,n; mr_small *g; if (x==NULL) return; #ifdef MR_FLASH n=mr_lent(x); #else n=(x->len&MR_OBITS); #endif g=x->w; for (i=0;i<n;i++) g[i]=0; x->len=0; } void uconvert(_MIPD_ unsigned int n ,big x) { /* convert unsigned integer n to big number format */ int m; #ifdef MR_FP mr_small dres; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif zero(x); if (n==0) return; m=0; #ifndef MR_SIMPLE_BASE if (mr_mip->base==0) { #endif #ifndef MR_NOFULLWIDTH #if MR_IBITS > MIRACL while (n>0) { x->w[m++]=(mr_small)(n%((mr_small)1<<(MIRACL))); n/=((mr_small)1<<(MIRACL)); } #else x->w[m++]=(mr_small)n; #endif #endif #ifndef MR_SIMPLE_BASE } else while 
(n>0) { x->w[m++]=MR_REMAIN((mr_small)n,mr_mip->base); n=(unsigned int)((mr_small)n/mr_mip->base); } #endif x->len=m; } void tconvert(_MIPD_ mr_utype n,big x) { mr_lentype s; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (n==0) {zero(x); return;} s=0; if (n<0) { s=MR_MSBIT; n=(-n); } x->w[0]=n; x->len=1; x->len|=s; } void convert(_MIPD_ int n ,big x) { /* convert signed integer n to big number format */ mr_lentype s; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (n==0) {zero(x); return;} s=0; if (n<0) { s=MR_MSBIT; n=(-n); } uconvert(_MIPP_ (unsigned int)n,x); x->len|=s; } #ifndef MR_STATIC #ifdef mr_dltype void dlconv(_MIPD_ mr_dltype n,big x) { /* convert double length integer to big number format - rarely needed */ int m; mr_lentype s; #ifdef MR_FP mr_small dres; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif zero(x); if (n==0) return; s=0; if (n<0) { s=MR_MSBIT; n=(-n); } m=0; #ifndef MR_SIMPLE_BASE if (mr_mip->base==0) { #endif #ifndef MR_NOFULLWIDTH while (n>0) { x->w[m++]=(mr_small)(n%((mr_dltype)1<<(MIRACL))); n/=((mr_dltype)1<<(MIRACL)); } #endif #ifndef MR_SIMPLE_BASE } else while (n>0) { x->w[m++]=(mr_small)MR_REMAIN(n,mr_mip->base); n/=mr_mip->base; } #endif x->len=(m|s); } #endif void ulgconv(_MIPD_ unsigned long n,big x) { /* convert unsigned long integer to big number format - rarely needed */ int m; #ifdef MR_FP mr_small dres; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif zero(x); if (n==0) return; m=0; #ifndef MR_SIMPLE_BASE if (mr_mip->base==0) { #endif #ifndef MR_NOFULLWIDTH #if MR_LBITS > MIRACL while (n>0) { x->w[m++]=(mr_small)(n%(1L<<(MIRACL))); n/=(1L<<(MIRACL)); } #else x->w[m++]=(mr_small)n; #endif #endif #ifndef MR_SIMPLE_BASE } else while (n>0) { x->w[m++]=MR_REMAIN(n,mr_mip->base); n=(unsigned long)((mr_small)n/mr_mip->base); } #endif x->len=m; } void lgconv(_MIPD_ long n,big x) { /* convert signed long integer to big number format - rarely needed */ mr_lentype s; #ifdef MR_OS_THREADS 
    miracl *mr_mip=get_mip();
#endif
    if (n==0) {zero(x); return;}
    s=0;
    if (n<0) { s=MR_MSBIT; n=(-n); }
    ulgconv(_MIPP_ (unsigned long)n,x);
    x->len|=s;   /* re-apply sign after unsigned conversion */
}

flash mirvar(_MIPD_ int iv)
{ /* initialize big/flash number (heap-allocated), set to initial value iv */
    flash x;
    int align;
    char *ptr;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return NULL;
    MR_IN(23);
    if (!(mr_mip->active))
    {
        mr_berror(_MIPP_ MR_ERR_NO_MIRSYS);
        MR_OUT
        return NULL;
    }
    /* OK, now I control alignment.... */
    /* Allocate space for big, the length, the pointer, and the array */
    /* Do it all in one memory allocation - this is quicker */
    /* Ensure that the array has correct alignment */
    x=(big)mr_alloc(_MIPP_ mr_size(mr_mip->nib-1),1);
    if (x==NULL)
    {
        MR_OUT
        return x;
    }
    ptr=(char *)&x->w;
    align=(unsigned long)(ptr+sizeof(mr_small *))%sizeof(mr_small);
    x->w=(mr_small *)(ptr+sizeof(mr_small *)+sizeof(mr_small)-align);
    if (iv!=0) convert(_MIPP_ iv,x);
    MR_OUT
    return x;
}
#endif

flash mirvar_mem_variable(char *mem,int index,int sz)
{ /* carve the index-th big of size sz words out of caller-supplied memory */
    flash x;
    int align;
    char *ptr;
    int offset,r;
    /* alignment */
    offset=0;
    r=(unsigned long)mem%MR_SL;
    if (r>0) offset=MR_SL-r;
    x=(big)&mem[offset+mr_size(sz)*index];
    ptr=(char *)&x->w;
    align=(unsigned long)(ptr+sizeof(mr_small *))%sizeof(mr_small);
    x->w=(mr_small *)(ptr+sizeof(mr_small *)+sizeof(mr_small)-align);
    return x;
}

flash mirvar_mem(_MIPD_ char *mem,int index)
{ /* initialize big/flash number from pre-allocated memory */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return NULL;
    return mirvar_mem_variable(mem,index,mr_mip->nib-1);
}

void set_user_function(_MIPD_ BOOL (*user)(void))
{ /* register a user callback in the mip; requires an active system */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return;
    MR_IN(111)
    if (!(mr_mip->active))
    {
        mr_berror(_MIPP_ MR_ERR_NO_MIRSYS);
        MR_OUT
        return;
    }
    mr_mip->user=user;
    MR_OUT
}

#ifndef MR_STATIC
#ifndef MR_SIMPLE_IO
void set_io_buffer_size(_MIPD_ int len)
{ /* (re)size the I/O buffer; len==0 just wipes and frees the old one */
    int i;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (len<0) return;
    MR_IN(142)
    for (i=0;i<mr_mip->IOBSIZ;i++)
        mr_mip->IOBUFF[i]=0;   /* wipe old buffer before freeing */
    mr_free(mr_mip->IOBUFF);
    if (len==0)
    {
        MR_OUT
        return;
    }
    mr_mip->IOBSIZ=len;
    mr_mip->IOBUFF=(char *)mr_alloc(_MIPP_ len+1,1);
    mr_mip->IOBUFF[0]='\0';
    MR_OUT
}
#endif
#endif

/* Initialise a big from ROM given its fixed length */
BOOL init_big_from_rom(big x,int len,const mr_small *rom,int romsize,int *romptr)
{
    int i;
    zero(x);
    x->len=len;
    for (i=0;i<len;i++)
    {
        if (*romptr>=romsize) return FALSE;   /* ran off the end of ROM */
#ifdef MR_AVR
        x->w[i]=pgm_read_byte_near(&rom[*romptr]);
#else
        x->w[i]=rom[*romptr];
#endif
        (*romptr)++;
    }
    mr_lzero(x);
    return TRUE;
}

/* Initialise an elliptic curve point from ROM */
BOOL init_point_from_rom(epoint *P,int len,const mr_small *rom,int romsize,int *romptr)
{
    if (!init_big_from_rom(P->X,len,rom,romsize,romptr)) return FALSE;
    if (!init_big_from_rom(P->Y,len,rom,romsize,romptr)) return FALSE;
    P->marker=MR_EPOINT_NORMALIZED;
    return TRUE;
}

#ifdef MR_GENERIC_AND_STATIC
miracl *mirsys(miracl *mr_mip,int nd,mr_small nb)
#else
miracl *mirsys(int nd,mr_small nb)
#endif
{ /* Initialize MIRACL system to   *
   * use numbers to base nb, and   *
   * nd digits or (-nd) bytes long */
  /* In these cases mr_mip is passed as the first parameter */
#ifdef MR_GENERIC_AND_STATIC
    return mirsys_basic(mr_mip,nd,nb);
#endif
#ifdef MR_GENERIC_MT
#ifndef MR_STATIC
    miracl *mr_mip=mr_first_alloc();
    return mirsys_basic(mr_mip,nd,nb);
#endif
#endif
/* In these cases mr_mip is a "global" pointer and the mip itself
   is allocated from the heap.
In fact mr_mip (and mip) may be thread specific
   if some multi-threading scheme is implemented */
#ifndef MR_STATIC
#ifdef MR_WINDOWS_MT
    miracl *mr_mip=mr_first_alloc();
    TlsSetValue(mr_key,mr_mip);
#endif
#ifdef MR_UNIX_MT
    miracl *mr_mip=mr_first_alloc();
    pthread_setspecific(mr_key,mr_mip);
#endif
#ifdef MR_OPENMP_MT
    mr_mip=mr_first_alloc();
#endif
#ifndef MR_WINDOWS_MT
#ifndef MR_UNIX_MT
#ifndef MR_OPENMP_MT
    mr_mip=mr_first_alloc();
#endif
#endif
#endif
#endif
#ifndef MR_GENERIC_MT
    mr_mip=get_mip();
#endif
    return mirsys_basic(mr_mip,nd,nb);
}

miracl *mirsys_basic(miracl *mr_mip,int nd,mr_small nb)
{ /* the real initialisation work behind mirsys(): validates the base,
     sizes the number representation and allocates the workspace */
#ifndef MR_NO_RAND
    int i;
#endif
    mr_small b,nw;
#ifdef MR_FP
    mr_small dres;
#endif
    if (mr_mip==NULL) return NULL;
#ifndef MR_STRIPPED_DOWN
    mr_mip->depth=0;
    mr_mip->trace[0]=0;
    mr_mip->depth++;
    mr_mip->trace[mr_mip->depth]=29;
#endif
    /* digest hardware configuration */
#ifdef MR_NO_STANDARD_IO
    mr_mip->ERCON=TRUE;
#else
    mr_mip->ERCON=FALSE;
#endif
#ifndef MR_STATIC
    mr_mip->logN=0;
    mr_mip->degree=0;
    mr_mip->chin.NP=0;
#endif
    mr_mip->user=NULL;
    mr_mip->same=FALSE;
    mr_mip->first_one=FALSE;
    mr_mip->debug=FALSE;
    mr_mip->AA=0;
#ifndef MR_AFFINE_ONLY
    mr_mip->coord=MR_NOTSET;
#endif
#ifdef MR_NOFULLWIDTH
    if (nb==0)
    { /* full-width base not compiled in */
        mr_berror(_MIPP_ MR_ERR_BAD_BASE);
        MR_OUT
        return mr_mip;
    }
#endif
#ifndef MR_FP
#ifdef mr_dltype
#ifndef MR_NOFULLWIDTH
    if (sizeof(mr_dltype)<2*sizeof(mr_utype))
    { /* double length type, isn't */
        mr_berror(_MIPP_ MR_ERR_NOT_DOUBLE_LEN);
        MR_OUT
        return mr_mip;
    }
#endif
#endif
#endif
    if (nb==1 || nb>MAXBASE)
    {
        mr_berror(_MIPP_ MR_ERR_BAD_BASE);
        MR_OUT
        return mr_mip;
    }
#ifdef MR_FP_ROUNDING
    if (mr_setbase(_MIPP_ nb)==0)
    { /* unable in fact to control FP rounding */
        mr_berror(_MIPP_ MR_ERR_NO_ROUNDING);
        MR_OUT
        return mr_mip;
    }
#else
    mr_setbase(_MIPP_ nb);
#endif
    b=mr_mip->base;
#ifdef MR_SIMPLE_BASE
    if (b!=0)
    {
        mr_berror(_MIPP_ MR_ERR_BAD_BASE);
        MR_OUT
        return mr_mip;
    }
#endif
    mr_mip->lg2b=0;
    mr_mip->base2=1;
#ifndef MR_SIMPLE_BASE
    if (b==0)
    { /* full-width base: one whole machine word per digit */
#endif
        mr_mip->lg2b=MIRACL;
        mr_mip->base2=0;
#ifndef MR_SIMPLE_BASE
    }
    else while (b>1)
    { /* compute floor(log2(base)) and the nearest power of 2 */
        b=MR_DIV(b,2);
        mr_mip->lg2b++;
        mr_mip->base2*=2;
    }
#endif
#ifdef MR_ALWAYS_BINARY
    if (mr_mip->base!=mr_mip->base2)
    {
        mr_berror(_MIPP_ MR_ERR_NOT_BINARY);
        MR_OUT
        return mr_mip;
    }
#endif
    /* calculate total space for bigs */
    /* big -> |int len|small *ptr| alignment space | size in words +1| alignment up to multiple of 4 | */
    if (nd>0) nw=MR_ROUNDUP(nd,mr_mip->pack);   /* nd digits requested */
    else nw=MR_ROUNDUP(8*(-nd),mr_mip->lg2b);   /* (-nd) bytes requested */
    if (nw<1) nw=1;
    mr_mip->nib=(int)(nw+1);  /* add one extra word for small overflows */
#ifdef MR_STATIC
    if (nw>MR_STATIC)
    {
        mr_berror(_MIPP_ MR_ERR_TOO_BIG);
        MR_OUT
        return mr_mip;
    }
#endif
    /* mr_mip->nib=(int)(nw+1);    add one extra word for small overflows */
#ifdef MR_FLASH
    mr_mip->workprec=mr_mip->nib;
    mr_mip->stprec=mr_mip->nib;
    while (mr_mip->stprec>2 && mr_mip->stprec>MR_FLASH/mr_mip->lg2b)
        mr_mip->stprec=(mr_mip->stprec+1)/2;
    if (mr_mip->stprec<2) mr_mip->stprec=2;
#endif
#ifndef MR_DOUBLE_BIG
    mr_mip->check=ON;
#else
    mr_mip->check=OFF;
#endif
#ifndef MR_SIMPLE_BASE
#ifndef MR_SIMPLE_IO
    mr_mip->IOBASE=10;   /* defaults */
#endif
#endif
    mr_mip->ERNUM=0;
    mr_mip->NTRY=6;
    mr_mip->MONTY=ON;
#ifdef MR_FLASH
    mr_mip->EXACT=TRUE;
    mr_mip->RPOINT=OFF;
#endif
#ifndef MR_STRIPPED_DOWN
    mr_mip->TRACER=OFF;
#endif
#ifndef MR_SIMPLE_IO
    mr_mip->INPLEN=0;
    mr_mip->IOBSIZ=MR_DEFAULT_BUFFER_SIZE;
#endif
#ifdef MR_STATIC
    mr_mip->PRIMES=mr_small_primes;
#else
    mr_mip->PRIMES=NULL;
#ifndef MR_SIMPLE_IO
    mr_mip->IOBUFF=(char *)mr_alloc(_MIPP_ MR_DEFAULT_BUFFER_SIZE+1,1);
#endif
#endif
#ifndef MR_SIMPLE_IO
    mr_mip->IOBUFF[0]='\0';
#endif
    mr_mip->qnr=0;
    mr_mip->cnr=0;
    mr_mip->TWIST=0;
    mr_mip->pmod8=0;
    mr_mip->pmod9=0;
    /* quick start for rng. irand(.) should be called first before serious use..
*/
#ifndef MR_NO_RAND
    /* seed the lagged-Fibonacci style generator state */
    mr_mip->ira[0]=0x55555555;
    mr_mip->ira[1]=0x12345678;
    for (i=2;i<NK;i++)
        mr_mip->ira[i]=mr_mip->ira[i-1]+mr_mip->ira[i-2]+0x1379BDF1;
    mr_mip->rndptr=NK;
    mr_mip->borrow=0;
#endif
    /* temporarily double nib to size the (double-length) workspace;
       restored to (nib-1)/2 a few lines below */
    mr_mip->nib=2*mr_mip->nib+1;
#ifdef MR_FLASH
    if (mr_mip->nib!=(mr_mip->nib&(MR_MSK)))
#else
    if (mr_mip->nib!=(int)(mr_mip->nib&(MR_OBITS)))
#endif
    { /* requested size does not fit in the length field */
        mr_berror(_MIPP_ MR_ERR_TOO_BIG);
        mr_mip->nib=(mr_mip->nib-1)/2;
        MR_OUT
        return mr_mip;
    }
#ifndef MR_STATIC
    mr_mip->workspace=(char *)memalloc(_MIPP_ MR_SPACES); /* grab workspace */
#else
    memset(mr_mip->workspace,0,MR_BIG_RESERVE(MR_SPACES));
#endif
    mr_mip->M=0;
    mr_mip->fin=FALSE;
    mr_mip->fout=FALSE;
    mr_mip->active=ON;
    mr_mip->nib=(mr_mip->nib-1)/2;
    /* allocate memory for workspace variables */
    /* (the index gaps below leave room for the double/quad length bigs) */
#ifndef MR_DOUBLE_BIG
    mr_mip->w0=mirvar_mem(_MIPP_ mr_mip->workspace,0);    /* double length */
    mr_mip->w1=mirvar_mem(_MIPP_ mr_mip->workspace,2);
    mr_mip->w2=mirvar_mem(_MIPP_ mr_mip->workspace,3);
    mr_mip->w3=mirvar_mem(_MIPP_ mr_mip->workspace,4);
    mr_mip->w4=mirvar_mem(_MIPP_ mr_mip->workspace,5);
    mr_mip->w5=mirvar_mem(_MIPP_ mr_mip->workspace,6);    /* double length */
    mr_mip->w6=mirvar_mem(_MIPP_ mr_mip->workspace,8);    /* double length */
    mr_mip->w7=mirvar_mem(_MIPP_ mr_mip->workspace,10);   /* double length */
    mr_mip->w8=mirvar_mem(_MIPP_ mr_mip->workspace,12);
    mr_mip->w9=mirvar_mem(_MIPP_ mr_mip->workspace,13);
    mr_mip->w10=mirvar_mem(_MIPP_ mr_mip->workspace,14);
    mr_mip->w11=mirvar_mem(_MIPP_ mr_mip->workspace,15);
    mr_mip->w12=mirvar_mem(_MIPP_ mr_mip->workspace,16);
    mr_mip->w13=mirvar_mem(_MIPP_ mr_mip->workspace,17);
    mr_mip->w14=mirvar_mem(_MIPP_ mr_mip->workspace,18);
    mr_mip->w15=mirvar_mem(_MIPP_ mr_mip->workspace,19);
    mr_mip->sru=mirvar_mem(_MIPP_ mr_mip->workspace,20);
    mr_mip->modulus=mirvar_mem(_MIPP_ mr_mip->workspace,21);
    mr_mip->pR=mirvar_mem(_MIPP_ mr_mip->workspace,22);   /* double length */
    mr_mip->A=mirvar_mem(_MIPP_ mr_mip->workspace,24);
    mr_mip->B=mirvar_mem(_MIPP_ mr_mip->workspace,25);
    mr_mip->one=mirvar_mem(_MIPP_ mr_mip->workspace,26);
#ifdef MR_KCM
    mr_mip->big_ndash=mirvar_mem(_MIPP_ mr_mip->workspace,27);
    mr_mip->ws=mirvar_mem(_MIPP_ mr_mip->workspace,28);
    mr_mip->wt=mirvar_mem(_MIPP_ mr_mip->workspace,29);   /* double length */
#endif
#ifdef MR_FLASH
#ifdef MR_KCM
    mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,31);
#else
    mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,27);
#endif
#endif
#else
    /* w0-w7 are double normal length */
    mr_mip->w0=mirvar_mem(_MIPP_ mr_mip->workspace,0);    /* quad length */
    mr_mip->w1=mirvar_mem(_MIPP_ mr_mip->workspace,4);    /* double length */
    mr_mip->w2=mirvar_mem(_MIPP_ mr_mip->workspace,6);
    mr_mip->w3=mirvar_mem(_MIPP_ mr_mip->workspace,8);
    mr_mip->w4=mirvar_mem(_MIPP_ mr_mip->workspace,10);
    mr_mip->w5=mirvar_mem(_MIPP_ mr_mip->workspace,12);   /* quad length */
    mr_mip->w6=mirvar_mem(_MIPP_ mr_mip->workspace,16);   /* quad length */
    mr_mip->w7=mirvar_mem(_MIPP_ mr_mip->workspace,20);   /* quad length */
    mr_mip->w8=mirvar_mem(_MIPP_ mr_mip->workspace,24);
    mr_mip->w9=mirvar_mem(_MIPP_ mr_mip->workspace,25);
    mr_mip->w10=mirvar_mem(_MIPP_ mr_mip->workspace,26);
    mr_mip->w11=mirvar_mem(_MIPP_ mr_mip->workspace,27);
    mr_mip->w12=mirvar_mem(_MIPP_ mr_mip->workspace,28);
    mr_mip->w13=mirvar_mem(_MIPP_ mr_mip->workspace,29);
    mr_mip->w14=mirvar_mem(_MIPP_ mr_mip->workspace,30);
    mr_mip->w15=mirvar_mem(_MIPP_ mr_mip->workspace,31);
    mr_mip->sru=mirvar_mem(_MIPP_ mr_mip->workspace,32);
    mr_mip->modulus=mirvar_mem(_MIPP_ mr_mip->workspace,33);
    mr_mip->pR=mirvar_mem(_MIPP_ mr_mip->workspace,34);   /* double length */
    mr_mip->A=mirvar_mem(_MIPP_ mr_mip->workspace,36);
    mr_mip->B=mirvar_mem(_MIPP_ mr_mip->workspace,37);
    mr_mip->one=mirvar_mem(_MIPP_ mr_mip->workspace,38);
#ifdef MR_KCM
    mr_mip->big_ndash=mirvar_mem(_MIPP_ mr_mip->workspace,39);
    mr_mip->ws=mirvar_mem(_MIPP_ mr_mip->workspace,40);
    mr_mip->wt=mirvar_mem(_MIPP_ mr_mip->workspace,41);   /* double length */
#endif
#ifdef MR_FLASH
#ifdef MR_KCM
    mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,43);
#else
    mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,39);
#endif
#endif
#endif
    MR_OUT
    return mr_mip;
}

#ifndef MR_STATIC
/* allocate space for a number of bigs from the heap */
void *memalloc(_MIPD_ int num)
{
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    return mr_alloc(_MIPP_ mr_big_reserve(num,mr_mip->nib-1),1);
}
#endif

void memkill(_MIPD_ char *mem,int len)
{ /* wipe (and, if heap-based, free) a block of len bigs */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mem==NULL) return;
    memset(mem,0,mr_big_reserve(len,mr_mip->nib-1));
#ifndef MR_STATIC
    mr_free(mem);
#endif
}

#ifndef MR_STATIC
void mirkill(big x)
{ /* kill a big/flash variable, that is set it to zero and free its memory */
    if (x==NULL) return;
    zero(x);
    mr_free(x);
}
#endif

void mirexit(_MIPDO_ )
{ /* clean up after miracl */
    int i;
#ifdef MR_WINDOWS_MT
    miracl *mr_mip=get_mip();
#endif
#ifdef MR_UNIX_MT
    miracl *mr_mip=get_mip();
#endif
#ifdef MR_OPENMP_MT
    miracl *mr_mip=get_mip();
#endif
    mr_mip->ERCON=FALSE;
    mr_mip->active=OFF;
    memkill(_MIPP_ mr_mip->workspace,MR_SPACES);
#ifndef MR_NO_RAND
    for (i=0;i<NK;i++) mr_mip->ira[i]=0L;   /* scrub rng state */
#endif
#ifndef MR_STATIC
#ifndef MR_SIMPLE_IO
    set_io_buffer_size(_MIPP_ 0);
#endif
    if (mr_mip->PRIMES!=NULL) mr_free(mr_mip->PRIMES);
#else
#ifndef MR_SIMPLE_IO
    for (i=0;i<=MR_DEFAULT_BUFFER_SIZE;i++) mr_mip->IOBUFF[i]=0;
#endif
#endif
#ifndef MR_STATIC
    mr_free(mr_mip);
#ifdef MR_WINDOWS_MT
    TlsSetValue(mr_key, NULL);  /* Thank you Thales */
#endif
#endif
#ifndef MR_GENERIC_MT
#ifndef MR_WINDOWS_MT
#ifndef MR_UNIX_MT
#ifndef MR_STATIC
    mr_mip=NULL;
#endif
#endif
#endif
#endif
#ifdef MR_OPENMP_MT
    mr_mip=NULL;
#endif
}

int exsign(flash x)
{ /* extract sign of big/flash number */
    if ((x->len&(MR_MSBIT))==0) return PLUS;
    else return MINUS;
}

void insign(int s,flash x)
{ /* assert sign of big/flash number */
    if (x->len==0) return;
    if (s<0) x->len|=MR_MSBIT;
    else x->len&=MR_OBITS;
}

void mr_lzero(big x)
{ /* strip leading zeros from big number */
    mr_lentype s;
    int m;
    s=(x->len&(MR_MSBIT));
    m=(int)(x->len&(MR_OBITS));
    while (m>0 && x->w[m-1]==0)
        m--;
    x->len=m;
    if (m>0) x->len|=s;   /* zero is stored unsigned */
}

#ifndef MR_SIMPLE_IO
int getdig(_MIPD_ big x,int i)
{ /* extract a packed digit (1-based index i, in the apparent base) */
    int k;
    mr_small n;
#ifdef MR_FP
    mr_small dres;
#endif
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    i--;
    n=x->w[i/mr_mip->pack];
    if (mr_mip->pack==1) return (int)n;
    k=i%mr_mip->pack;
    for (i=1;i<=k;i++)
        n=MR_DIV(n,mr_mip->apbase);
    return (int)MR_REMAIN(n,mr_mip->apbase);
}

int numdig(_MIPD_ big x)
{ /* returns number of digits in x */
    int nd;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (x->len==0) return 0;
    nd=(int)(x->len&(MR_OBITS))*mr_mip->pack;
    while (getdig(_MIPP_ x,nd)==0)
        nd--;   /* skip leading zero digits in the top word */
    return nd;
}

void putdig(_MIPD_ int n,big x,int i)
{ /* insert a digit into a packed word */
    int j,k,lx;
    mr_small m,p;
    mr_lentype s;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return;
    MR_IN(26)
    s=(x->len&(MR_MSBIT));
    lx=(int)(x->len&(MR_OBITS));
    m=getdig(_MIPP_ x,i);   /* current digit, to be subtracted out */
    p=n;
    i--;
    j=i/mr_mip->pack;
    k=i%mr_mip->pack;
    for (i=1;i<=k;i++)
    { /* scale both old and new digit into position */
        m*=mr_mip->apbase;
        p*=mr_mip->apbase;
    }
    if (j>=mr_mip->nib && (mr_mip->check || j>=2*mr_mip->nib))
    {
        mr_berror(_MIPP_ MR_ERR_OVERFLOW);
        MR_OUT
        return;
    }
    x->w[j]=(x->w[j]-m)+p;
    if (j>=lx) x->len=((j+1)|s);
    mr_lzero(x);
    MR_OUT
}
#endif

#ifndef MR_FP
void mr_and(big x,big y,big z)
{ /* z= bitwise logical AND of x and y */
    int i,nx,ny,nz,nr;
    if (x==y)
    {
        copy(x,z);
        return;
    }
#ifdef MR_FLASH
    nx=mr_lent(x);
    ny=mr_lent(y);
    nz=mr_lent(z);
#else
    ny=(y->len&(MR_OBITS));
    nx=(x->len&(MR_OBITS));
    nz=(z->len&(MR_OBITS));
#endif
    if (ny<nx) nr=ny;
    else nr=nx;   /* AND only needs the shorter length */
    for (i=0;i<nr;i++)
        z->w[i]=x->w[i]&y->w[i];
    for (i=nr;i<nz;i++)
        z->w[i]=0;
    z->len=nr;
}

void mr_xor(big x,big y,big z)
{ /* z= bitwise exclusive-OR of x and y */
    int i,nx,ny,nz,nr;
    if (x==y)
    {
        copy(x,z);
        return;
    }
#ifdef MR_FLASH
    nx=mr_lent(x);
    ny=mr_lent(y);
    nz=mr_lent(z);
#else
    ny=(y->len&(MR_OBITS));
    nx=(x->len&(MR_OBITS));
    nz=(z->len&(MR_OBITS));
#endif
    if (ny<nx) nr=nx;
    else nr=ny;
    /* NOTE(review): nr is the max of the two lengths, so words of the
       shorter operand above its len are read; presumably they are kept
       zeroed by zero()/copy() - confirm against the allocation scheme */
    for (i=0;i<nr;i++)
        z->w[i]=x->w[i]^y->w[i];
    for (i=nr;i<nz;i++)
        z->w[i]=0;
    z->len=nr;
}
#endif

void copy(flash x,flash y)
{ /* copy x to y: y=x */
    int i,nx,ny;
    mr_small *gx,*gy;
    if (x==y || y==NULL) return;
    if (x==NULL)
    {
        zero(y);
        return;
    }
#ifdef MR_FLASH
    ny=mr_lent(y);
    nx=mr_lent(x);
#else
    ny=(y->len&(MR_OBITS));
    nx=(x->len&(MR_OBITS));
#endif
    gx=x->w;
    gy=y->w;
    for (i=nx;i<ny;i++)
        gy[i]=0;   /* clear stale high words of destination */
    for (i=0;i<nx;i++)
        gy[i]=gx[i];
    y->len=x->len;
}

void negify(flash x,flash y)
{ /* negate a big/flash variable: y=-x */
    copy(x,y);
    if (y->len!=0) y->len^=MR_MSBIT;   /* flip the sign bit (zero stays positive) */
}

void absol(flash x,flash y)
{ /* y=abs(x) */
    copy(x,y);
    y->len&=MR_OBITS;
}

BOOL mr_notint(flash x)
{ /* returns TRUE if x is Flash */
#ifdef MR_FLASH
    /* a non-zero denominator-length field marks a flash (rational) value */
    if ((((x->len&(MR_OBITS))>>(MR_BTS))&(MR_MSK))!=0) return TRUE;
#endif
    return FALSE;
}

void mr_shift(_MIPD_ big x,int n,big w)
{ /* set w=x.(mr_base^n) by shifting */
    mr_lentype s;
    int i,bl;
    mr_small *gw=w->w;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return;
    copy(x,w);
    if (w->len==0 || n==0) return;
    MR_IN(33)
    if (mr_notint(w)) mr_berror(_MIPP_ MR_ERR_INT_OP);
    s=(w->len&(MR_MSBIT));
    bl=(int)(w->len&(MR_OBITS))+n;
    if (bl<=0)
    { /* shifted completely out - result is zero */
        zero(w);
        MR_OUT
        return;
    }
    if (bl>mr_mip->nib && mr_mip->check) mr_berror(_MIPP_ MR_ERR_OVERFLOW);
    if (mr_mip->ERNUM)
    {
        MR_OUT
        return;
    }
    if (n>0)
    { /* shift up: move words towards the top, zero-fill the bottom */
        for (i=bl-1;i>=n;i--)
            gw[i]=gw[i-n];
        for (i=0;i<n;i++)
            gw[i]=0;
    }
    else
    { /* shift down: move words towards the bottom, zero-fill the top */
        n=(-n);
        for (i=0;i<bl;i++)
            gw[i]=gw[i+n];
        for (i=0;i<n;i++)
            gw[bl+i]=0;
    }
    w->len=(bl|s);
    MR_OUT
}

int size(big x)
{ /* get size of big number; convert to *
   * integer - if possible              */
    int n,m;
    mr_lentype s;
    if (x==NULL) return 0;
    s=(x->len&MR_MSBIT);
    m=(int)(x->len&MR_OBITS);
    if (m==0) return 0;
    if (m==1 && x->w[0]<(mr_small)MR_TOOBIG) n=(int)x->w[0];
    else n=MR_TOOBIG;   /* too big to represent - return sentinel */
    if (s==MR_MSBIT) return (-n);
    return n;
}

int mr_compare(big x,big y)
{ /* compare x and y: =1 if x>y  =-1 if x<y *
   * =0 if x=y                              */
    int m,n,sig;
    mr_lentype sx,sy;
    if (x==y) return 0;
    sx=(x->len&MR_MSBIT);
    sy=(y->len&MR_MSBIT);
    if (sx==0) sig=PLUS;
    else sig=MINUS;
    if (sx!=sy) return sig;   /* opposite signs decide immediately */
    m=(int)(x->len&MR_OBITS);
    n=(int)(y->len&MR_OBITS);
    if (m>n) return sig;
    if (m<n) return -sig;
    while (m>0)
    { /* check digit by digit */
        m--;
        if (x->w[m]>y->w[m]) return sig;
        if (x->w[m]<y->w[m]) return -sig;
    }
    return 0;
}

#ifdef MR_FLASH
void fpack(_MIPD_ big n,big d,flash x)
{ /* create floating-slash number x=n/d from *
   * big integer numerator and denominator   */
    mr_lentype s;
    int i,ld,ln;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return;
    MR_IN(31)
    ld=(int)(d->len&MR_OBITS);
    if (ld==0) mr_berror(_MIPP_ MR_ERR_FLASH_OVERFLOW);   /* division by zero */
    if (ld==1 && d->w[0]==1) ld=0;   /* denominator 1 stored implicitly */
    if (x==d) mr_berror(_MIPP_ MR_ERR_BAD_PARAMETERS);
    if (mr_notint(n) || mr_notint(d)) mr_berror(_MIPP_ MR_ERR_INT_OP);
    s=(n->len&MR_MSBIT);
    ln=(int)(n->len&MR_OBITS);
    if (ln==1 && n->w[0]==1) ln=0;   /* numerator 1 stored implicitly */
    if ((ld+ln>mr_mip->nib) && (mr_mip->check || ld+ln>2*mr_mip->nib))
        mr_berror(_MIPP_ MR_ERR_FLASH_OVERFLOW);
    if (mr_mip->ERNUM)
    {
        MR_OUT
        return;
    }
    copy(n,x);
    if (n->len==0)
    {
        MR_OUT
        return;
    }
    s^=(d->len&MR_MSBIT);   /* result sign = sign(n) XOR sign(d) */
    if (ld==0)
    {
        if (x->len!=0) x->len|=s;
        MR_OUT
        return;
    }
    for (i=0;i<ld;i++)
        x->w[ln+i]=d->w[i];   /* append denominator words above numerator */
    x->len=(s|(ln+((mr_lentype)ld<<MR_BTS)));
    MR_OUT
}

void numer(_MIPD_ flash x,big y)
{ /* extract numerator of x */
    int i,ln,ld;
    mr_lentype s,ly;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return;
    if (mr_notint(x))
    {
        s=(x->len&MR_MSBIT);
        ly=(x->len&MR_OBITS);
        ln=(int)(ly&MR_MSK);
        if (ln==0)
        { /* implicit numerator of 1 (or -1) */
            if(s==MR_MSBIT) convert(_MIPP_ (-1),y);
            else convert(_MIPP_ 1,y);
            return;
        }
        ld=(int)((ly>>MR_BTS)&MR_MSK);
        if (x!=y)
        {
            for (i=0;i<ln;i++)
                y->w[i]=x->w[i];
            for (i=ln;i<mr_lent(y);i++)
                y->w[i]=0;
        }
        else for (i=0;i<ld;i++)
            y->w[ln+i]=0;   /* in-place: just wipe the denominator part */
        y->len=(ln|s);
    }
    else copy(x,y);
}

void denom(_MIPD_ flash x,big y)
{ /* extract denominator of x */
    int i,ln,ld;
    mr_lentype ly;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return;
    if (!mr_notint(x))
    { /* an integer has an implicit denominator of 1 */
        convert(_MIPP_ 1,y);
        return;
    }
    ly=(x->len&MR_OBITS);
    ln=(int)(ly&MR_MSK);
    ld=(int)((ly>>MR_BTS)&MR_MSK);
    for (i=0;i<ld;i++)
y->w[i]=x->w[ln+i];   /* denominator words sit above the numerator */
    if (x==y)
        for (i=0;i<ln;i++)
            y->w[ld+i]=0;
    else
        for (i=ld;i<mr_lent(y);i++)
            y->w[i]=0;
    y->len=ld;
}
#endif

unsigned int igcd(unsigned int x,unsigned int y)
{ /* integer GCD, returns GCD of x and y (Euclid's algorithm) */
    unsigned int r;
    if (y==0) return x;
    while ((r=x%y)!=0)
        x=y,y=r;
    return y;
}

unsigned long lgcd(unsigned long x,unsigned long y)
{ /* long GCD, returns GCD of x and y */
    unsigned long r;
    if (y==0) return x;
    while ((r=x%y)!=0)
        x=y,y=r;
    return y;
}

unsigned int isqrt(unsigned int num,unsigned int guess)
{ /* square root of an integer */
    unsigned int sqr;
    unsigned int oldguess=guess;
    if (num==0) return 0;
    if (num<4) return 1;
    for (;;)
    { /* Newtons iteration */
        /* sqr=guess+(((num/guess)-guess)/2); */
        sqr=((num/guess)+guess)/2;
        if (sqr==guess || sqr==oldguess)
        { /* converged (or oscillating between two values) */
            if (sqr*sqr>num) sqr--;
            return sqr;
        }
        oldguess=guess;
        guess=sqr;
    }
}

unsigned long mr_lsqrt(unsigned long num,unsigned long guess)
{ /* square root of a long */
    unsigned long sqr;
    unsigned long oldguess=guess;
    if (num==0) return 0;
    if (num<4) return 1;
    for (;;)
    { /* Newtons iteration */
        /* sqr=guess+(((num/guess)-guess)/2); */
        sqr=((num/guess)+guess)/2;
        if (sqr==guess || sqr==oldguess)
        {
            if (sqr*sqr>num) sqr--;
            return sqr;
        }
        oldguess=guess;
        guess=sqr;
    }
}

mr_small sgcd(mr_small x,mr_small y)
{ /* integer GCD, returns GCD of x and y */
    mr_small r;
#ifdef MR_FP
    mr_small dres;
#endif
    if (y==(mr_small)0) return x;
    while ((r=MR_REMAIN(x,y))!=(mr_small)0)
        x=y,y=r;
    return y;
}

/* routines to support sliding-windows exponentiation *
 * in various contexts                                */

int mr_testbit(_MIPD_ big x,int n)
{ /* return value of n-th bit of big */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
#ifdef MR_FP
    mr_small m,a,dres;
    m=mr_shiftbits((mr_small)1,n%mr_mip->lg2b);
    a=x->w[n/mr_mip->lg2b];
    a=MR_DIV(a,m);
    /* odd quotient means the bit is set (FP representation) */
    if ((MR_DIV(a,2.0)*2.0) != a) return 1;
#else
    if ((x->w[n/mr_mip->lg2b] & ((mr_small)1<<(n%mr_mip->lg2b))) >0) return 1;
#endif
    return 0;
}

void mr_addbit(_MIPD_ big x,int n)
{ /* add 2^n to positive x - where
Abort if two 0's in a row */
        (*nbs)++;
        r*=2;
        if (mr_testbit(_MIPP_ x,j)) r+=1;
        if (r%4==0)
        { /* oops - too many zeros - shorten window */
            r/=4;
            *nbs-=2;
            *nzs=2;
            break;
        }
    }
    if (r%2==0)
    { /* remove trailing 0 */
        r/=2;
        *nzs=1;
        (*nbs)--;
    }
    return r;
}

int mr_window2(_MIPD_ big x,big y,int i,int *nbs,int *nzs)
{ /* two bit window for double exponentiation */
    int r,w;
    BOOL a,b,c,d;
    w=2;
    *nbs=1;
    *nzs=0;
    /* check for two leading 0's */
    a=mr_testbit(_MIPP_ x,i);
    b=mr_testbit(_MIPP_ y,i);
    if (!a && !b) return 0;
    if (i<1) w=1;   /* only one bit left to scan */
    if (a)
    { /* encode the leading bit pair of (x,y) as 1..3 */
        if (b) r=3;
        else r=2;
    }
    else r=1;
    if (w==1) return r;
    c=mr_testbit(_MIPP_ x,i-1);
    d=mr_testbit(_MIPP_ y,i-1);
    if (!c && !d)
    { /* second pair is 00 - stop with one trailing zero */
        *nzs=1;
        return r;
    }
    *nbs=2;
    r*=4;
    if (c)
    {
        if (d) r+=3;
        else r+=2;
    }
    else r+=1;
    return r;
}

int mr_naf_window(_MIPD_ big x,big x3,int i,int *nbs,int *nzs,int store)
{ /* returns sliding window value, using fractional windows   *
   * where "store" precomputed values are precalulated and    *
   * stored. Scanning starts at the i-th bit of x. nbs is     *
   * the number of bits processed. nzs is number of           *
   * additional trailing zeros detected. x and x3 (which is   *
   * 3*x) are combined to produce the NAF (non-adjacent       *
   * form). So if x=11011(27) and x3 is 1010001, the LSB is   *
   * ignored and the value 100T0T (32-4-1=27) processed,      *
   * where T is -1. Note x.P = (3x-x)/2.P. This value will    *
   * return +7, with nbs=4 and nzs=1, having stopped after    *
   * the first 4 bits. If it goes too far, it must backtrack  *
   * Note in an NAF non-zero elements are never side by side, *
   * so 10T10T won't happen. NOTE: return value n zero or     *
   * odd, -21 <= n <= +21                                     */
    int nb,j,r,biggest;
    /* get first bit */
    nb=mr_testbit(_MIPP_ x3,i)-mr_testbit(_MIPP_ x,i);
    *nbs=1;
    *nzs=0;
    if (nb==0) return 0;
    if (i==0) return nb;
    biggest=2*store-1;   /* largest window value that has a precomputed entry */
    if (nb>0) r=1;
    else r=(-1);
    for (j=i-1;j>0;j--)
    { /* accumulate signed NAF digits into r */
        (*nbs)++;
        r*=2;
        nb=mr_testbit(_MIPP_ x3,j)-mr_testbit(_MIPP_ x,j);
        if (nb>0) r+=1;
        if (nb<0) r-=1;
        if (abs(r)>biggest) break;
    }
    if (r%2!=0 && j!=0)
    { /* backtrack */
        if (nb>0) r=(r-1)/2;
        if (nb<0) r=(r+1)/2;
        (*nbs)--;
    }
    while (r%2==0)
    { /* remove trailing zeros */
        r/=2;
        (*nzs)++;
        (*nbs)--;
    }
    return r;
}

/* Some general purpose elliptic curve stuff */

BOOL point_at_infinity(epoint *p)
{ /* TRUE if p is the point at infinity (the group identity) */
    if (p==NULL) return FALSE;
    if (p->marker==MR_EPOINT_INFINITY) return TRUE;
    return FALSE;
}

#ifndef MR_STATIC
epoint* epoint_init(_MIPDO_ )
{ /* initialise epoint to general point at infinity. */
    epoint *p;
    char *ptr;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return NULL;
    MR_IN(96)
    /* Create space for whole structure in one heap access */
    p=(epoint *)mr_alloc(_MIPP_ mr_esize(mr_mip->nib-1),1);
    ptr=(char *)p+sizeof(epoint);
    p->X=mirvar_mem(_MIPP_ ptr,0);
    p->Y=mirvar_mem(_MIPP_ ptr,1);
#ifndef MR_AFFINE_ONLY
    p->Z=mirvar_mem(_MIPP_ ptr,2);
#endif
    p->marker=MR_EPOINT_INFINITY;
    MR_OUT
    return p;
}
#endif

epoint* epoint_init_mem_variable(_MIPD_ char *mem,int index,int sz)
{ /* carve the index-th epoint of size sz out of caller-supplied memory */
    epoint *p;
    char *ptr;
    int offset,r;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    offset=0;
    r=(unsigned long)mem%MR_SL;
    if (r>0) offset=MR_SL-r;   /* align to MR_SL boundary */
#ifndef MR_AFFINE_ONLY
    if (mr_mip->coord==MR_AFFINE)
        p=(epoint *)&mem[offset+index*mr_esize_a(sz)];
    else
#endif
        p=(epoint *)&mem[offset+index*mr_esize(sz)];
    ptr=(char *)p+sizeof(epoint);
    p->X=mirvar_mem_variable(ptr,0,sz);
    p->Y=mirvar_mem_variable(ptr,1,sz);
#ifndef MR_AFFINE_ONLY
    if (mr_mip->coord!=MR_AFFINE) p->Z=mirvar_mem_variable(ptr,2,sz);
#endif
    p->marker=MR_EPOINT_INFINITY;
    return p;
}

epoint* epoint_init_mem(_MIPD_ char *mem,int index)
{
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return NULL;
    return epoint_init_mem_variable(_MIPP_ mem,index,mr_mip->nib-1);
}

#ifndef MR_STATIC
/* allocate space for a number of epoints from the heap */
void *ecp_memalloc(_MIPD_ int num)
{
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
#ifndef MR_AFFINE_ONLY
    if (mr_mip->coord==MR_AFFINE)
        return mr_alloc(_MIPP_ mr_ecp_reserve_a(num,mr_mip->nib-1),1);
    else
#endif
        return mr_alloc(_MIPP_ mr_ecp_reserve(num,mr_mip->nib-1),1);
}
#endif

void ecp_memkill(_MIPD_ char *mem,int num)
{ /* wipe (and, if heap-based, free) a block of num epoints */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mem==NULL) return;
#ifndef MR_AFFINE_ONLY
    if (mr_mip->coord==MR_AFFINE)
        memset(mem,0,mr_ecp_reserve_a(num,mr_mip->nib-1));
    else
#endif
        memset(mem,0,mr_ecp_reserve(num,mr_mip->nib-1));
#ifndef MR_STATIC
    mr_free(mem);
#endif
}

#ifndef MR_STATIC
void epoint_free(epoint *p)
{ /* clean up point */
    if (p==NULL) return;
    zero(p->X);
    zero(p->Y);
#ifndef MR_AFFINE_ONLY
    if (p->marker==MR_EPOINT_GENERAL) zero(p->Z);
#endif
    mr_free(p);
}
#endif
valid.res8.src.h
#pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_256_14_14_128_1_1.h"
#include "gen_ukr_A4B2gemm_1_256_14_14_128_1_1.h"

/* Auto-generated convolution driver (push-button generated code).
 * Phase 1: each OpenMP thread repacks oriB into B in 8x8-transposed,
 * 16-wide panels via transpose8x8_avx.  Phase 2 (after the barrier):
 * a fully-unrolled tiled loop nest over channels (c*), spatial (xy*)
 * and filters (f*) calls the 6x2v / 4x2v scatter micro-kernels,
 * temporarily bumping Astrides by 28 for rows that wrap past the
 * 14-wide image edge.  uNf/uNc/uNw/uNh and Tc1/Txy3/Tf2 come from the
 * included generated headers. */
void testrun(float* A ,float*B, float*C, float*oriB ){
    int tid = omp_get_thread_num();
    /* Nx/Ny/Nh are set but unused below - generated-code leftovers */
    int Nx = 14;
    int Ny = 14;
    int Nh = 1;
    long long Astrides[6] = {0,2,4,6,8,10};
    int b1 = 0;
    /* Phase 1: repack B (the %1 and /1 factors are degenerate single-thread
       partitioning emitted by the generator) */
    for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
        for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
            transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
            transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
        }
    }
#pragma omp barrier// begin push button generated block
    for(int c5=0;c5<128+0;c5+=128) {
    for(int xy5=0;xy5<196+0;xy5+=196) {
    for(int f5=0;f5<256+0;f5+=256) {
    for(int xy4=xy5;xy4<min(196, 196+xy5);xy4+=196) {
    for(int f4=f5;f4<min(256, 256+f5);f4+=256) {
    for(int c4=c5;c4<min(128, 128+c5);c4+=128) {
    for(int c3=c4;c3<min(128, 128+c4);c3+=Tc1) {
    for(int xy3=xy4;xy3<min(196, 196+xy4);xy3+=Txy3) {
    for(int f3=f4;f3<min(256, 256+f4);f3+=Tf2) {
    for(int xy2=xy3;xy2<min(196, Txy3+xy3);xy2+=6) {
    for(int f2=f3;f2<min(256, Tf2+f3);f2+=16) {
    for(int c2=c3;c2<min(128, Tc1+c3);c2+=Tc1) {
    for(int c1=c2;c1<min(128, Tc1+c2);c1+=Tc1) {
    for(int xy1=xy2;xy1<min(196, 6+xy2);xy1+=6) {
    for(int f1=f2;f1<min(256, 16+f2);f1+=16) {
        int ctile=min(Tc1, 128-c1);
        /* decompose linear indices into spatial/filter coordinates
           (the /1 and %1 terms are degenerate, emitted by the generator) */
        int x1=xy1/14;
        int y1=xy1%14/1;
        int c1_1=c1/1;
        int c1_2=c1%1/1;
        int kf1_1=f1/16;
        int kf1_2=f1%16/1;
        int of1_1=f1/1;
        int of1_2=f1%1/1;
        int offsetA=0+b1*100352+c1_1*784+2*x1*28+2*y1*1+c1_2*1;
        int offsetB=0+kf1_1*2048+c1*16+0*16+0*16+kf1_2*1;
        int offsetC=0+b1*50176+of1_1*196+x1*14+y1*1+of1_2*1;
        if(14-y1>=6){
            /* whole 6-row tile fits within the current image row */
            cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
        }
        else if(14*14-xy1>=6){
            /* tile wraps to the next image row: bump the wrapped strides by
               28, run the kernel, then restore them */
            for(int sti=14-y1;sti<6;sti+=1) {
                Astrides[sti]+=28;
            }
            cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
            for(int sti=14-y1;sti<6;sti+=1) {
                Astrides[sti]-=28;
            }
        }
        else{
            /* fewer than 6 positions remain - use the 4-row kernel */
            cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
        }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    // end push button generated block
}
ellipticBuildIpdg.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/

#include "elliptic.h"

// qsort comparator (defined elsewhere in the project): orders assembled
// non-zero entries for the parallel matrix setup.
int parallelCompareRowColumn(const void* a, const void* b);

// Element-type-specific IPDG stiffness-matrix builders.
void ellipticBuildIpdgTri2D(elliptic_t* elliptic, int basisNp, dfloat* basis, dfloat lambda,
                            nonZero_t** A, dlong* nnzA, hlong* globalStarts);
void ellipticBuildIpdgTri3D(elliptic_t* elliptic, int basisNp, dfloat* basis, dfloat lambda,
                            nonZero_t** A, dlong* nnzA, hlong* globalStarts);
void ellipticBuildIpdgQuad2D(elliptic_t* elliptic, int basisNp, dfloat* basis, dfloat lambda,
                             nonZero_t** A, dlong* nnzA, hlong* globalStarts);
void ellipticBuildIpdgQuad3D(elliptic_t* elliptic, int basisNp, dfloat* basis, dfloat lambda,
                             nonZero_t** A, dlong* nnzA, hlong* globalStarts);
void ellipticBuildIpdgTet3D(elliptic_t* elliptic, int basisNp, dfloat* basis, dfloat lambda,
                            nonZero_t** A, dlong* nnzA, hlong* globalStarts);
void ellipticBuildIpdgHex3D(elliptic_t* elliptic, int basisNp, dfloat* basis, dfloat lambda,
                            nonZero_t** A, dlong* nnzA, hlong* globalStarts);

// Dispatch to the builder matching the mesh element type (and, for triangles
// and quadrilaterals, the spatial dimension of the problem).
void ellipticBuildIpdg(elliptic_t* elliptic, int basisNp, dfloat* basis, dfloat lambda,
                       nonZero_t** A, dlong* nnzA, hlong* globalStarts)
{
  switch(elliptic->elementType) {
  case TRIANGLES: {
    if(elliptic->dim == 2)
      ellipticBuildIpdgTri2D(elliptic, basisNp, basis, lambda, A, nnzA, globalStarts);
    else
      ellipticBuildIpdgTri3D(elliptic, basisNp, basis, lambda, A, nnzA, globalStarts);
    break;
  }
  case QUADRILATERALS: {
    if(elliptic->dim == 2)
      ellipticBuildIpdgQuad2D(elliptic, basisNp, basis, lambda, A, nnzA, globalStarts);
    else
      ellipticBuildIpdgQuad3D(elliptic, basisNp, basis, lambda, A, nnzA, globalStarts);
    break;
  }
  case TETRAHEDRA:
    ellipticBuildIpdgTet3D(elliptic, basisNp, basis, lambda, A, nnzA, globalStarts);
    break;
  case HEXAHEDRA:
    ellipticBuildIpdgHex3D(elliptic, basisNp, basis, lambda, A, nnzA, globalStarts);
    break;
  }
}

// Assemble the full IPDG operator (lambda*mass + stiffness + interior-penalty
// surface terms) for a 2D triangle mesh as a list of globally-numbered COO
// entries.
//
// Parameters:
//   elliptic     - solver state (mesh, tau penalty parameter, BCType map).
//   basisNp      - number of basis functions; if `basis` is NULL this is
//                  overwritten with mesh->Np and an identity basis is used.
//   basis        - optional (Np x Np) change-of-basis matrix, or NULL.
//   lambda       - Helmholtz shift multiplying the mass matrix.
//   A            - out: allocated array of nonZero_t entries (caller frees).
//   nnzA         - out: number of entries written into *A.
//   globalStarts - out: per-rank global DOF offsets (length mesh->size + 1).
void ellipticBuildIpdgTri2D(elliptic_t* elliptic, int basisNp, dfloat* basis, dfloat lambda,
                            nonZero_t** A, dlong* nnzA, hlong* globalStarts)
{
  mesh_t* mesh = elliptic->mesh;
  setupAide options = elliptic->options; // NOTE(review): unused local copy

  int rankM = mesh->rank;
  int Np = mesh->Np;
  int Nfp = mesh->Nfp;
  int Nfaces = mesh->Nfaces;
  dlong Nelements = mesh->Nelements;

  if(!basis) { // default to degree N Lagrange basis
    basisNp = Np;
    basis = (dfloat*) calloc(basisNp * basisNp, sizeof(dfloat));
    for(int n = 0; n < basisNp; ++n)
      basis[n + n * basisNp] = 1;
  }

  // number of degrees of freedom on this rank
  hlong Nnum = Np * Nelements;

  // create a global numbering system
  hlong* globalIds = (hlong*) calloc((Nelements + mesh->totalHaloPairs) * Np, sizeof(hlong));

  // every degree of freedom has its own global id
  MPI_Allgather(&Nnum, 1, MPI_HLONG, globalStarts + 1, 1, MPI_HLONG, mesh->comm);
  // in-place prefix sum: per-rank DOF counts -> global row offsets
  for(int r = 0; r < mesh->size; ++r)
    globalStarts[r + 1] = globalStarts[r] + globalStarts[r + 1];

  /* so find number of elements on each rank */
  dlong* rankNelements = (dlong*) calloc(mesh->size, sizeof(dlong));
  hlong* rankStarts = (hlong*) calloc(mesh->size + 1, sizeof(hlong));
  MPI_Allgather(&Nelements, 1, MPI_DLONG, rankNelements, 1, MPI_DLONG, mesh->comm);
  //find offsets
  for(int r = 0; r < mesh->size; ++r)
    rankStarts[r + 1] = rankStarts[r] + rankNelements[r];
  //use the offsets to set a global id
  for (dlong e = 0; e < Nelements; e++)
    for (int n = 0; n < Np; n++)
      globalIds[e * Np + n] = n + (e + rankStarts[rankM]) * Np;

  /* do a halo exchange of global node numbers */
  if (mesh->totalHaloPairs) {
    hlong* idSendBuffer = (hlong*) calloc(Np * mesh->totalHaloPairs, sizeof(hlong));
    meshHaloExchange(mesh, Np * sizeof(hlong), globalIds, idSendBuffer, globalIds + Nelements * Np);
    free(idSendBuffer);
  }

  // upper bound: one dense (basisNp x basisNp) block per element plus one per face
  dlong nnzLocalBound = basisNp * basisNp * (1 + Nfaces) * Nelements;

  // drop tolerance for entries in sparse storage
  dfloat tol = 1e-8;

  // surface mass matrices MS = MM*LIFT
  dfloat* MS = (dfloat*) calloc(Nfaces * Nfp * Nfp, sizeof(dfloat));
  for (int f = 0; f < Nfaces; f++)
    for (int n = 0; n < Nfp; n++) {
      int fn = mesh->faceNodes[f * Nfp + n];
      for (int m = 0; m < Nfp; m++) {
        dfloat MSnm = 0;
        for (int i = 0; i < Np; i++)
          MSnm += mesh->MM[fn + i * Np] * mesh->LIFT[i * Nfp * Nfaces + f * Nfp + m];
        MS[m + n * Nfp + f * Nfp * Nfp] = MSnm;
      }
    }

  // reset non-zero counter
  dlong nnz = 0;
  *A = (nonZero_t*) calloc(nnzLocalBound, sizeof(nonZero_t));

  // scratch blocks: SM = self (negative-trace) block, SP = neighbor block
  dfloat* SM = (dfloat*) calloc(Np * Np, sizeof(dfloat));
  dfloat* SP = (dfloat*) calloc(Np * Np, sizeof(dfloat));

  if(rankM == 0) printf("Building full IPDG matrix...");
  fflush(stdout);

  // loop over all elements
  for(dlong eM = 0; eM < Nelements; ++eM) {
    dlong vbase = eM * mesh->Nvgeo;
    dfloat drdx = mesh->vgeo[vbase + RXID];
    dfloat drdy = mesh->vgeo[vbase + RYID];
    dfloat dsdx = mesh->vgeo[vbase + SXID];
    dfloat dsdy = mesh->vgeo[vbase + SYID];
    dfloat J = mesh->vgeo[vbase + JID];

    /* start with stiffness matrix */
    for(int n = 0; n < Np; ++n)
      for(int m = 0; m < Np; ++m) {
        SM[n * Np + m] = J * lambda * mesh->MM[n * Np + m];
        SM[n * Np + m] += J * drdx * drdx * mesh->Srr[n * Np + m];
        SM[n * Np + m] += J * drdx * dsdx * mesh->Srs[n * Np + m];
        SM[n * Np + m] += J * dsdx * drdx * mesh->Ssr[n * Np + m];
        SM[n * Np + m] += J * dsdx * dsdx * mesh->Sss[n * Np + m];

        SM[n * Np + m] += J * drdy * drdy * mesh->Srr[n * Np + m];
        SM[n * Np + m] += J * drdy * dsdy * mesh->Srs[n * Np + m];
        SM[n * Np + m] += J * dsdy * drdy * mesh->Ssr[n * Np + m];
        SM[n * Np + m] += J * dsdy * dsdy * mesh->Sss[n * Np + m];
      }

    for (int fM = 0; fM < Nfaces; fM++) {
      // reset neighbor block for this face
      for (int n = 0; n < Np * Np; n++) SP[n] = 0;

      // load surface geofactors for this face
      dlong sid = mesh->Nsgeo * (eM * Nfaces + fM);
      dfloat nx = mesh->sgeo[sid + NXID];
      dfloat ny = mesh->sgeo[sid + NYID];
      dfloat sJ = mesh->sgeo[sid + SJID];
      dfloat hinv = mesh->sgeo[sid + IHID];

      dfloat penalty = elliptic->tau * hinv;

      dlong eP = mesh->EToE[eM * Nfaces + fM];
      if (eP < 0) eP = eM; // boundary face: fall back to this element's geofactors

      dlong vbaseP = eP * mesh->Nvgeo;
      dfloat drdxP = mesh->vgeo[vbaseP + RXID];
      dfloat drdyP = mesh->vgeo[vbaseP + RYID];
      dfloat dsdxP = mesh->vgeo[vbaseP + SXID];
      dfloat dsdyP = mesh->vgeo[vbaseP + SYID];

      int bcD = 0, bcN = 0;
      int bc = mesh->EToB[fM + Nfaces * eM]; //raw boundary flag
      int bcType = 0;
      if(bc > 0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann)

      // this needs to be double checked (and the code where these are used)
      if(bcType == 1) { // Dirichlet
        bcD = 1;
        bcN = 0;
      } else if(bcType == 2) { // Neumann
        bcD = 0;
        bcN = 1;
      }

      // reset eP (may be negative again on boundary; only used below as a
      // globalIds index inside the off-diagonal store, which is skipped when
      // SP stayed empty)
      eP = mesh->EToE[eM * Nfaces + fM];

      // mass matrix for this face
      dfloat* MSf = MS + fM * Nfp * Nfp;

      // penalty term just involves face nodes
      for(int n = 0; n < Nfp; ++n)
        for(int m = 0; m < Nfp; ++m) {
          dlong idM = eM * Nfp * Nfaces + fM * Nfp + m;
          int nM = mesh->faceNodes[fM * Nfp + n];
          int mM = mesh->faceNodes[fM * Nfp + m];
          int mP = (int) (mesh->vmapP[idM] % Np);

          dfloat MSfnm = sJ * MSf[n * Nfp + m];
          SM[nM * Np + mM] += 0.5 * (1. - bcN) * (1. + bcD) * penalty * MSfnm;
          SP[nM * Np + mP] += -0.5 * (1. - bcN) * (1. - bcD) * penalty * MSfnm;
        }

      // now add differential surface terms
      for(int n = 0; n < Nfp; ++n)
        for(int m = 0; m < Np; ++m) {
          int nM = mesh->faceNodes[fM * Nfp + n];

          for(int i = 0; i < Nfp; ++i) {
            int iM = mesh->faceNodes[fM * Nfp + i];
            int iP = (int) (mesh->vmapP[i + fM * Nfp + eM * Nfp * Nfaces] % Np);

            dfloat MSfni = sJ * MSf[n * Nfp + i]; // surface Jacobian built in

            dfloat DxMim = drdx * mesh->Dr[iM * Np + m] + dsdx * mesh->Ds[iM * Np + m];
            dfloat DyMim = drdy * mesh->Dr[iM * Np + m] + dsdy * mesh->Ds[iM * Np + m];

            dfloat DxPim = drdxP * mesh->Dr[iP * Np + m] + dsdxP * mesh->Ds[iP * Np + m];
            dfloat DyPim = drdyP * mesh->Dr[iP * Np + m] + dsdyP * mesh->Ds[iP * Np + m];

            // OP11 = OP11 + 0.5*( - mmE*Dn1)
            SM[nM * Np + m] += -0.5 * nx * (1 + bcD) * (1 - bcN) * MSfni * DxMim;
            SM[nM * Np + m] += -0.5 * ny * (1 + bcD) * (1 - bcN) * MSfni * DyMim;

            SP[nM * Np + m] += -0.5 * nx * (1 - bcD) * (1 - bcN) * MSfni * DxPim;
            SP[nM * Np + m] += -0.5 * ny * (1 - bcD) * (1 - bcN) * MSfni * DyPim;
          }
        }

      // transpose-side differential terms
      for(int n = 0; n < Np; ++n)
        for(int m = 0; m < Nfp; ++m) {
          int mM = mesh->faceNodes[fM * Nfp + m];
          int mP = (int) (mesh->vmapP[m + fM * Nfp + eM * Nfp * Nfaces] % Np);

          for(int i = 0; i < Nfp; ++i) {
            int iM = mesh->faceNodes[fM * Nfp + i];

            dfloat MSfim = sJ * MSf[i * Nfp + m];

            dfloat DxMin = drdx * mesh->Dr[iM * Np + n] + dsdx * mesh->Ds[iM * Np + n];
            dfloat DyMin = drdy * mesh->Dr[iM * Np + n] + dsdy * mesh->Ds[iM * Np + n];

            SM[n * Np + mM] += -0.5 * nx * (1 + bcD) * (1 - bcN) * DxMin * MSfim;
            SM[n * Np + mM] += -0.5 * ny * (1 + bcD) * (1 - bcN) * DyMin * MSfim;

            SP[n * Np + mP] += +0.5 * nx * (1 - bcD) * (1 - bcN) * DxMin * MSfim;
            SP[n * Np + mP] += +0.5 * ny * (1 - bcD) * (1 - bcN) * DyMin * MSfim;
          }
        }

      // store non-zeros for off diagonal block
      for(int j = 0; j < basisNp; ++j)
        for(int i = 0; i < basisNp; ++i) {
          dfloat val = 0;
          for(int n = 0; n < Np; ++n)
            for(int m = 0; m < Np; ++m)
              val += basis[n * Np + j] * SP[n * Np + m] * basis[m * Np + i];

          if(fabs(val) > tol) {
            (*A)[nnz].row = globalIds[eM * Np + j];
            (*A)[nnz].col = globalIds[eP * Np + i];
            (*A)[nnz].val = val;
            (*A)[nnz].ownerRank = rankM;
            ++nnz;
          }
        }
    }

    // store non-zeros for diagonal block
    for(int j = 0; j < basisNp; ++j)
      for(int i = 0; i < basisNp; ++i) {
        dfloat val = 0;
        for(int n = 0; n < Np; ++n)
          for(int m = 0; m < Np; ++m)
            val += basis[n * Np + j] * SM[n * Np + m] * basis[m * Np + i];

        if(fabs(val) > tol) {
          (*A)[nnz].row = globalIds[eM * Np + j];
          (*A)[nnz].col = globalIds[eM * Np + i];
          (*A)[nnz].val = val;
          (*A)[nnz].ownerRank = rankM;
          ++nnz;
        }
      }
  }

  //printf("nnz = %d\n", nnz);
  qsort((*A), nnz, sizeof(nonZero_t), parallelCompareRowColumn);
  //*A = (nonZero_t*) realloc(*A, nnz*sizeof(nonZero_t));

  *nnzA = nnz;

  if(rankM == 0) printf("done.\n");

#if 0
  // debug: densify and dump the assembled matrix (single rank only)
  dfloat* Ap = (dfloat*) calloc(Np * Np * Nelements * Nelements, sizeof(dfloat));
  for (int n = 0; n < nnz; n++) {
    int row = (*A)[n].row;
    int col = (*A)[n].col;
    Ap[col + row * Np * Nelements] = (*A)[n].val;
  }

  for (int i = 0; i < Np * Nelements; i++) {
    for (int j = 0; j < Nelements * Np; j++)
      printf("%4.2f \t", Ap[j + i * Np * Nelements]);
    printf("\n");
  }
#endif

  free(globalIds);
  free(SM);
  free(SP);
  free(MS);
}

// 3D (surface) triangle variant: same assembly as the 2D builder with the
// z-components of geometric factors, normals and gradients added.
void ellipticBuildIpdgTri3D(elliptic_t* elliptic, int basisNp,
dfloat* basis, dfloat lambda, nonZero_t** A, dlong* nnzA, hlong* globalStarts)
{
  mesh_t* mesh = elliptic->mesh;
  setupAide options = elliptic->options; // NOTE(review): unused local copy

  int rankM = mesh->rank;
  int Np = mesh->Np;
  int Nfp = mesh->Nfp;
  int Nfaces = mesh->Nfaces;
  dlong Nelements = mesh->Nelements;

  if(!basis) { // default to degree N Lagrange basis
    basisNp = Np;
    basis = (dfloat*) calloc(basisNp * basisNp, sizeof(dfloat));
    for(int n = 0; n < basisNp; ++n)
      basis[n + n * basisNp] = 1;
  }

  // number of degrees of freedom on this rank
  hlong Nnum = Np * Nelements;

  // create a global numbering system
  hlong* globalIds = (hlong*) calloc((Nelements + mesh->totalHaloPairs) * Np, sizeof(hlong));

  // every degree of freedom has its own global id
  MPI_Allgather(&Nnum, 1, MPI_HLONG, globalStarts + 1, 1, MPI_HLONG, mesh->comm);
  // in-place prefix sum: per-rank DOF counts -> global row offsets
  for(int r = 0; r < mesh->size; ++r)
    globalStarts[r + 1] = globalStarts[r] + globalStarts[r + 1];

  /* so find number of elements on each rank */
  dlong* rankNelements = (dlong*) calloc(mesh->size, sizeof(dlong));
  hlong* rankStarts = (hlong*) calloc(mesh->size + 1, sizeof(hlong));
  MPI_Allgather(&Nelements, 1, MPI_DLONG, rankNelements, 1, MPI_DLONG, mesh->comm);
  //find offsets
  for(int r = 0; r < mesh->size; ++r)
    rankStarts[r + 1] = rankStarts[r] + rankNelements[r];
  //use the offsets to set a global id
  for (dlong e = 0; e < Nelements; e++)
    for (int n = 0; n < Np; n++)
      globalIds[e * Np + n] = n + (e + rankStarts[rankM]) * Np;

  /* do a halo exchange of global node numbers */
  if (mesh->totalHaloPairs) {
    hlong* idSendBuffer = (hlong*) calloc(Np * mesh->totalHaloPairs, sizeof(hlong));
    meshHaloExchange(mesh, Np * sizeof(hlong), globalIds, idSendBuffer, globalIds + Nelements * Np);
    free(idSendBuffer);
  }

  // upper bound: one dense block per element plus one per face
  dlong nnzLocalBound = basisNp * basisNp * (1 + Nfaces) * Nelements;

  // drop tolerance for entries in sparse storage
  dfloat tol = 1e-8;

  // surface mass matrices MS = MM*LIFT
  dfloat* MS = (dfloat*) calloc(Nfaces * Nfp * Nfp, sizeof(dfloat));
  for (int f = 0; f < Nfaces; f++)
    for (int n = 0; n < Nfp; n++) {
      int fn = mesh->faceNodes[f * Nfp + n];
      for (int m = 0; m < Nfp; m++) {
        dfloat MSnm = 0;
        for (int i = 0; i < Np; i++)
          MSnm += mesh->MM[fn + i * Np] * mesh->LIFT[i * Nfp * Nfaces + f * Nfp + m];
        MS[m + n * Nfp + f * Nfp * Nfp] = MSnm;
      }
    }

  // reset non-zero counter
  dlong nnz = 0;
  *A = (nonZero_t*) calloc(nnzLocalBound, sizeof(nonZero_t));

  // scratch blocks: SM = self block, SP = neighbor block
  dfloat* SM = (dfloat*) calloc(Np * Np, sizeof(dfloat));
  dfloat* SP = (dfloat*) calloc(Np * Np, sizeof(dfloat));

  if(rankM == 0) printf("Building full IPDG matrix...");
  fflush(stdout);

  // loop over all elements
  for(dlong eM = 0; eM < Nelements; ++eM) {
    dlong vbase = eM * mesh->Nvgeo;
    dfloat drdx = mesh->vgeo[vbase + RXID];
    dfloat drdy = mesh->vgeo[vbase + RYID];
    dfloat drdz = mesh->vgeo[vbase + RZID];
    dfloat dsdx = mesh->vgeo[vbase + SXID];
    dfloat dsdy = mesh->vgeo[vbase + SYID];
    dfloat dsdz = mesh->vgeo[vbase + SZID];
    dfloat J = mesh->vgeo[vbase + JID];

    /* start with stiffness matrix */
    for(int n = 0; n < Np; ++n)
      for(int m = 0; m < Np; ++m) {
        SM[n * Np + m] = J * lambda * mesh->MM[n * Np + m];
        SM[n * Np + m] += J * drdx * drdx * mesh->Srr[n * Np + m];
        SM[n * Np + m] += J * drdx * dsdx * mesh->Srs[n * Np + m];
        SM[n * Np + m] += J * dsdx * drdx * mesh->Ssr[n * Np + m];
        SM[n * Np + m] += J * dsdx * dsdx * mesh->Sss[n * Np + m];

        SM[n * Np + m] += J * drdy * drdy * mesh->Srr[n * Np + m];
        SM[n * Np + m] += J * drdy * dsdy * mesh->Srs[n * Np + m];
        SM[n * Np + m] += J * dsdy * drdy * mesh->Ssr[n * Np + m];
        SM[n * Np + m] += J * dsdy * dsdy * mesh->Sss[n * Np + m];

        SM[n * Np + m] += J * drdz * drdz * mesh->Srr[n * Np + m];
        SM[n * Np + m] += J * drdz * dsdz * mesh->Srs[n * Np + m];
        SM[n * Np + m] += J * dsdz * drdz * mesh->Ssr[n * Np + m];
        SM[n * Np + m] += J * dsdz * dsdz * mesh->Sss[n * Np + m];
      }

    for (int fM = 0; fM < Nfaces; fM++) {
      // reset neighbor block for this face
      for (int n = 0; n < Np * Np; n++) SP[n] = 0;

      // load surface geofactors for this face
      dlong sid = mesh->Nsgeo * (eM * Nfaces + fM);
      dfloat nx = mesh->sgeo[sid + NXID];
      dfloat ny = mesh->sgeo[sid + NYID];
      dfloat nz = mesh->sgeo[sid + NZID];
      dfloat sJ = mesh->sgeo[sid + SJID];
      dfloat hinv = mesh->sgeo[sid + IHID];

      dfloat penalty = elliptic->tau * hinv;

      dlong eP = mesh->EToE[eM * Nfaces + fM];
      if (eP < 0) eP = eM; // boundary face: fall back to this element's geofactors

      dlong vbaseP = eP * mesh->Nvgeo;
      dfloat drdxP = mesh->vgeo[vbaseP + RXID];
      dfloat drdyP = mesh->vgeo[vbaseP + RYID];
      dfloat drdzP = mesh->vgeo[vbaseP + RZID];
      dfloat dsdxP = mesh->vgeo[vbaseP + SXID];
      dfloat dsdyP = mesh->vgeo[vbaseP + SYID];
      dfloat dsdzP = mesh->vgeo[vbaseP + SZID];

      int bcD = 0, bcN = 0;
      int bc = mesh->EToB[fM + Nfaces * eM]; //raw boundary flag
      int bcType = 0;
      if(bc > 0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann)

      // this needs to be double checked (and the code where these are used)
      if(bcType == 1) { // Dirichlet
        bcD = 1;
        bcN = 0;
      } else if(bcType == 2) { // Neumann
        bcD = 0;
        bcN = 1;
      }

      // reset eP
      eP = mesh->EToE[eM * Nfaces + fM];

      // mass matrix for this face
      dfloat* MSf = MS + fM * Nfp * Nfp;

      // penalty term just involves face nodes
      for(int n = 0; n < Nfp; ++n)
        for(int m = 0; m < Nfp; ++m) {
          dlong idM = eM * Nfp * Nfaces + fM * Nfp + m;
          int nM = mesh->faceNodes[fM * Nfp + n];
          int mM = mesh->faceNodes[fM * Nfp + m];
          int mP = (int) (mesh->vmapP[idM] % Np);

          dfloat MSfnm = sJ * MSf[n * Nfp + m];
          SM[nM * Np + mM] += 0.5 * (1. - bcN) * (1. + bcD) * penalty * MSfnm;
          SP[nM * Np + mP] += -0.5 * (1. - bcN) * (1. - bcD) * penalty * MSfnm;
        }

      // now add differential surface terms
      for(int n = 0; n < Nfp; ++n)
        for(int m = 0; m < Np; ++m) {
          int nM = mesh->faceNodes[fM * Nfp + n];

          for(int i = 0; i < Nfp; ++i) {
            int iM = mesh->faceNodes[fM * Nfp + i];
            int iP = (int) (mesh->vmapP[i + fM * Nfp + eM * Nfp * Nfaces] % Np);

            dfloat MSfni = sJ * MSf[n * Nfp + i]; // surface Jacobian built in

            dfloat DxMim = drdx * mesh->Dr[iM * Np + m] + dsdx * mesh->Ds[iM * Np + m];
            dfloat DyMim = drdy * mesh->Dr[iM * Np + m] + dsdy * mesh->Ds[iM * Np + m];
            dfloat DzMim = drdz * mesh->Dr[iM * Np + m] + dsdz * mesh->Ds[iM * Np + m];

            dfloat DxPim = drdxP * mesh->Dr[iP * Np + m] + dsdxP * mesh->Ds[iP * Np + m];
            dfloat DyPim = drdyP * mesh->Dr[iP * Np + m] + dsdyP * mesh->Ds[iP * Np + m];
            dfloat DzPim = drdzP * mesh->Dr[iP * Np + m] + dsdzP * mesh->Ds[iP * Np + m];

            // OP11 = OP11 + 0.5*( - mmE*Dn1)
            SM[nM * Np + m] += -0.5 * nx * (1 + bcD) * (1 - bcN) * MSfni * DxMim;
            SM[nM * Np + m] += -0.5 * ny * (1 + bcD) * (1 - bcN) * MSfni * DyMim;
            SM[nM * Np + m] += -0.5 * nz * (1 + bcD) * (1 - bcN) * MSfni * DzMim;

            SP[nM * Np + m] += -0.5 * nx * (1 - bcD) * (1 - bcN) * MSfni * DxPim;
            SP[nM * Np + m] += -0.5 * ny * (1 - bcD) * (1 - bcN) * MSfni * DyPim;
            SP[nM * Np + m] += -0.5 * nz * (1 - bcD) * (1 - bcN) * MSfni * DzPim;
          }
        }

      // transpose-side differential terms
      for(int n = 0; n < Np; ++n)
        for(int m = 0; m < Nfp; ++m) {
          int mM = mesh->faceNodes[fM * Nfp + m];
          int mP = (int) (mesh->vmapP[m + fM * Nfp + eM * Nfp * Nfaces] % Np);

          for(int i = 0; i < Nfp; ++i) {
            int iM = mesh->faceNodes[fM * Nfp + i];

            dfloat MSfim = sJ * MSf[i * Nfp + m];

            dfloat DxMin = drdx * mesh->Dr[iM * Np + n] + dsdx * mesh->Ds[iM * Np + n];
            dfloat DyMin = drdy * mesh->Dr[iM * Np + n] + dsdy * mesh->Ds[iM * Np + n];
            dfloat DzMin = drdz * mesh->Dr[iM * Np + n] + dsdz * mesh->Ds[iM * Np + n];

            SM[n * Np + mM] += -0.5 * nx * (1 + bcD) * (1 - bcN) * DxMin * MSfim;
            SM[n * Np + mM] += -0.5 * ny * (1 + bcD) * (1 - bcN) * DyMin * MSfim;
            SM[n * Np + mM] += -0.5 * nz * (1 + bcD) * (1 - bcN) * DzMin * MSfim;

            SP[n * Np + mP] += +0.5 * nx * (1 - bcD) * (1 - bcN) * DxMin * MSfim;
            SP[n * Np + mP] += +0.5 * ny * (1 - bcD) * (1 - bcN) * DyMin * MSfim;
            SP[n * Np + mP] += +0.5 * nz * (1 - bcD) * (1 - bcN) * DzMin * MSfim;
          }
        }

      // store non-zeros for off diagonal block
      for(int j = 0; j < basisNp; ++j)
        for(int i = 0; i < basisNp; ++i) {
          dfloat val = 0;
          for(int n = 0; n < Np; ++n)
            for(int m = 0; m < Np; ++m)
              val += basis[n * Np + j] * SP[n * Np + m] * basis[m * Np + i];

          if(fabs(val) > tol) {
            (*A)[nnz].row = globalIds[eM * Np + j];
            (*A)[nnz].col = globalIds[eP * Np + i];
            (*A)[nnz].val = val;
            (*A)[nnz].ownerRank = rankM;
            ++nnz;
          }
        }
    }

    // store non-zeros for diagonal block
    for(int j = 0; j < basisNp; ++j)
      for(int i = 0; i < basisNp; ++i) {
        dfloat val = 0;
        for(int n = 0; n < Np; ++n)
          for(int m = 0; m < Np; ++m)
            val += basis[n * Np + j] * SM[n * Np + m] * basis[m * Np + i];

        if(fabs(val) > tol) {
          (*A)[nnz].row = globalIds[eM * Np + j];
          (*A)[nnz].col = globalIds[eM * Np + i];
          (*A)[nnz].val = val;
          (*A)[nnz].ownerRank = rankM;
          ++nnz;
        }
      }
  }

  //printf("nnz = %d\n", nnz);
  qsort((*A), nnz, sizeof(nonZero_t), parallelCompareRowColumn);
  //*A = (nonZero_t*) realloc(*A, nnz*sizeof(nonZero_t));

  *nnzA = nnz;

  if(rankM == 0) printf("done.\n");

  free(globalIds);
  free(SM);
  free(SP);
  free(MS);
}

// 2D quadrilateral variant: assembles entry-by-entry from the tensor-product
// nodal basis arrays (B, Br, Bs) rather than precomputed stiffness blocks.
// Same parameters/outputs as ellipticBuildIpdgTri2D.
void ellipticBuildIpdgQuad2D(elliptic_t* elliptic, int basisNp, dfloat* basis, dfloat lambda,
                             nonZero_t** A, dlong* nnzA, hlong* globalStarts)
{
  mesh_t* mesh = elliptic->mesh;
  setupAide options = elliptic->options; // NOTE(review): unused local copy

  int rankM = mesh->rank;
  int Np = mesh->Np;
  int Nfp = mesh->Nfp;
  int Nfaces = mesh->Nfaces;
  dlong Nelements = mesh->Nelements;

  hlong Nnum = mesh->Np * mesh->Nelements;

  // create a global numbering system
  hlong* globalIds = (hlong*) calloc((Nelements + mesh->totalHaloPairs) * Np, sizeof(hlong));

  // every degree of freedom has its own global id
  MPI_Allgather(&Nnum, 1, MPI_HLONG, globalStarts + 1, 1, MPI_HLONG, mesh->comm);
  // in-place prefix sum: per-rank DOF counts -> global row offsets
  for(int r = 0; r < mesh->size; ++r)
    globalStarts[r + 1] = globalStarts[r] +
globalStarts[r + 1];

  /* so find number of elements on each rank */
  dlong* rankNelements = (dlong*) calloc(mesh->size, sizeof(dlong));
  hlong* rankStarts = (hlong*) calloc(mesh->size + 1, sizeof(hlong));
  MPI_Allgather(&Nelements, 1, MPI_DLONG, rankNelements, 1, MPI_DLONG, mesh->comm);
  //find offsets
  for(int r = 0; r < mesh->size; ++r)
    rankStarts[r + 1] = rankStarts[r] + rankNelements[r];
  //use the offsets to set a global id
  for (dlong e = 0; e < Nelements; e++)
    for (int n = 0; n < Np; n++)
      globalIds[e * Np + n] = n + (e + rankStarts[rankM]) * Np;

  /* do a halo exchange of global node numbers */
  if (mesh->totalHaloPairs) {
    hlong* idSendBuffer = (hlong*) calloc(Np * mesh->totalHaloPairs, sizeof(hlong));
    meshHaloExchange(mesh, Np * sizeof(hlong), globalIds, idSendBuffer, globalIds + Nelements * Np);
    free(idSendBuffer);
  }

  // upper bound: one dense block per element plus one per face
  dlong nnzLocalBound = Np * Np * (1 + Nfaces) * Nelements;

  // drop tolerance for entries in sparse storage
  dfloat tol = 1e-8;

  // build some monolithic basis arrays (use Dr,Ds,Dt and insert MM instead of weights for tet version)
  dfloat* B = (dfloat*) calloc(mesh->Np * mesh->Np, sizeof(dfloat));
  dfloat* Br = (dfloat*) calloc(mesh->Np * mesh->Np, sizeof(dfloat));
  dfloat* Bs = (dfloat*) calloc(mesh->Np * mesh->Np, sizeof(dfloat));

  // tensor-product Lagrange basis: B = nodal values, Br/Bs = r/s derivatives
  int mode = 0;
  for(int nj = 0; nj < mesh->N + 1; ++nj)
    for(int ni = 0; ni < mesh->N + 1; ++ni) {
      int node = 0;

      for(int j = 0; j < mesh->N + 1; ++j)
        for(int i = 0; i < mesh->N + 1; ++i) {
          if(nj == j && ni == i)
            B[mode * mesh->Np + node] = 1;
          if(nj == j)
            Br[mode * mesh->Np + node] = mesh->D[ni + mesh->Nq * i];
          if(ni == i)
            Bs[mode * mesh->Np + node] = mesh->D[nj + mesh->Nq * j];
          ++node;
        }

      ++mode;
    }

  *A = (nonZero_t*) calloc(nnzLocalBound, sizeof(nonZero_t));

  if(rankM == 0) printf("Building full IPDG matrix...");
  fflush(stdout);

  // reset non-zero counter
  dlong nnz = 0;

  // loop over all elements
  for(dlong eM = 0; eM < mesh->Nelements; ++eM)
    /* build Dx,Dy (forget the TP for the moment) */
    for(int n = 0; n < mesh->Np; ++n)
      for(int m = 0; m < mesh->Np; ++m) { // m will be the sub-block index for negative and positive trace
        dfloat Anm = 0;

        // (grad phi_n, grad phi_m)_{D^e}
        for(int i = 0; i < mesh->Np; ++i) {
          dlong base = eM * mesh->Np * mesh->Nvgeo + i;
          dfloat drdx = mesh->vgeo[base + mesh->Np * RXID];
          dfloat drdy = mesh->vgeo[base + mesh->Np * RYID];
          dfloat dsdx = mesh->vgeo[base + mesh->Np * SXID];
          dfloat dsdy = mesh->vgeo[base + mesh->Np * SYID];
          dfloat JW = mesh->vgeo[base + mesh->Np * JWID];

          int idn = n * mesh->Np + i;
          int idm = m * mesh->Np + i;
          dfloat dlndx = drdx * Br[idn] + dsdx * Bs[idn];
          dfloat dlndy = drdy * Br[idn] + dsdy * Bs[idn];
          dfloat dlmdx = drdx * Br[idm] + dsdx * Bs[idm];
          dfloat dlmdy = drdy * Br[idm] + dsdy * Bs[idm];
          Anm += JW * (dlndx * dlmdx + dlndy * dlmdy);
          Anm += lambda * JW * B[idn] * B[idm];
        }

        // loop over all faces in this element
        for(int fM = 0; fM < mesh->Nfaces; ++fM) {
          // accumulate flux terms for negative and positive traces
          dfloat AnmP = 0;
          for(int i = 0; i < mesh->Nfp; ++i) {
            int vidM = mesh->faceNodes[i + fM * mesh->Nfp];

            // grab vol geofacs at surface nodes
            dlong baseM = eM * mesh->Np * mesh->Nvgeo + vidM;
            dfloat drdxM = mesh->vgeo[baseM + mesh->Np * RXID];
            dfloat drdyM = mesh->vgeo[baseM + mesh->Np * RYID];
            dfloat dsdxM = mesh->vgeo[baseM + mesh->Np * SXID];
            dfloat dsdyM = mesh->vgeo[baseM + mesh->Np * SYID];

            // double check vol geometric factors are in halo storage of vgeo
            dlong idM = eM * mesh->Nfp * mesh->Nfaces + fM * mesh->Nfp + i;
            int vidP = (int) (mesh->vmapP[idM] % mesh->Np); // only use this to identify location of positive trace vgeo
            dlong localEP = mesh->vmapP[idM] / mesh->Np;
            dlong baseP = localEP * mesh->Np * mesh->Nvgeo + vidP; // use local offset for vgeo in halo
            dfloat drdxP = mesh->vgeo[baseP + mesh->Np * RXID];
            dfloat drdyP = mesh->vgeo[baseP + mesh->Np * RYID];
            dfloat dsdxP = mesh->vgeo[baseP + mesh->Np * SXID];
            dfloat dsdyP = mesh->vgeo[baseP + mesh->Np * SYID];

            // grab surface geometric factors
            dlong base = mesh->Nsgeo * (eM * mesh->Nfp * mesh->Nfaces + fM * mesh->Nfp + i);
            dfloat nx = mesh->sgeo[base + NXID];
            dfloat ny = mesh->sgeo[base + NYID];
            dfloat wsJ = mesh->sgeo[base + WSJID];
            dfloat hinv = mesh->sgeo[base + IHID];

            // form negative trace terms in IPDG
            int idnM = n * mesh->Np + vidM;
            int idmM = m * mesh->Np + vidM;
            int idmP = m * mesh->Np + vidP;

            dfloat dlndxM = drdxM * Br[idnM] + dsdxM * Bs[idnM];
            dfloat dlndyM = drdyM * Br[idnM] + dsdyM * Bs[idnM];
            dfloat ndotgradlnM = nx * dlndxM + ny * dlndyM;
            dfloat lnM = B[idnM];

            dfloat dlmdxM = drdxM * Br[idmM] + dsdxM * Bs[idmM];
            dfloat dlmdyM = drdyM * Br[idmM] + dsdyM * Bs[idmM];
            dfloat ndotgradlmM = nx * dlmdxM + ny * dlmdyM;
            dfloat lmM = B[idmM];

            dfloat dlmdxP = drdxP * Br[idmP] + dsdxP * Bs[idmP];
            dfloat dlmdyP = drdyP * Br[idmP] + dsdyP * Bs[idmP];
            dfloat ndotgradlmP = nx * dlmdxP + ny * dlmdyP;
            dfloat lmP = B[idmP];

            dfloat penalty = elliptic->tau * hinv;

            Anm += -0.5 * wsJ * lnM * ndotgradlmM; // -(ln^-, N.grad lm^-)
            Anm += -0.5 * wsJ * ndotgradlnM * lmM; // -(N.grad ln^-, lm^-)
            Anm += +0.5 * wsJ * penalty * lnM * lmM; // +((tau/h)*ln^-,lm^-)

            dlong eP = mesh->EToE[eM * mesh->Nfaces + fM];
            if (eP < 0) {
              // boundary face: fold the boundary condition into the diagonal block
              int qSgn, gradqSgn;
              int bc = mesh->EToB[fM + mesh->Nfaces * eM]; //raw boundary flag
              // NOTE(review): unlike the Tri builders there is no `bc > 0`
              // guard before indexing BCType here — presumably boundary faces
              // always carry a positive flag; verify against the mesh setup.
              int bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann)
              if(bcType == 1) { // Dirichlet
                qSgn = -1;
                gradqSgn = 1;
              } else if (bcType == 2) { // Neumann
                qSgn = 1;
                gradqSgn = -1;
              } else { // Neumann for now
                qSgn = 1;
                gradqSgn = -1;
              }

              Anm += -0.5 * gradqSgn * wsJ * lnM * ndotgradlmM; // -(ln^-, -N.grad lm^-)
              Anm += +0.5 * qSgn * wsJ * ndotgradlnM * lmM; // +(N.grad ln^-, lm^-)
              Anm += -0.5 * qSgn * wsJ * penalty * lnM * lmM; // -((tau/h)*ln^-,lm^-)
            } else {
              AnmP += -0.5 * wsJ * lnM * ndotgradlmP; // -(ln^-, N.grad lm^+)
              AnmP += +0.5 * wsJ * ndotgradlnM * lmP; // +(N.grad ln^-, lm^+)
              AnmP += -0.5 * wsJ * penalty * lnM * lmP; // -((tau/h)*ln^-,lm^+)
            }
          }

          if(fabs(AnmP) > tol) {
            // remote info
            dlong eP = mesh->EToE[eM * mesh->Nfaces + fM];
            (*A)[nnz].row = globalIds[eM * mesh->Np + n];
            (*A)[nnz].col = globalIds[eP * mesh->Np + m];
            (*A)[nnz].val = AnmP;
            (*A)[nnz].ownerRank = rankM;
            ++nnz;
          }
        }

        if(fabs(Anm) > tol) {
          // local block
          (*A)[nnz].row = globalIds[eM * mesh->Np + n];
          (*A)[nnz].col = globalIds[eM * mesh->Np + m];
          (*A)[nnz].val = Anm;
          (*A)[nnz].ownerRank = rankM;
          ++nnz;
        }
      }

  // sort received non-zero entries by row block (may need to switch compareRowColumn tests)
  qsort((*A), nnz, sizeof(nonZero_t), parallelCompareRowColumn);
  //*A = (nonZero_t*) realloc(*A, nnz*sizeof(nonZero_t));

  *nnzA = nnz;

  if(rankM == 0) printf("done.\n");

  free(globalIds);
  free(B);
  free(Br);
  free(Bs);
}

// Quadrilateral variant with full 3D geometric factors (r/s/t derivatives and
// 3-component normals); same parameters/outputs as the 2D builder.
void ellipticBuildIpdgQuad3D(elliptic_t* elliptic, int basisNp, dfloat* basis, dfloat lambda,
                             nonZero_t** A, dlong* nnzA, hlong* globalStarts)
{
  mesh_t* mesh = elliptic->mesh;
  setupAide options = elliptic->options; // NOTE(review): unused local copy

  int rankM = mesh->rank;
  int Np = mesh->Np;
  int Nfp = mesh->Nfp;
  int Nfaces = mesh->Nfaces;
  dlong Nelements = mesh->Nelements;

  hlong Nnum = mesh->Np * mesh->Nelements;

  // create a global numbering system
  hlong* globalIds = (hlong*) calloc((Nelements + mesh->totalHaloPairs) * Np, sizeof(hlong));

  // every degree of freedom has its own global id
  MPI_Allgather(&Nnum, 1, MPI_HLONG, globalStarts + 1, 1, MPI_HLONG, mesh->comm);
  // in-place prefix sum: per-rank DOF counts -> global row offsets
  for(int r = 0; r < mesh->size; ++r)
    globalStarts[r + 1] = globalStarts[r] + globalStarts[r + 1];

  /* so find number of elements on each rank */
  dlong* rankNelements = (dlong*) calloc(mesh->size, sizeof(dlong));
  hlong* rankStarts = (hlong*) calloc(mesh->size + 1, sizeof(hlong));
  MPI_Allgather(&Nelements, 1, MPI_DLONG, rankNelements, 1, MPI_DLONG, mesh->comm);
  //find offsets
  for(int r = 0; r < mesh->size; ++r)
    rankStarts[r + 1] = rankStarts[r] + rankNelements[r];
  //use the offsets to set a global id
  for (dlong e = 0; e < Nelements; e++)
    for (int n = 0; n < Np; n++)
      globalIds[e * Np + n] = n + (e + rankStarts[rankM]) * Np;

  /* do a halo exchange of global node numbers */
  if (mesh->totalHaloPairs) {
    hlong* idSendBuffer =
(hlong*) calloc(Np * mesh->totalHaloPairs, sizeof(hlong));
    meshHaloExchange(mesh, Np * sizeof(hlong), globalIds, idSendBuffer, globalIds + Nelements * Np);
    free(idSendBuffer);
  }

  // upper bound: one dense block per element plus one per face
  dlong nnzLocalBound = Np * Np * (1 + Nfaces) * Nelements;

  // drop tolerance for entries in sparse storage
  dfloat tol = 1e-8;

  // build some monolithic basis arrays (use Dr,Ds,Dt and insert MM instead of weights for tet version)
  dfloat* B = (dfloat*) calloc(mesh->Np * mesh->Np, sizeof(dfloat));
  dfloat* Br = (dfloat*) calloc(mesh->Np * mesh->Np, sizeof(dfloat));
  dfloat* Bs = (dfloat*) calloc(mesh->Np * mesh->Np, sizeof(dfloat));

  // tensor-product Lagrange basis: B = nodal values, Br/Bs = r/s derivatives
  int mode = 0;
  for(int nj = 0; nj < mesh->N + 1; ++nj)
    for(int ni = 0; ni < mesh->N + 1; ++ni) {
      int node = 0;

      for(int j = 0; j < mesh->N + 1; ++j)
        for(int i = 0; i < mesh->N + 1; ++i) {
          if(nj == j && ni == i)
            B[mode * mesh->Np + node] = 1;
          if(nj == j)
            Br[mode * mesh->Np + node] = mesh->D[ni + mesh->Nq * i];
          if(ni == i)
            Bs[mode * mesh->Np + node] = mesh->D[nj + mesh->Nq * j];
          ++node;
        }

      ++mode;
    }

  *A = (nonZero_t*) calloc(nnzLocalBound, sizeof(nonZero_t));

  if(rankM == 0) printf("Building full IPDG matrix...");
  fflush(stdout);

  // reset non-zero counter
  dlong nnz = 0;

  // loop over all elements
  for(dlong eM = 0; eM < mesh->Nelements; ++eM)
    /* build Dx,Dy (forget the TP for the moment) */
    for(int n = 0; n < mesh->Np; ++n)
      for(int m = 0; m < mesh->Np; ++m) { // m will be the sub-block index for negative and positive trace
        dfloat Anm = 0;

        // (grad phi_n, grad phi_m)_{D^e}
        for(int i = 0; i < mesh->Np; ++i) {
          dlong base = eM * mesh->Np * mesh->Nvgeo + i;
          dfloat drdx = mesh->vgeo[base + mesh->Np * RXID];
          dfloat drdy = mesh->vgeo[base + mesh->Np * RYID];
          dfloat drdz = mesh->vgeo[base + mesh->Np * RZID];
          dfloat dsdx = mesh->vgeo[base + mesh->Np * SXID];
          dfloat dsdy = mesh->vgeo[base + mesh->Np * SYID];
          dfloat dsdz = mesh->vgeo[base + mesh->Np * SZID];
          dfloat dtdx = mesh->vgeo[base + mesh->Np * TXID];
          dfloat dtdy = mesh->vgeo[base + mesh->Np * TYID];
          dfloat dtdz = mesh->vgeo[base + mesh->Np * TZID];
          dfloat JW = mesh->vgeo[base + mesh->Np * JWID];

          int idn = n * mesh->Np + i;
          int idm = m * mesh->Np + i;
          // NOTE(review): the stray "+dtdx;"-style statements below are
          // no-op expression statements — it looks like the t-derivative
          // contribution (a Bt term) was never wired in; confirm against the
          // intended formulation before relying on this builder.
          dfloat dlndx = drdx * Br[idn] + dsdx * Bs[idn];
          +dtdx;
          dfloat dlndy = drdy * Br[idn] + dsdy * Bs[idn];
          +dtdy;
          dfloat dlndz = drdz * Br[idn] + dsdz * Bs[idn];
          +dtdz;
          dfloat dlmdx = drdx * Br[idm] + dsdx * Bs[idm];
          +dtdx;
          dfloat dlmdy = drdy * Br[idm] + dsdy * Bs[idm];
          +dtdy;
          dfloat dlmdz = drdz * Br[idm] + dsdz * Bs[idm];
          +dtdz;
          Anm += JW * (dlndx * dlmdx + dlndy * dlmdy + dlndz * dlmdz);
          Anm += lambda * JW * B[idn] * B[idm];
        }

        // loop over all faces in this element
        for(int fM = 0; fM < mesh->Nfaces; ++fM) {
          // accumulate flux terms for negative and positive traces
          dfloat AnmP = 0;
          for(int i = 0; i < mesh->Nfp; ++i) {
            int vidM = mesh->faceNodes[i + fM * mesh->Nfp];

            // grab vol geofacs at surface nodes
            dlong baseM = eM * mesh->Np * mesh->Nvgeo + vidM;
            dfloat drdxM = mesh->vgeo[baseM + mesh->Np * RXID];
            dfloat drdyM = mesh->vgeo[baseM + mesh->Np * RYID];
            dfloat drdzM = mesh->vgeo[baseM + mesh->Np * RZID];
            dfloat dsdxM = mesh->vgeo[baseM + mesh->Np * SXID];
            dfloat dsdyM = mesh->vgeo[baseM + mesh->Np * SYID];
            dfloat dsdzM = mesh->vgeo[baseM + mesh->Np * SZID];
            dfloat dtdxM = mesh->vgeo[baseM + mesh->Np * TXID];
            dfloat dtdyM = mesh->vgeo[baseM + mesh->Np * TYID];
            dfloat dtdzM = mesh->vgeo[baseM + mesh->Np * TZID];

            // double check vol geometric factors are in halo storage of vgeo
            dlong idM = eM * mesh->Nfp * mesh->Nfaces + fM * mesh->Nfp + i;
            int vidP = (int) (mesh->vmapP[idM] % mesh->Np); // only use this to identify location of positive trace vgeo
            dlong localEP = mesh->vmapP[idM] / mesh->Np;
            dlong baseP = localEP * mesh->Np * mesh->Nvgeo + vidP; // use local offset for vgeo in halo
            dfloat drdxP = mesh->vgeo[baseP + mesh->Np * RXID];
            dfloat drdyP = mesh->vgeo[baseP + mesh->Np * RYID];
            dfloat drdzP = mesh->vgeo[baseP + mesh->Np * RZID];
            dfloat dsdxP = mesh->vgeo[baseP + mesh->Np * SXID];
            dfloat dsdyP = mesh->vgeo[baseP + mesh->Np * SYID];
            dfloat dsdzP = mesh->vgeo[baseP + mesh->Np * SZID];
            dfloat dtdxP = mesh->vgeo[baseP + mesh->Np * TXID];
            dfloat dtdyP = mesh->vgeo[baseP + mesh->Np * TYID];
            dfloat dtdzP = mesh->vgeo[baseP + mesh->Np * TZID];

            // grab surface geometric factors
            dlong base = mesh->Nsgeo * (eM * mesh->Nfp * mesh->Nfaces + fM * mesh->Nfp + i);
            dfloat nx = mesh->sgeo[base + NXID];
            dfloat ny = mesh->sgeo[base + NYID];
            dfloat nz = mesh->sgeo[base + NZID];
            dfloat wsJ = mesh->sgeo[base + WSJID];
            dfloat hinv = mesh->sgeo[base + IHID];

            // form negative trace terms in IPDG
            int idnM = n * mesh->Np + vidM;
            int idmM = m * mesh->Np + vidM;
            int idmP = m * mesh->Np + vidP;

            // NOTE(review): same stray "+dtdxM;"-style no-op statements here.
            dfloat dlndxM = drdxM * Br[idnM] + dsdxM * Bs[idnM];
            +dtdxM;
            dfloat dlndyM = drdyM * Br[idnM] + dsdyM * Bs[idnM];
            +dtdyM;
            dfloat dlndzM = drdzM * Br[idnM] + dsdzM * Bs[idnM];
            +dtdzM;
            dfloat ndotgradlnM = nx * dlndxM + ny * dlndyM + nz * dlndzM;
            dfloat lnM = B[idnM];

            dfloat dlmdxM = drdxM * Br[idmM] + dsdxM * Bs[idmM];
            +dtdxM;
            dfloat dlmdyM = drdyM * Br[idmM] + dsdyM * Bs[idmM];
            +dtdyM;
            dfloat dlmdzM = drdzM * Br[idmM] + dsdzM * Bs[idmM];
            +dtdzM;
            dfloat ndotgradlmM = nx * dlmdxM + ny * dlmdyM + nz * dlmdzM;
            dfloat lmM = B[idmM];

            dfloat dlmdxP = drdxP * Br[idmP] + dsdxP * Bs[idmP];
            +dtdxP;
            dfloat dlmdyP = drdyP * Br[idmP] + dsdyP * Bs[idmP];
            +dtdyP;
            dfloat dlmdzP = drdzP * Br[idmP] + dsdzP * Bs[idmP];
            +dtdzP;
            dfloat ndotgradlmP = nx * dlmdxP + ny * dlmdyP + nz * dlmdzP;
            dfloat lmP = B[idmP];

            dfloat penalty = elliptic->tau * hinv;

            Anm += -0.5 * wsJ * lnM * ndotgradlmM; // -(ln^-, N.grad lm^-)
            Anm += -0.5 * wsJ * ndotgradlnM * lmM; // -(N.grad ln^-, lm^-)
            Anm += +0.5 * wsJ * penalty * lnM * lmM; // +((tau/h)*ln^-,lm^-)

            // NOTE(review): unlike Quad2D there is no eP < 0 boundary branch
            // here — presumably a closed surface mesh with no boundary faces;
            // a negative EToE entry would index globalIds out of range below.
            AnmP += -0.5 * wsJ * lnM * ndotgradlmP; // -(ln^-, N.grad lm^+)
            AnmP += +0.5 * wsJ * ndotgradlnM * lmP; // +(N.grad ln^-, lm^+)
            AnmP += -0.5 * wsJ * penalty * lnM * lmP; // -((tau/h)*ln^-,lm^+)
          }

          if(fabs(AnmP) > tol) {
            // remote info
            dlong eP = mesh->EToE[eM * mesh->Nfaces + fM];
            (*A)[nnz].row = globalIds[eM * mesh->Np + n];
            (*A)[nnz].col = globalIds[eP * mesh->Np + m];
            (*A)[nnz].val = AnmP;
            (*A)[nnz].ownerRank = rankM;
            ++nnz;
          }
        }

        if(fabs(Anm) > tol) {
          // local block
          (*A)[nnz].row = globalIds[eM * mesh->Np + n];
          (*A)[nnz].col = globalIds[eM * mesh->Np + m];
          (*A)[nnz].val = Anm;
          (*A)[nnz].ownerRank = rankM;
          ++nnz;
        }
      }

  // sort received non-zero entries by row block (may need to switch compareRowColumn tests)
  qsort((*A), nnz, sizeof(nonZero_t), parallelCompareRowColumn);
  //*A = (nonZero_t*) realloc(*A, nnz*sizeof(nonZero_t));

  *nnzA = nnz;

  if(rankM == 0) printf("done.\n");

#if 0
  // debug: dump the assembled matrix in 1-based triplet form
  {
    FILE* fp = fopen("DGS.dat", "w");
    for(int n = 0; n < nnz; ++n)
      fprintf(fp, "%d %d %17.15lf\n", (*A)[n].row + 1, (*A)[n].col + 1, (*A)[n].val);
    fclose(fp);
  }
#endif

  free(globalIds);
  free(B);
  free(Br);
  free(Bs);
}

// Tetrahedral variant of the IPDG builder (body continues past this chunk).
void ellipticBuildIpdgTet3D(elliptic_t* elliptic, int basisNp, dfloat* basis, dfloat lambda,
                            nonZero_t** A, dlong* nnzA, hlong* globalStarts)
{
  mesh_t* mesh = elliptic->mesh;
  setupAide options = elliptic->options;
  int rankM = mesh->rank;

  // number of degrees of freedom on this rank
  hlong Nnum = mesh->Np * mesh->Nelements;

  // create a global numbering system
  hlong* globalIds = (hlong*) calloc((mesh->Nelements + mesh->totalHaloPairs) * mesh->Np, sizeof(hlong));

  // every degree of freedom has its own global id
  MPI_Allgather(&Nnum, 1, MPI_HLONG, globalStarts + 1, 1, MPI_HLONG, mesh->comm);
  // in-place prefix sum: per-rank DOF counts -> global row offsets
  for(int r = 0; r < mesh->size; ++r)
    globalStarts[r + 1] = globalStarts[r] + globalStarts[r + 1];

  /* so find number of elements on each rank */
  dlong* rankNelements = (dlong*) calloc(mesh->size, sizeof(dlong));
  hlong* rankStarts = (hlong*) calloc(mesh->size + 1, sizeof(hlong));
  dlong Nelements = mesh->Nelements;
  MPI_Allgather(&(mesh->Nelements), 1, MPI_DLONG, rankNelements, 1, MPI_DLONG, mesh->comm);
  //find offsets
  for(int r = 0; r < mesh->size; ++r)
    rankStarts[r + 1] = rankStarts[r] + rankNelements[r];
  //use the offsets to set a global id
  for (dlong e = 0; e < mesh->Nelements; e++)
    for (int n = 0; n < mesh->Np; n++)
globalIds[e * mesh->Np + n] = n + (e + rankStarts[rankM]) * mesh->Np; /* do a halo exchange of global node numbers */ if (mesh->totalHaloPairs) { hlong* idSendBuffer = (hlong*) calloc(mesh->Np * mesh->totalHaloPairs,sizeof(hlong)); meshHaloExchange(mesh, mesh->Np * sizeof(hlong), globalIds, idSendBuffer, globalIds + mesh->Nelements * mesh->Np); free(idSendBuffer); } dlong nnzLocalBound = mesh->Np * mesh->Np * (1 + mesh->Nfaces) * mesh->Nelements; // drop tolerance for entries in sparse storage dfloat tol = 1e-8; // surface mass matrices MS = MM*LIFT dfloat* MS = (dfloat*) calloc(mesh->Nfaces * mesh->Np * mesh->Nfp,sizeof(dfloat)); for (int f = 0; f < mesh->Nfaces; f++) for (int n = 0; n < mesh->Np; n++) for (int m = 0; m < mesh->Nfp; m++) { dfloat MSnm = 0; for (int i = 0; i < mesh->Np; i++) MSnm += mesh->MM[n + i * mesh->Np] * mesh->LIFT[i * mesh->Nfp * mesh->Nfaces + f * mesh->Nfp + m]; MS[m + n * mesh->Nfp + f * mesh->Nfp * mesh->Np] = MSnm; } // DrT*MS, DsT*MS, DtT*MS dfloat* DrTMS = (dfloat*) calloc(mesh->Nfaces * mesh->Np * mesh->Nfp,sizeof(dfloat)); dfloat* DsTMS = (dfloat*) calloc(mesh->Nfaces * mesh->Np * mesh->Nfp,sizeof(dfloat)); dfloat* DtTMS = (dfloat*) calloc(mesh->Nfaces * mesh->Np * mesh->Nfp,sizeof(dfloat)); for (int f = 0; f < mesh->Nfaces; f++) for (int n = 0; n < mesh->Np; n++) for (int i = 0; i < mesh->Nfp; i++) { DrTMS[i + n * mesh->Nfp + f * mesh->Nfp * mesh->Np] = 0.; DsTMS[i + n * mesh->Nfp + f * mesh->Nfp * mesh->Np] = 0.; DtTMS[i + n * mesh->Nfp + f * mesh->Nfp * mesh->Np] = 0.; for (int m = 0; m < mesh->Np; m++) { DrTMS[i + n * mesh->Nfp + f * mesh->Nfp * mesh->Np] += mesh->Dr[n + m * mesh->Np] * MS[i + m * mesh->Nfp + f * mesh->Nfp * mesh->Np]; DsTMS[i + n * mesh->Nfp + f * mesh->Nfp * mesh->Np] += mesh->Ds[n + m * mesh->Np] * MS[i + m * mesh->Nfp + f * mesh->Nfp * mesh->Np]; DtTMS[i + n * mesh->Nfp + f * mesh->Nfp * mesh->Np] += mesh->Dt[n + m * mesh->Np] * MS[i + m * mesh->Nfp + f * mesh->Nfp * mesh->Np]; } } *A = (nonZero_t*) 
calloc(nnzLocalBound,sizeof(nonZero_t)); // reset non-zero counter dlong nnz = 0; if(rankM == 0) printf("Building full IPDG matrix..."); fflush(stdout); // loop over all elements #pragma omp parallel { dfloat* BM = (dfloat*) calloc(mesh->Np * mesh->Np,sizeof(dfloat)); dfloat* qmP = (dfloat*) calloc(mesh->Nfp,sizeof(dfloat)); dfloat* qmM = (dfloat*) calloc(mesh->Nfp,sizeof(dfloat)); dfloat* ndotgradqmM = (dfloat*) calloc(mesh->Nfp,sizeof(dfloat)); dfloat* ndotgradqmP = (dfloat*) calloc(mesh->Nfp,sizeof(dfloat)); #pragma omp for for(dlong eM = 0; eM < mesh->Nelements; ++eM) { dlong gbase = eM * mesh->Nggeo; dfloat Grr = mesh->ggeo[gbase + G00ID]; dfloat Grs = mesh->ggeo[gbase + G01ID]; dfloat Grt = mesh->ggeo[gbase + G02ID]; dfloat Gss = mesh->ggeo[gbase + G11ID]; dfloat Gst = mesh->ggeo[gbase + G12ID]; dfloat Gtt = mesh->ggeo[gbase + G22ID]; dfloat J = mesh->ggeo[gbase + GWJID]; /* start with stiffness matrix */ for(int n = 0; n < mesh->Np; ++n) for(int m = 0; m < mesh->Np; ++m) { BM[m + n * mesh->Np] = J * lambda * mesh->MM[m + n * mesh->Np]; BM[m + n * mesh->Np] += Grr * mesh->Srr[m + n * mesh->Np]; BM[m + n * mesh->Np] += Grs * mesh->Srs[m + n * mesh->Np]; BM[m + n * mesh->Np] += Grt * mesh->Srt[m + n * mesh->Np]; BM[m + n * mesh->Np] += Grs * mesh->Ssr[m + n * mesh->Np]; BM[m + n * mesh->Np] += Gss * mesh->Sss[m + n * mesh->Np]; BM[m + n * mesh->Np] += Gst * mesh->Sst[m + n * mesh->Np]; BM[m + n * mesh->Np] += Grt * mesh->Str[m + n * mesh->Np]; BM[m + n * mesh->Np] += Gst * mesh->Sts[m + n * mesh->Np]; BM[m + n * mesh->Np] += Gtt * mesh->Stt[m + n * mesh->Np]; } dlong vbase = eM * mesh->Nvgeo; dfloat drdx = mesh->vgeo[vbase + RXID]; dfloat drdy = mesh->vgeo[vbase + RYID]; dfloat drdz = mesh->vgeo[vbase + RZID]; dfloat dsdx = mesh->vgeo[vbase + SXID]; dfloat dsdy = mesh->vgeo[vbase + SYID]; dfloat dsdz = mesh->vgeo[vbase + SZID]; dfloat dtdx = mesh->vgeo[vbase + TXID]; dfloat dtdy = mesh->vgeo[vbase + TYID]; dfloat dtdz = mesh->vgeo[vbase + TZID]; for (int m = 0; 
m < mesh->Np; m++) { for (int fM = 0; fM < mesh->Nfaces; fM++) { // load surface geofactors for this face dlong sid = mesh->Nsgeo * (eM * mesh->Nfaces + fM); dfloat nx = mesh->sgeo[sid + NXID]; dfloat ny = mesh->sgeo[sid + NYID]; dfloat nz = mesh->sgeo[sid + NZID]; dfloat sJ = mesh->sgeo[sid + SJID]; dfloat hinv = mesh->sgeo[sid + IHID]; dlong eP = mesh->EToE[eM * mesh->Nfaces + fM]; if (eP < 0) eP = eM; dlong vbaseP = eP * mesh->Nvgeo; dfloat drdxP = mesh->vgeo[vbaseP + RXID]; dfloat drdyP = mesh->vgeo[vbaseP + RYID]; dfloat drdzP = mesh->vgeo[vbaseP + RZID]; dfloat dsdxP = mesh->vgeo[vbaseP + SXID]; dfloat dsdyP = mesh->vgeo[vbaseP + SYID]; dfloat dsdzP = mesh->vgeo[vbaseP + SZID]; dfloat dtdxP = mesh->vgeo[vbaseP + TXID]; dfloat dtdyP = mesh->vgeo[vbaseP + TYID]; dfloat dtdzP = mesh->vgeo[vbaseP + TZID]; // extract trace nodes for (int i = 0; i < mesh->Nfp; i++) { // double check vol geometric factors are in halo storage of vgeo int idM = eM * mesh->Nfp * mesh->Nfaces + fM * mesh->Nfp + i; int vidM = mesh->faceNodes[i + fM * mesh->Nfp]; int vidP = (int) (mesh->vmapP[idM] % mesh->Np);// only use this to identify location of positive trace vgeo qmM[i] = 0; if (vidM == m) qmM[i] = 1; qmP[i] = 0; if (vidP == m) qmP[i] = 1; ndotgradqmM[i] = (nx * drdx + ny * drdy + nz * drdz) * mesh->Dr[m + vidM * mesh->Np] + (nx * dsdx + ny * dsdy + nz * dsdz) * mesh->Ds[m + vidM * mesh->Np] + (nx * dtdx + ny * dtdy + nz * dtdz) * mesh->Dt[m + vidM * mesh->Np]; ndotgradqmP[i] = (nx * drdxP + ny * drdyP + nz * drdzP) * mesh->Dr[m + vidP * mesh->Np] + (nx * dsdxP + ny * dsdyP + nz * dsdzP) * mesh->Ds[m + vidP * mesh->Np] + (nx * dtdxP + ny * dtdyP + nz * dtdzP) * mesh->Dt[m + vidP * mesh->Np]; } dfloat penalty = elliptic->tau * hinv; eP = mesh->EToE[eM * mesh->Nfaces + fM]; for (int n = 0; n < mesh->Np; n++) { for (int i = 0; i < mesh->Nfp; i++) { BM[m + n * mesh->Np] += -0.5 * sJ * MS[i + n * mesh->Nfp + fM * mesh->Nfp * mesh->Np] * ndotgradqmM[i]; BM[m + n * mesh->Np] += -0.5 * sJ * 
(nx * drdx + ny * drdy + nz * drdz) * DrTMS[i + n * mesh->Nfp + fM * mesh->Nfp * mesh->Np] * qmM[i] - 0.5 * sJ * (nx * dsdx + ny * dsdy + nz * dsdz) * DsTMS[i + n * mesh->Nfp + fM * mesh->Nfp * mesh->Np] * qmM[i] - 0.5 * sJ * (nx * dtdx + ny * dtdy + nz * dtdz) * DtTMS[i + n * mesh->Nfp + fM * mesh->Nfp * mesh->Np] * qmM[i]; BM[m + n * mesh->Np] += +0.5 * sJ * MS[i + n * mesh->Nfp + fM * mesh->Nfp * mesh->Np] * penalty * qmM[i]; } dfloat AnmP = 0; if (eP < 0) { int qSgn, gradqSgn; int bc = mesh->EToB[fM + mesh->Nfaces * eM]; //raw boundary flag int bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) if(bcType == 1) { // Dirichlet qSgn = -1; gradqSgn = 1; } else if (bcType == 2) { // Neumann qSgn = 1; gradqSgn = -1; } else { // Neumann for now qSgn = 1; gradqSgn = -1; } for (int i = 0; i < mesh->Nfp; i++) { BM[m + n * mesh->Np] += -0.5 * gradqSgn * sJ * MS[i + n * mesh->Nfp + fM * mesh->Nfp * mesh->Np] * ndotgradqmM[i]; BM[m + n * mesh->Np] += +0.5 * qSgn * sJ * (nx * drdx + ny * drdy + nz * drdz) * DrTMS[i + n * mesh->Nfp + fM * mesh->Nfp * mesh->Np] * qmM[i] + 0.5 * qSgn * sJ * (nx * dsdx + ny * dsdy + nz * dsdz) * DsTMS[i + n * mesh->Nfp + fM * mesh->Nfp * mesh->Np] * qmM[i] + 0.5 * qSgn * sJ * (nx * dtdx + ny * dtdy + nz * dtdz) * DtTMS[i + n * mesh->Nfp + fM * mesh->Nfp * mesh->Np] * qmM[i]; BM[m + n * mesh->Np] += -0.5 * qSgn * sJ * MS[i + n * mesh->Nfp + fM * mesh->Nfp * mesh->Np] * penalty * qmM[i]; } } else { for (int i = 0; i < mesh->Nfp; i++) { AnmP += -0.5 * sJ * MS[i + n * mesh->Nfp + fM * mesh->Nfp * mesh->Np] * ndotgradqmP[i]; AnmP += +0.5 * sJ * (nx * drdx + ny * drdy + nz * drdz) * DrTMS[i + n * mesh->Nfp + fM * mesh->Nfp * mesh->Np] * qmP[i] + 0.5 * sJ * (nx * dsdx + ny * dsdy + nz * dsdz) * DsTMS[i + n * mesh->Nfp + fM * mesh->Nfp * mesh->Np] * qmP[i] + 0.5 * sJ * (nx * dtdx + ny * dtdy + nz * dtdz) * DtTMS[i + n * mesh->Nfp + fM * mesh->Nfp * mesh->Np] * qmP[i]; AnmP += -0.5 * sJ * MS[i + n * mesh->Nfp + fM * mesh->Nfp * mesh->Np] 
* penalty * qmP[i]; } } if(fabs(AnmP) > tol) { #pragma omp critical { // remote info (*A)[nnz].row = globalIds[eM * mesh->Np + n]; (*A)[nnz].col = globalIds[eP * mesh->Np + m]; (*A)[nnz].val = AnmP; (*A)[nnz].ownerRank = rankM; ++nnz; } } } } } for (int n = 0; n < mesh->Np; n++) { for (int m = 0; m < mesh->Np; m++) { dfloat Anm = BM[m + n * mesh->Np]; if(fabs(Anm) > tol) { #pragma omp critical { (*A)[nnz].row = globalIds[eM * mesh->Np + n]; (*A)[nnz].col = globalIds[eM * mesh->Np + m]; (*A)[nnz].val = Anm; (*A)[nnz].ownerRank = rankM; ++nnz; } } } } } free(BM); free(qmM); free(qmP); free(ndotgradqmM); free(ndotgradqmP); } qsort((*A), nnz, sizeof(nonZero_t), parallelCompareRowColumn); // free up unused storage //*A = (nonZero_t*) realloc(*A, nnz*sizeof(nonZero_t)); *nnzA = nnz; if(rankM == 0) printf("done.\n"); free(globalIds); free(MS); free(DrTMS); free(DsTMS); free(DtTMS); } void ellipticBuildIpdgHex3D(elliptic_t* elliptic, int basisNp, dfloat* basis, dfloat lambda, nonZero_t** A, dlong* nnzA, hlong* globalStarts) { mesh_t* mesh = elliptic->mesh; setupAide options = elliptic->options; int rankM = mesh->rank; int Np = mesh->Np; int Nfp = mesh->Nfp; int Nfaces = mesh->Nfaces; dlong Nelements = mesh->Nelements; hlong Nnum = mesh->Np * mesh->Nelements; // create a global numbering system hlong* globalIds = (hlong*) calloc((Nelements + mesh->totalHaloPairs) * Np,sizeof(hlong)); // every degree of freedom has its own global id MPI_Allgather(&Nnum, 1, MPI_HLONG, globalStarts + 1, 1, MPI_HLONG, mesh->comm); for(int r = 0; r < mesh->size; ++r) globalStarts[r + 1] = globalStarts[r] + globalStarts[r + 1]; /* so find number of elements on each rank */ dlong* rankNelements = (dlong*) calloc(mesh->size, sizeof(dlong)); hlong* rankStarts = (hlong*) calloc(mesh->size + 1, sizeof(hlong)); MPI_Allgather(&Nelements, 1, MPI_DLONG, rankNelements, 1, MPI_DLONG, mesh->comm); //find offsets for(int r = 0; r < mesh->size; ++r) rankStarts[r + 1] = rankStarts[r] + rankNelements[r]; //use 
the offsets to set a global id for (dlong e = 0; e < Nelements; e++) for (int n = 0; n < Np; n++) globalIds[e * Np + n] = n + (e + rankStarts[rankM]) * Np; /* do a halo exchange of global node numbers */ if (mesh->totalHaloPairs) { hlong* idSendBuffer = (hlong*) calloc(Np * mesh->totalHaloPairs,sizeof(hlong)); meshHaloExchange(mesh, Np * sizeof(hlong), globalIds, idSendBuffer, globalIds + Nelements * Np); free(idSendBuffer); } dlong nnzLocalBound = Np * Np * (1 + Nfaces) * Nelements; // drop tolerance for entries in sparse storage dfloat tol = 1e-8; // build some monolithic basis arrays (use Dr,Ds,Dt and insert MM instead of weights for tet version) dfloat* B = (dfloat*) calloc(mesh->Np * mesh->Np, sizeof(dfloat)); dfloat* Br = (dfloat*) calloc(mesh->Np * mesh->Np, sizeof(dfloat)); dfloat* Bs = (dfloat*) calloc(mesh->Np * mesh->Np, sizeof(dfloat)); dfloat* Bt = (dfloat*) calloc(mesh->Np * mesh->Np, sizeof(dfloat)); int mode = 0; for(int nk = 0; nk < mesh->N + 1; ++nk) for(int nj = 0; nj < mesh->N + 1; ++nj) for(int ni = 0; ni < mesh->N + 1; ++ni) { int node = 0; for(int k = 0; k < mesh->N + 1; ++k) for(int j = 0; j < mesh->N + 1; ++j) for(int i = 0; i < mesh->N + 1; ++i) { if(nk == k && nj == j && ni == i) B[mode * mesh->Np + node] = 1; if(nj == j && nk == k) Br[mode * mesh->Np + node] = mesh->D[ni + mesh->Nq * i]; if(ni == i && nk == k) Bs[mode * mesh->Np + node] = mesh->D[nj + mesh->Nq * j]; if(ni == i && nj == j) Bt[mode * mesh->Np + node] = mesh->D[nk + mesh->Nq * k]; ++node; } ++mode; } *A = (nonZero_t*) calloc(nnzLocalBound,sizeof(nonZero_t)); if(rankM == 0) printf("Building full IPDG matrix..."); fflush(stdout); // reset non-zero counter dlong nnz = 0; // loop over all elements //#pragma omp parallel for for(dlong eM = 0; eM < mesh->Nelements; ++eM) /* build Dx,Dy,Dz (forget the TP for the moment) */ for(int n = 0; n < mesh->Np; ++n) for(int m = 0; m < mesh->Np; ++m) { // m will be the sub-block index for negative and positive trace dfloat Anm = 0; // (grad 
phi_n, grad phi_m)_{D^e} for(int i = 0; i < mesh->Np; ++i) { dlong base = eM * mesh->Np * mesh->Nvgeo + i; dfloat drdx = mesh->vgeo[base + mesh->Np * RXID]; dfloat drdy = mesh->vgeo[base + mesh->Np * RYID]; dfloat drdz = mesh->vgeo[base + mesh->Np * RZID]; dfloat dsdx = mesh->vgeo[base + mesh->Np * SXID]; dfloat dsdy = mesh->vgeo[base + mesh->Np * SYID]; dfloat dsdz = mesh->vgeo[base + mesh->Np * SZID]; dfloat dtdx = mesh->vgeo[base + mesh->Np * TXID]; dfloat dtdy = mesh->vgeo[base + mesh->Np * TYID]; dfloat dtdz = mesh->vgeo[base + mesh->Np * TZID]; dfloat JW = mesh->vgeo[base + mesh->Np * JWID]; int idn = n * mesh->Np + i; int idm = m * mesh->Np + i; dfloat dlndx = drdx * Br[idn] + dsdx * Bs[idn] + dtdx * Bt[idn]; dfloat dlndy = drdy * Br[idn] + dsdy * Bs[idn] + dtdy * Bt[idn]; dfloat dlndz = drdz * Br[idn] + dsdz * Bs[idn] + dtdz * Bt[idn]; dfloat dlmdx = drdx * Br[idm] + dsdx * Bs[idm] + dtdx * Bt[idm]; dfloat dlmdy = drdy * Br[idm] + dsdy * Bs[idm] + dtdy * Bt[idm]; dfloat dlmdz = drdz * Br[idm] + dsdz * Bs[idm] + dtdz * Bt[idm]; Anm += JW * (dlndx * dlmdx + dlndy * dlmdy + dlndz * dlmdz); Anm += lambda * JW * B[idn] * B[idm]; } // loop over all faces in this element for(int fM = 0; fM < mesh->Nfaces; ++fM) { // accumulate flux terms for negative and positive traces dfloat AnmP = 0; for(int i = 0; i < mesh->Nfp; ++i) { int vidM = mesh->faceNodes[i + fM * mesh->Nfp]; // grab vol geofacs at surface nodes dlong baseM = eM * mesh->Np * mesh->Nvgeo + vidM; dfloat drdxM = mesh->vgeo[baseM + mesh->Np * RXID]; dfloat drdyM = mesh->vgeo[baseM + mesh->Np * RYID]; dfloat drdzM = mesh->vgeo[baseM + mesh->Np * RZID]; dfloat dsdxM = mesh->vgeo[baseM + mesh->Np * SXID]; dfloat dsdyM = mesh->vgeo[baseM + mesh->Np * SYID]; dfloat dsdzM = mesh->vgeo[baseM + mesh->Np * SZID]; dfloat dtdxM = mesh->vgeo[baseM + mesh->Np * TXID]; dfloat dtdyM = mesh->vgeo[baseM + mesh->Np * TYID]; dfloat dtdzM = mesh->vgeo[baseM + mesh->Np * TZID]; // double check vol geometric factors are in halo 
storage of vgeo dlong idM = eM * mesh->Nfp * mesh->Nfaces + fM * mesh->Nfp + i; int vidP = (int) (mesh->vmapP[idM] % mesh->Np); // only use this to identify location of positive trace vgeo dlong localEP = mesh->vmapP[idM] / mesh->Np; dlong baseP = localEP * mesh->Np * mesh->Nvgeo + vidP; // use local offset for vgeo in halo dfloat drdxP = mesh->vgeo[baseP + mesh->Np * RXID]; dfloat drdyP = mesh->vgeo[baseP + mesh->Np * RYID]; dfloat drdzP = mesh->vgeo[baseP + mesh->Np * RZID]; dfloat dsdxP = mesh->vgeo[baseP + mesh->Np * SXID]; dfloat dsdyP = mesh->vgeo[baseP + mesh->Np * SYID]; dfloat dsdzP = mesh->vgeo[baseP + mesh->Np * SZID]; dfloat dtdxP = mesh->vgeo[baseP + mesh->Np * TXID]; dfloat dtdyP = mesh->vgeo[baseP + mesh->Np * TYID]; dfloat dtdzP = mesh->vgeo[baseP + mesh->Np * TZID]; // grab surface geometric factors dlong base = mesh->Nsgeo * (eM * mesh->Nfp * mesh->Nfaces + fM * mesh->Nfp + i); dfloat nx = mesh->sgeo[base + NXID]; dfloat ny = mesh->sgeo[base + NYID]; dfloat nz = mesh->sgeo[base + NZID]; dfloat wsJ = mesh->sgeo[base + WSJID]; dfloat hinv = mesh->sgeo[base + IHID]; // form negative trace terms in IPDG int idnM = n * mesh->Np + vidM; int idmM = m * mesh->Np + vidM; int idmP = m * mesh->Np + vidP; dfloat dlndxM = drdxM * Br[idnM] + dsdxM * Bs[idnM] + dtdxM * Bt[idnM]; dfloat dlndyM = drdyM * Br[idnM] + dsdyM * Bs[idnM] + dtdyM * Bt[idnM]; dfloat dlndzM = drdzM * Br[idnM] + dsdzM * Bs[idnM] + dtdzM * Bt[idnM]; dfloat ndotgradlnM = nx * dlndxM + ny * dlndyM + nz * dlndzM; dfloat lnM = B[idnM]; dfloat dlmdxM = drdxM * Br[idmM] + dsdxM * Bs[idmM] + dtdxM * Bt[idmM]; dfloat dlmdyM = drdyM * Br[idmM] + dsdyM * Bs[idmM] + dtdyM * Bt[idmM]; dfloat dlmdzM = drdzM * Br[idmM] + dsdzM * Bs[idmM] + dtdzM * Bt[idmM]; dfloat ndotgradlmM = nx * dlmdxM + ny * dlmdyM + nz * dlmdzM; dfloat lmM = B[idmM]; dfloat dlmdxP = drdxP * Br[idmP] + dsdxP * Bs[idmP] + dtdxP * Bt[idmP]; dfloat dlmdyP = drdyP * Br[idmP] + dsdyP * Bs[idmP] + dtdyP * Bt[idmP]; dfloat dlmdzP = drdzP * 
Br[idmP] + dsdzP * Bs[idmP] + dtdzP * Bt[idmP]; dfloat ndotgradlmP = nx * dlmdxP + ny * dlmdyP + nz * dlmdzP; dfloat lmP = B[idmP]; dfloat penalty = elliptic->tau * hinv; Anm += -0.5 * wsJ * lnM * ndotgradlmM; // -(ln^-, N.grad lm^-) Anm += -0.5 * wsJ * ndotgradlnM * lmM; // -(N.grad ln^-, lm^-) Anm += +0.5 * wsJ * penalty * lnM * lmM; // +((tau/h)*ln^-,lm^-) dlong eP = mesh->EToE[eM * mesh->Nfaces + fM]; if (eP < 0) { int qSgn, gradqSgn; int bc = mesh->EToB[fM + mesh->Nfaces * eM]; //raw boundary flag int bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) if(bcType == 1) { // Dirichlet qSgn = -1; gradqSgn = 1; } else if (bcType == 2) { // Neumann qSgn = 1; gradqSgn = -1; } else { // Neumann for now qSgn = 1; gradqSgn = -1; } Anm += -0.5 * gradqSgn * wsJ * lnM * ndotgradlmM; // -(ln^-, -N.grad lm^-) Anm += +0.5 * qSgn * wsJ * ndotgradlnM * lmM; // +(N.grad ln^-, lm^-) Anm += -0.5 * qSgn * wsJ * penalty * lnM * lmM; // -((tau/h)*ln^-,lm^-) } else { AnmP += -0.5 * wsJ * lnM * ndotgradlmP; // -(ln^-, N.grad lm^+) AnmP += +0.5 * wsJ * ndotgradlnM * lmP; // +(N.grad ln^-, lm^+) AnmP += -0.5 * wsJ * penalty * lnM * lmP; // -((tau/h)*ln^-,lm^+) } } if(fabs(AnmP) > tol) { //#pragma omp critical { // remote info dlong eP = mesh->EToE[eM * mesh->Nfaces + fM]; (*A)[nnz].row = globalIds[eM * mesh->Np + n]; (*A)[nnz].col = globalIds[eP * mesh->Np + m]; (*A)[nnz].val = AnmP; (*A)[nnz].ownerRank = rankM; ++nnz; } } } if(fabs(Anm) > tol) { //#pragma omp critical { // local block (*A)[nnz].row = globalIds[eM * mesh->Np + n]; (*A)[nnz].col = globalIds[eM * mesh->Np + m]; (*A)[nnz].val = Anm; (*A)[nnz].ownerRank = rankM; ++nnz; } } } // sort received non-zero entries by row block (may need to switch compareRowColumn tests) qsort((*A), nnz, sizeof(nonZero_t), parallelCompareRowColumn); //*A = (nonZero_t*) realloc(*A, nnz*sizeof(nonZero_t)); *nnzA = nnz; if(rankM == 0) printf("done.\n"); free(globalIds); free(B); free(Br); free(Bs); free(Bt); }
GB_unaryop__identity_int64_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int64_uint32
// op(A') function:  GB_tran__identity_int64_uint32

// C type:   int64_t
// A type:   uint32_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity operator entrywise: Cx [p] = (int64_t) Ax [p] for
// p = 0..anz-1, split statically across nthreads OpenMP threads.
GrB_Info GB_unop__identity_int64_uint32
(
    int64_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is generated by textually including
// GB_unaryop_transpose.c with the macros defined above.
GrB_Info GB_tran__identity_int64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
tetrahedron.c
#include "spglib.h"
#include "tetrahedron_method.h"
#include <stdio.h>
#include <stdlib.h>

static void test_tetrahedron_method(void);
static void mat_copy_matrix_d3(double a[3][3], double b[3][3]);
static double mat_get_determinant_d3(double a[3][3]);
static int mat_inverse_matrix_d3(double m[3][3], double a[3][3],
                                 const double precision);

int main(void)
{
  test_tetrahedron_method();
  return 0;
}

/* frequency.dat is in the example directory. */
/* The values in this file are the phonon frequencies of NaCl */
/* with 20x20x20 mesh. Calculation was done with reducing */
/* k-points to the irreducible k-points using phonopy. */
/* (http://phonopy.sf.net/) */
/* Computes the phonon DOS and integrated DOS of NaCl with the     */
/* linear tetrahedron method and writes both curves to dos.dat.    */
static void test_tetrahedron_method(void)
{
  printf("*** Example of tetrahedron method of NaCl to calculate DOS ***:\n");
  printf("Read data from frequency.dat and write DOS to dos.dat.\n");

  int i, j, k, l, q, r;

  /* NaCl 20x20x20 mesh */
  double lattice[3][3] = {
    {0.000000000000000, 2.845150738087836, 2.845150738087836},
    {2.845150738087836, 0.000000000000000, 2.845150738087836},
    {2.845150738087836, 2.845150738087836, 0.000000000000000}
  };
  double position[][3] = {{0, 0, 0}, {0.5, 0.5, 0.5}};
  int types[] = {1, 2};
  int num_atom = 2;
  int m = 20;
  int mesh[] = {m, m, m};
  int num_gp = mesh[0] * mesh[1] * mesh[2];
  int is_shift[] = {0, 0, 0};
  int grid_address[num_gp][3];
  int grid_mapping_table[num_gp];
  int weights[num_gp];
  int num_ir = spg_get_ir_reciprocal_mesh(grid_address,
                                          grid_mapping_table,
                                          mesh,
                                          is_shift,
                                          1,
                                          lattice,
                                          position,
                                          types,
                                          num_atom,
                                          1e-5);
  int ir_gp[num_ir];
  int ir_weights[num_ir];
  int gp_ir_index[num_gp];
  int relative_grid_address[24][4][3];
  double rec_lat[3][3];
  FILE *fp;
  char * line = NULL;   /* getline() allocates; must be freed by us */
  size_t len = 0;
  ssize_t read;
  double frequency[num_ir * num_atom * 3];
  double max_f, min_f;
  double t_omegas[24][4];
  int g_addr[3];
  int gp;
  int num_freqs = 201;
  double dos[num_freqs];
  double integral_dos[num_freqs];
  double omegas[num_freqs];
  double iw;

  /* multiplicity of each irreducible grid point */
  for (i = 0; i < num_gp; i++) {
    weights[i] = 0;
  }
  for (i = 0; i < num_gp; i++) {
    weights[grid_mapping_table[i]]++;
  }

  /* compact the irreducible points and build the gp -> ir index map */
  j = 0;
  for (i = 0; i < num_gp; i++) {
    if (weights[i] != 0) {
      ir_gp[j] = i;
      ir_weights[j] = weights[i];
      gp_ir_index[i] = j;
      j++;
    } else {
      gp_ir_index[i] = gp_ir_index[grid_mapping_table[i]];
    }
  }

  printf("Number of irreducible k-points: %d\n", num_ir);

  mat_inverse_matrix_d3(rec_lat, lattice, 1e-5);
  thm_get_relative_grid_address(relative_grid_address, rec_lat);

  /* for (i = 0; i < 24; i++) { */
  /*   for (j = 0; j < 4; j++) { */
  /*     printf("[%2d %2d %2d] ", */
  /* 	   relative_grid_address[i][j][0], */
  /* 	   relative_grid_address[i][j][1], */
  /* 	   relative_grid_address[i][j][2]); */
  /*   } */
  /*   printf("\n"); */
  /* } */

  fp = fopen("frequency.dat", "r");
  if (fp == NULL) {
    /* previously an unchecked NULL here crashed in getline() */
    fprintf(stderr, "Could not open frequency.dat\n");
    return;
  }
  for (i = 0; i < num_ir * num_atom * 3; i++) {
    read = getline(&line, &len, fp);
    if (read == -1) {
      break;
    }
    frequency[i] = strtod(line, NULL);
  }
  fclose(fp);
  free(line);   /* release the buffer getline() allocated (was leaked) */
  line = NULL;

  max_f = frequency[0];
  min_f = frequency[0];
  for (i = 0; i < num_ir * num_atom * 3; i++) {
    if (max_f < frequency[i]) {
      max_f = frequency[i];
    }
    if (min_f > frequency[i]) {
      min_f = frequency[i];
    }
  }

  printf("Number of frequencies: %d\n", i);

  /* i is the omp loop index and therefore implicitly private */
#pragma omp parallel for private(j, k, l, q, r, g_addr, gp, t_omegas, iw)
  for (i = 0; i < num_freqs; i++) {
    dos[i] = 0;
    integral_dos[i] = 0;
    omegas[i] = min_f + (max_f - min_f) / (num_freqs - 1) * i;
    for (j = 0; j < num_ir; j++) {
      for (k = 0; k < num_atom * 3; k++) {
        /* gather band frequencies on the 24 tetrahedra around this point */
        for (l = 0; l < 24; l++) {
          for (q = 0; q < 4; q++) {
            for (r = 0; r < 3; r++) {
              g_addr[r] = grid_address[ir_gp[j]][r] +
                          relative_grid_address[l][q][r];
            }
            gp = spg_get_grid_point_from_address(g_addr, mesh);
            t_omegas[l][q] = frequency[gp_ir_index[gp] * num_atom * 3 + k];
          }
        }
        /* 'J' -> DOS weight, 'I' -> integrated-DOS weight */
        iw = thm_get_integration_weight(omegas[i], t_omegas, 'J');
        dos[i] += iw * ir_weights[j];
        iw = thm_get_integration_weight(omegas[i], t_omegas, 'I');
        integral_dos[i] += iw * ir_weights[j];
      }
    }
  }

  fp = fopen("dos.dat", "w");
  if (fp == NULL) {
    fprintf(stderr, "Could not open dos.dat\n");
    return;
  }
  for (i = 0; i < num_freqs; i++) {
    fprintf(fp, "%f %f\n", omegas[i], dos[i] / num_gp);
  }
  fprintf(fp, "\n\n");
  for (i = 0; i < num_freqs; i++) {
    fprintf(fp, "%f %f\n", omegas[i], integral_dos[i] / num_gp);
  }
  fclose(fp);
}

/* a = b (3x3 copy) */
static void mat_copy_matrix_d3(double a[3][3], double b[3][3])
{
  a[0][0] = b[0][0];
  a[0][1] = b[0][1];
  a[0][2] = b[0][2];
  a[1][0] = b[1][0];
  a[1][1] = b[1][1];
  a[1][2] = b[1][2];
  a[2][0] = b[2][0];
  a[2][1] = b[2][1];
  a[2][2] = b[2][2];
}

/* determinant of a 3x3 matrix by cofactor expansion along the first row */
static double mat_get_determinant_d3(double a[3][3])
{
  return a[0][0] * (a[1][1] * a[2][2] - a[1][2] * a[2][1])
    + a[0][1] * (a[1][2] * a[2][0] - a[1][0] * a[2][2])
    + a[0][2] * (a[1][0] * a[2][1] - a[1][1] * a[2][0]);
}

/* m = inverse(a).  Returns 1 on success, 0 when |det(a)| < precision
 * (previously the precision argument was ignored and a singular input
 * caused a division by zero). */
static int mat_inverse_matrix_d3(double m[3][3], double a[3][3],
                                 const double precision)
{
  double det;
  double c[3][3];
  det = mat_get_determinant_d3(a);
  if (det < precision && det > -precision) {
    return 0;
  }
  c[0][0] = (a[1][1] * a[2][2] - a[1][2] * a[2][1]) / det;
  c[1][0] = (a[1][2] * a[2][0] - a[1][0] * a[2][2]) / det;
  c[2][0] = (a[1][0] * a[2][1] - a[1][1] * a[2][0]) / det;
  c[0][1] = (a[2][1] * a[0][2] - a[2][2] * a[0][1]) / det;
  c[1][1] = (a[2][2] * a[0][0] - a[2][0] * a[0][2]) / det;
  c[2][1] = (a[2][0] * a[0][1] - a[2][1] * a[0][0]) / det;
  c[0][2] = (a[0][1] * a[1][2] - a[0][2] * a[1][1]) / det;
  c[1][2] = (a[0][2] * a[1][0] - a[0][0] * a[1][2]) / det;
  c[2][2] = (a[0][0] * a[1][1] - a[0][1] * a[1][0]) / det;
  mat_copy_matrix_d3(m, c);
  return 1;
}
GMS_descriptive_statistics.h
#ifndef __GMS_DESCRIPTIVE_STATISTICS_H__
#define __GMS_DESCRIPTIVE_STATISTICS_H__ 081120200922

// Version metadata for this header (major/minor/micro plus packed full
// version, creation/build dates, and authorship).
namespace file_info {

 const unsigned int gGMS_DESCRIPTIVE_STATISTICS_MAJOR = 1U;
 const unsigned int gGMS_DESCRIPTIVE_STATISTICS_MINOR = 1U;
 const unsigned int gGMS_DESCRIPTIVE_STATISTICS_MICRO = 0U;
 // packed as 1000*major + 100*minor + 10*micro
 const unsigned int gGMS_DESCRIPTIVE_STATISTICS_FULLVER =
   1000U*gGMS_DESCRIPTIVE_STATISTICS_MAJOR+
   100U*gGMS_DESCRIPTIVE_STATISTICS_MINOR+
   10U*gGMS_DESCRIPTIVE_STATISTICS_MICRO;
 const char * const pgGMS_DESCRIPTIVE_STATISTICS_CREATE_DATE = "08-11-2020 09:22AM +00200 (SUN 08 NOV 2020 GMT+2)";
 const char * const pgGMS_DESCRIPTIVE_STATISTICS_BUILD_DATE = __DATE__ ":" __TIME__;
 const char * const pgGMS_DESCRIPTIVE_STATISTICS_AUTHOR = "CHARLES P. REEVE NATIONAL BUREAU OF STANDARDS, translated to C++ by Bernard Gingold beniekg@gmail.com";
}

#include <math.h>
#include <cstdint>
#include <omp.h>
#include "GMS_config.h"
#include "GMS_cephes.h" // to eliminate cmath.h implemented by GLIBC
                        // used only in scalar code.
                        // Vector code is handled by SVML calls

namespace gms {

 namespace math {

/*
C
C-----------------------------------------------------------------------
C   BARTLT   WRITTEN BY CHARLES P. REEVE, STATISTICAL ENGINEERING
C            DIVISION, NATIONAL BUREAU OF STANDARDS, GAITHERSBURG,
C            MARYLAND  20899
C
C   FOR: PERFORMING BARTLETT'S TEST FOR HOMOGENEITY OF VARIANCES ON
C        THREE OR MORE VARIANCES (THE F TEST SHOULD BE USED IN THE CASE
C        OF TWO VARIANCES).  IF THE INPUT PARAMETERS ARE NOT VALID AN
C        ERROR FLAG IS SET AND NOTHING FURTHER IS COMPUTED, OTHERWISE
C        THE FOLLOWING ARE COMPUTED:
C
C           1) THE CHI-SQUARED STATISTIC (CH2),
C           2) THE CUMULATIVE DISTRIBUTION FUNCTION OF THE CHI-SQUARED
C              DISTRIBUTION EVALUATED AT CH2 (CH2CDF), AND
C           3) THE POOLED VARIANCE (VARP) AND ITS CORRESPONDING
C              DEGREES OF FREEDOM (DFP)
C
C        THE VALUES IN 3) MAY BE USEFUL ONLY IF THE VARIANCES ARE
C        DETERMINED TO BE EQUAL.  THE VALUE OF CH2CDF IS GOOD TO SIX
C        DECIMAL PLACES.
C C SUBPROGRAMS CALLED: CDFGAM (GAMMA CUMULATIVE DISTRIBUTION FUNCTION) C C CURRENT VERSION COMPLETED FEBRUARY 3, 1987 C C REFERENCES: C C 1) SNEDECOR, GEORGE W. AND COCHRAN, WILLIAM G., 'STATISTICAL C METHODS', 6TH EDITION, IOWA STATE UNIVERSITY PRESS, PP. 296-298. C C 2) BROWNLEE, K.A., 'STATISTICAL THEORY AND METHODOLOGY IN SCIENCE C AND ENGINEERING', JOHN WILEY & SONS, 1960, PP. 225-227. C----------------------------------------------------------------------- C DEFINITION OF PASSED PARAMETERS: C C * VAR = VECTOR (LENGTH N) OF VARIANCES (REAL) C C * DF = VECTOR (LENGTH N) OF DEGREES OF FREEDOM CORRESPONDING C TO THE VARIANCES (REAL) C C * N = NUMBER OF VARIANCES [>2] (INTEGER) C C CH2 = THE CHI-SQUARED STATISTIC ASSOCIATED WITH BARTLETT'S TEST C (REAL) C C CH2CDF = THE CUMULATIVE DISTRIBUTION FUNCTION OF THE CHI-SQUARED C DISTRIBUTION WITH N-1 DEGREES OF FREEDOM EVALUATED AT CH2 C (REAL) C C VARP = THE POOLED VARIANCE DETERMINED FROM THE N VARIANCES (REAL) C C DFP = THE DEGREES OF FREEDOM ASSOCIATED WITH THE POOLED C VARIANCE (REAL) C C IFLAG = THE ERROR FLAG ON OUTPUT (INTEGER) INTERPRETATION: C 0 -> NO ERRORS DETECTED C 1,2 -> ERROR FLAGS FROM SUBROUTINE CDFGAM C 3 -> N<3 C 4 -> AT LEAST ONE DF(I) IS <= 0.0 C 5 -> AT LEAST ONE VARIANCE(I) IS < 0.0 C C * INDICATES PARAMETERS REQUIRING INPUT VALUES C----------------------------------------------------------------------- C */ /* __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void bartlt(float * __restrict __ATTR_ALIGN__(64) var, float * __restrict __ATTR_ALIGN__(64) df, const int32_t n, float &ch2, float &ch2cdf, float &varp, float &dfp, int32_t &iflag) { if(__builtin_expect(n < 3,0)) { iflag = 3; return; } for(int32_t i = 0; i != n; ++i) { if(df[i] <= 0.0f) { iflag = 4; return; } if(var[i] < 0.0f) { iflag = 5; return; } } float a,c,x,alpha,eps; a = 0.0f; varp = 0.0f; c = 0.0f; ch2 = 0.0f; #if defined __ICC || defined __INTEL_COMPILER __assume_aligned(var,64); __assume_aligned(df,64); #elif 
defined __GNUC__ && !defined __INTEL_COMPILER var = (float*)__builtin_assume_aligned(var,64); df = (float*)__builtin_assume_aligned(df,64); #endif #pragma omp simd reduction(+: a,varp,c,ch2) aligned(df:64,varp,var) \ linear(i:1) unroll partial(8) for(int32_t i = 0; i != n; ++i) { a = a + df[i]; varp = varp + df[i] * varp[i]; c = c + 1.0f/df[i]; ch2 = ch2 + df[i]*logf(var[i]); } varp = varp/a; dfp = a; ch2 = a*ceph_logf(varp)-ch2; a = 1.0f+(c-1.0f/a)/(3.0f*(float)n-1); x = 0.5f*ch2; alpha = 0.5f*(float)n-1; eps = 0.0000001f; // call cdfgam here }*/ /* C C----------------------------------------------------------------------- C CDFBET WRITTEN BY CHARLES P. REEVE, STATISTICAL ENGINEERING C DIVISION, NATIONAL BUREAU OF STANDARDS, GAITHERSBURG, C MARYLAND 20899 C C FOR: COMPUTING THE CUMULATIVE DISTRIBUTION FUNCTION OF THE BETA C DISTRIBUTION (ALSO KNOWN AS THE INCOMPLETE BETA RATIO) TO A C SPECIFIED ACCURACY (TRUNCATION ERROR IN THE INFINITE SERIES). C THE ALGORITHM, DESCRIBED IN REFERENCE 2, IS A MODIFICATION OF C THE ALGORITHM OF REFERENCE 1. THREE FEATURES HAVE BEEN ADDED: C C 1) A PRECISE METHOD OF MEETING THE TRUNCATION ACCURACY, C 2) A CONSTANT W USED IN DETERMINING FOR WHICH X VALUES THE C RELATION I(X,P,Q) = 1 - I(1-X,Q,P) IS TO BE USED, AND C 3) A CONSTANT UFLO >= THE UNDERFLOW LIMIT ON THE COMPUTER. C C SUBPROGRAMS CALLED: DGAMLN (LOG OF GAMMA FUNCTION) C C CURRENT VERSION COMPLETED OCTOBER 24, 1986 C C REFERENCES: C C 1) MAJUMDER, K.L. AND BHATTACHARJEE, G.P., 'THE INCOMPLETE BETA C INTEGRAL', ALGORITHM AS 63, APPLIED STATISTICS, VOL. 22, NO. 3, C 1973, PP. 409-411. C C 2) REEVE, CHARLES P., 'AN ALGORITHM FOR COMPUTING THE BETA C.D.F. C TO A SPECIFIED ACCURACY', STATISTICAL ENGINEERING DIVISION C NOTE 86-3, OCTOBER 1986. C----------------------------------------------------------------------- C DEFINITION OF PASSED PARAMETERS: C C * X = VALUE AT WHICH THE C.D.F. 
IS TO BE COMPUTED (REAL) C C * P = FIRST PARAMETER OF THE BETA FUNCTION (>0) (REAL) C C * Q = SECOND PARAMETER OF THE BETA FUNCTION (>0) (REAL) C C * EPS = THE DESIRED ABSOLUTE ACCURACY OF THE C.D.F. (>0) (REAL) C C IFLAG = ERROR INDICATOR ON OUTPUT (INTEGER) INTERPRETATION: C 0 -> NO ERRORS DETECTED C 1 -> EITHER P OR Q OR EPS IS <= UFLO C 2 -> NUMBER OF TERMS EVALUATED IN THE INFINITE SERIES C EXCEEDS JMAX C C CDFX = THE C.D.F. EVALUATED AT X (REAL) C C * INDICATES PARAMETERS REQUIRING INPUT VALUES C----------------------------------------------------------------------- C */ __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void cdfbet(float x, float p, float q, float eps, int32_t &iflag, float &cdfx) { constexpr float w = 20.0f; constexpr float uflo = 1.0e-30f; constexpr int32_t jmax = 5000; float xy,yx,pq,qp; float dp,dq,pdfl,u,r,v,yxeps; bool bc1,bc2,bc3,ll; bc1 = p <= uflo; bc2 = q <= uflo; bc3 = eps <= uflo; // CHECK FOR VALIDITY OF ARGUMENTS P, Q, AND EPS if(__builtin_expect(bc1,0) || __builtin_expect(bc2,0) || __builtin_expect(bc3,0)) { iflag = 1; return; } iflag = 0; if(__builtin_expect(x <= 0.0f,0)) { return; } ll = false; if(x >= 1.0f) { cdfx = 1.0f; } else { // SWITCH ARGUMENTS IF NECESSARY ll = p+w >= (p+q+2.0f*w)*x; if(ll) { xy = x; yx = 1.0f-xy; pq = p; qp = q; } else { yx = x; xy = 1.0f-yx; qp = p; pq = q; } // EVALUATE THE BETA P.D.F. AND CHECK FOR UNDERFLOW dp = (double)(pq-1.0f)*ceph_log((double)xy)-dgamln(pq); dq = (double)(qp-1.0f)*ceph_log((double)yx)-dgamln(qp); pdfl = (float)(dgamln(pq+qp)+dp+dq); if(pdfl < log(uflo)) { ; // ? 
} else { u = ceph_expf(pdfl)*xy/pq; r = xy/yx; label_10: if(qp <= 1.0f) goto label_20; // INCREMENT PQ AND DECREMENT QP if(u <= eps*(1.0f-(pq+qp)*xy/(pq+1.0f))) goto label_40; cdfx = cdfx+u; pq = pq+1.0f; qp = qp-1.0f; u = qp*r*u/pq; goto label_10; label_20: v = yx*u; yxeps = yx*eps; // INCREMENT PQ for(int32_t j = 0; i != jmax; ++j) { if(v <= yxeps) goto label_40; cdfx = cdfx+v; pq = pq+1.0f; v = (pq+qp-1.0f)*xy*v/pq; } iflag = 2; } label_40: if(!ll) cdfx = 1.0f-cdfx; } } /* C----------------------------------------------------------------------- C CDFDNF WRITTEN BY CHARLES P. REEVE, STATISTICAL ENGINEERING C DIVISION, NATIONAL BUREAU OF STANDARDS, GAITHERSBURG, C MARYLAND 20899 C C FOR: COMPUTING THE CUMULATIVE DISTRIBUTION FUNCTION OF THE DOUBLY C NONCENTRAL F DISTRIBUTION TO A SPECIFIED ACCURACY (TRUNCATION C ERROR IN THE INFINITE SERIES REPRESENTATION GIVEN BY EQUATION C 2.2 IN REFERENCE 1 BELOW). THE BETA C.D.F. ROUTINE IS CALLED C AT MOST TWO TIMES. FURTHER VALUES OF THE BETA C.D.F. ARE C OBTAINED FROM RECURRENCE RELATIONS GIVEN IN REFERENCE 2. C REFERENCE 3 GIVES A DETAILED DESCRIPTION OF THE ALGORITHM C HEREIN. C C THIS PROGRAM MAY ALSO BE EFFICIENTLY USED TO COMPUTE THE C CUMULATIVE DISTRIBUTION FUNCTIONS OF THE SINGLY NONCENTRAL C AND CENTRAL F DISTRIBUTIONS BY SETTING THE APPROPRIATE C NONCENTRALITY PARAMETERS EQUAL TO ZERO. C C CHECKS ARE MADE TO ASSURE THAT ALL PASSED PARAMETERS ARE C WITHIN VALID RANGES AS GIVEN BELOW. NO UPPER LIMIT IS SET C FOR THE NONCENTRALITY PARAMETERS, BUT VALUES UP TO ABOUT C 10,000 CAN BE HANDLED WITH THE CURRENT DIMENSION LIMITS. THE C COMPUTED VALUE CDFX IS VALID ONLY IF IFLAG=0 ON RETURN. C C NOTE: IN EQUATION 2.2 OF REFERENCE 1 THE AUTHOR HAS MISTAKENLY C REVERSED THE ARGUMENTS OF THE INCOMPLETE BETA FUNCTION. C THEY SHOULD READ [(M/2)+R,(N/2+S)] WHERE M AND N ARE THE C DEGREES OF FREEDOM ASSOCIATED WITH THE NUMERATOR AND C DENOMINATOR RESPECTIVELY OF THE F STATISTIC. 
TO FURTHER C CONFUSE THE ISSUE, THE AUTHOR HAS REVERSED THE USAGE OF C M AND N IN SECTION 1 OF THE PAPER. C C NOTE: IN SUBROUTINE EDGEF THE DOUBLE PRECISION CONSTANT DEUFLO IS C THE EXPONENTIAL UNDERFLOW LIMIT WHOSE CURRENT VALUE IS SET C AT -69D0. ON A COMPUTER WHERE DEXP(-69D0) CAUSES UNDERFLOW C THIS LIMIT SHOULD BE CHANGED. C C SUBPROGRAMS CALLED: CDFBET (BETA C.D.F.) C DGAMLN (DOUBLE PRECISION LOG OF GAMMA FUNCTION) C POISSF, EDGEF (ATTACHED) C C CURRENT VERSION COMPLETED SEPTEMBER 29, 1988 C C REFERENCES: C C 1. BULGREN, W.G., 'ON REPRESENTATIONS OF THE DOUBLY NONCENTRAL F C DISTRIBUTION', JOURNAL OF THE AMERICAN STATISTICAL ASSOCIATION, C MARCH 1971, VOLUME 66, NO. 333, PP. 184-186. C C 2. ABRAMOWITZ, MILTON, AND STEGUN, IRENE A., 'HANDBOOK OF C MATHEMATICAL FUNCTIONS', NATIONAL BUREAU OF STANDARDS APPLIED C MATHEMATICS SERIES 55, NOVEMBER 1970, P. 944. C C 3. REEVE, CHARLES P., 'AN ALGORITHM FOR COMPUTING THE DOUBLY C NONCENTRAL F C.D.F. TO A SPECIFIED ACCURACY', STATISTICAL C ENGINEERING DIVISION NOTE 86-4, NOVEMBER 1986. C----------------------------------------------------------------------- C DEFINITION OF PASSED PARAMETERS: C C * X = VALUE (>=0) AT WHICH THE C.D.F. IS TO BE COMPUTED (REAL) C C * DF1 = DEGREES OF FREEDOM (>0) IN THE NUMERATOR (REAL) C C * DF2 = DEGREES OF FREEDOM (>0) IN THE DENOMINATOR (REAL) C C * ALAMB1 = THE NONCENTRALITY PARAMETER (>=0) FOR THE NUMERATOR C (REAL) [EQUAL TO ZERO FOR THE CENTRAL F DISTRIBUTION] C C * ALAMB2 = THE NONCENTRALITY PARAMETER (>=0) FOR THE DENOMINATOR C (REAL) [EQUAL TO ZERO FOR THE SINGLY NONCENTRAL F AND C CENTRAL F DISTRIBUTIONS] C C * EPS = THE DESIRED ABSOLUTE ACCURACY OF THE C.D.F. 
(REAL) C [1 >= EPS >= 10**(-10)] C C IFLAG = ERROR INDICATOR ON OUTPUT (INTEGER) INTERPRETATION: C 0 -> NO ERRORS DETECTED C 1,2 -> ERROR FLAGS FROM SUBROUTINE CDFBET C 3 -> EITHER ALAMB1 OR ALAMB2 IS < 0 C 4 -> EITHER DF1 OR DF2 IS <= 0 C 5 -> EPS IS OUTSIDE THE RANGE [10**(-10),1] C 6 -> VECTOR DIMENSIONS ARE TOO SMALL - INCREASE NX C C CDFX = THE DOUBLY NONCENTRAL F C.D.F. EVALUATED AT X (REAL) C C * INDICATES PARAMETERS REQUIRING INPUT VALUES C----------------------------------------------------------------------- C */ #include "GMS_simd_memops.h" __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void cdfdnf(float x, float df1, float df2, float alamb1, float alamb2, float eps, int32_t &iflag, float &cdfx) { using namespace gms::common; // CHECK VALIDITY OF ARGUMENTS if(__builtin_expect(alamb1<0.0f,0) || __builtin_expect(alamb2<0.0f,0)) { iflag = 3; return; } if(__builtin_expect(df1<=0.0f,0) || __builtin_expect(df2<=0.0f,0)) { iflag = 4; return; } if(__builtin_expect(eps>1.0f,0) || __builtin_expect(eps<1.0e-10f,0)) { iflag = 5; return; } iflag = 0; constexpr int32_t nx = 1008; #if defined __AVX512F__ __attribute__((aligned(64))) float bfi[nx]; __attribute__((aligned(64))) float bfj[nx]; __attribute__((aligned(64))) float poi[nx]; __attribute__((aligned(64))) float poj[nx]; #else __attribute__((aligned(32))) float bfi[nx]; __attribute__((aligned(32))) float bfj[nx]; __attribute__((aligned(32))) float poi[nx]; __attribute__((aligned(32))) float poj[nx]; #endif float eps3,fa,ga,fb,gb,fc,xx,yy; int32_t imin,ni,jmin,nj; // SET ERROR CRITERION FOR THE BETA C.D.F. 
(PECULIAR TO CDFBET) eps3 = 0.001f*eps; fa = 0.5f*alamb1; ga = 0.5f*alamb2; fb = 0.5f*df1; gb = 0.5f*df2; yy = df2/(df2+df1*x); if(yy>=1.0f){ return; } xx = 1.0f-yy; if(xx>=1.0f) { cdfx = 1.0f; return; } // COMPUTE POISSON PROBABILITIES IN VECTORS POI AND POJ #if (GMS_INIT_ARRAYS) == 1 #if defined __AVX512F__ avx512_init_unroll4x_ps(&poi[0],nx,0.0f); #else avx256_init_unroll4x_ps(&poi[0],nx,0.0f); #endif #endif poissf(fa,eps,imin,ni,poi,nx,iflag); if(__builtin_expect(iflag != 0,0)) { return; } fc = fb+(float)imin; #if (GMS_INIT_ARRAYS) == 1 #if defined __AVX512F__ avx512_init_unroll4x_ps(&poj[0],nx,0.0f); #else avx256_init_unroll4x_ps(&poj[0],nx,0.0f); #endif #endif poissf(ga,eps,jmin,nj,poj,nx,iflag); if(__builtin_expect(iflag != 0,0)) { return; } gc = gb+(float)jmin; // COMPUTE BETA C.D.F. BY RECURRENCE WHEN I=IMIN AND J=JMIN TO JMAX #if (GMS_INIT_ARRAYS) == 1 #if defined __AVX512F__ avx512_init_unroll4x_ps(&bfj[0],nx,0.0f); #else avx256_init_unroll4x_ps(&bfj[0],nx,0.0f); #endif #endif edgef(nj,gc,fc,yy,xx,bfj,cdfx,poj,poi,eps3,iflag,1); if(__builtin_expect(ni<=1,0) || __builtin_expect(iflag != 0,0)) { return; } //COMPUTE BETA C.D.F. BY RECURRENCE WHEN J=JMIN AND I=IMIN TO IMAX #if (GMS_INIT_ARRAYS) == 1 #if defined __AVX512F__ avx512_init_unroll4x_ps(&bfi[0],nx,0.0f); #else avx256_init_unroll4x_ps(&bfi[0],nx,0.0f); #endif #endif bfi[0] = bfj[0]; edgef(ni,fc,gc,xx,yy,bfi,cdfx,poi,poj,eps3,iflag,2); if(__builtin_expect(nj<=1,0) || __builtin_expect(iflag != 0,0)) { return; } // COMPUTE BETA C.D.F. 
BY RECURRENCE WHEN I>IMIN AND J>JMIN for(int32_t i = 1; i != ni; ++i) { bfj[0] = bfi[i]; float tmp = poi[i]; for(int32_t j = 1; j != nj; ++j) { bfj[j] = xx*bfj[j]+yy*bfj[j-1]; cdfx = cdfx+tmp+poj[j]*bfj[j]; } } } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void poissf(float alamb, const float eps, int32_t l, int32_t &nspan, float * __restrict __ATTR_ALIGN__(32) v, const int32_t nv, int32_t &iflag) { double dal,dk,dlimit,dsum; float pl,pk; int32_t k,nk,nl,inc; dlimit = 1.0-0.5*(double)eps; k = (int32_t)alamb; l = k+1; if(alamb==0.0) { pl = 1.0f; } else { dal = (double)alamb; dk = (double)k; pl = (float)(ceph_exp(dk*log(dal)-dal-dgamln((float)k+1))); } } #include <limits> #include <algorithm> __ATTR_PURE__ __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline double dgamln(const float x) { if(x<=0.0) { return std::numeric_limits<double>::signaling_NaN(); } double c,dx,q,r,xmin,xn,result; constexpr double xmin = 6.894; constexpr double absac = 1.0e-15; constexpr double C = 0.918938533204672741780329736; constexpr double B1 = 0.833333333333333333333333333E-1; constexpr double B2 = -0.277777777777777777777777778E-2; constexpr double B3 = 0.793650793650793650793650794E-3; constexpr double B4 = -0.595238095238095238095238095E-3; constexpr double B5 = 0.841750841750841750841750842E-3; constexpr double B6 = -0.191752691752691752691752692E-2; constexpr double B7 = 0.641025641025641025641025641E-2; constexpr double B8 = -0.295506535947712418300653595E-1; int32_t n; dx = (double)x; n = std::max(0,(int32_t)(xmin-dx+1.0)); xn = dx+(double)n; r = 1.0/xn; q = r*r; result = 0.0; result = r*(b1+q*(b2+q*(b3+q*(b4+q*(b5+q*(b6+q*(b7+q*b8)))))))+c +(xn-0.5)*log(xn)-xn; // USE RECURRENCE RELATION WHEN N>0 (X<XMIN) if(n>0) { q = 1.0; for(int32_t i = 0; i != n-1; ++i) { q = q*(dx+(double)i) } result -= ceph_log(q); } return (result); } /* PURPOSE--THIS SUBROUTINE COMPUTES THE C SAMPLE AUTOCORRELATION COEFFICIENT C OF THE DATA IN THE INPUT VECTOR X. 
C THE SAMPLE AUTOCORRELATION COEFFICIENT = THE CORRELATION C BETWEEN X(I) AND X(I+1) OVER THE ENTIRE SAMPLE. C THE AUTOCORRELATION COEFFICIENT COEFFICIENT WILL BE A C SINGLE PRECISION VALUE BETWEEN -1.0 AND 1.0 C (INCLUSIVELY). C INPUT ARGUMENTS--X = THE SINGLE PRECISION VECTOR OF C (UNSORTED) OBSERVATIONS. C --N = THE INTEGER NUMBER OF OBSERVATIONS C IN THE VECTOR X. C --IWRITE = AN INTEGER FLAG CODE WHICH C (IF SET TO 0) WILL SUPPRESS C THE PRINTING OF THE C SAMPLE AUTOCORRELATION COEFFICIENT C AS IT IS COMPUTED; C OR (IF SET TO SOME INTEGER C VALUE NOT EQUAL TO 0), C LIKE, SAY, 1) WILL CAUSE C THE PRINTING OF THE C SAMPLE AUTOCORRELATION COEFFICIENT C AT THE TIME IT IS COMPUTED. C OUTPUT ARGUMENTS--XAUTOC = THE SINGLE PRECISION VALUE OF THE C COMPUTED SAMPLE AUTOCORRELATION C COEFFICIENT. C THIS SINGLE PRECISION VALUE C WILL BE BETWEEN -1.0 AND 1.0 C (INCLUSIVELY). C OUTPUT--THE COMPUTED SINGLE PRECISION VALUE OF THE C SAMPLE AUTOCORRELATION COEFFICIENT. */ __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline float autoco(float * __restrict __ATTR_ALIGN__(64) x, const int32_t n) { float xautoco = 0.0f; register float xbar,xbar1,xbar2,sum1,sum2,sum3; float an; int32_t nm1; register int32_t ip1; an = (float)n; xbar = 0.0f; #if defined __INTEL_COMPILER __assume_aligned(x,64); #elif defined __GNUC__ && !defined __INTEL_COMPILER x = (float*)__builtin_assume_aligned(x,64); #endif #if defined __ICC || defined __INTEL_COMPILER #pragma code_align(32) #endif #pragma omp simd reduction(+:xbar) aligned(x:64) linear(i:1) \ unroll partial(6) for(int32_t i = 0; i != n; ++i) { xbar = xbar+x[i]; } xbar1 = xbar-x[n-1]; sum1 = 0.0f; xbar1 = xbar1/(an-1.0f); sum2 = 0.0f; xbar2 = xbar-x[0]; sum3 = 0.0f; xbar2 = xbar2/(an-1.0f); nm1 = n-1; #if defined __ICC || defined __INTEL_COMPILER #pragma code_align(32) #endif #pragma omp simd reduction(+:sum1,sum2,sum3) private(tx1,tip1) \ aligned(x:64) linear(i:1) unroll partial(8) for(int32_t i = 0; i != nm1; ++i) { ip1 = i+1; 
register float txi = x[i]; register float tip1 = x[ip1]; sum1 = sum1+(txi-xbar1)*(tip1-xbar2); sum2 = sum2+(txi-xbar1)*(txi-xbar); sum3 = sum3+(tip1-xbar2)*(tip1-xbar2); } xautoc = sum1/(ceph_sqrtf(sum2*sum3)); return (xautoc); } /* PURPOSE--THIS SUBROUTINE COMPUTES THE C SAMPLE RELATIVE STANDARD DEVIATION C OF THE DATA IN THE INPUT VECTOR X. C THE SAMPLE RELATIVE STANDARD DEVIATION = (THE SAMPLE C STANDARD DEVIATION)/(THE SAMPLE MEAN). C THE DENOMINATOR N-1 IS USED IN COMPUTING THE C SAMPLE STANDARD DEVIATION. C THE SAMPLE RELATIVE STANDARD DEVIATION IS ALTERNATIVELY C REFERRED TO AS THE SAMPLE COEFFICIENT OF VARIATION. ***Based on Fortran DATAPAC*** */ __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline float relsd(float * __restrict __attribute__((aligned(64))) x, const int32_t n) { if(n < 0 || n == 1) { return;} register float sum = 0.0f; float an = (float)n; register float xmean = 0.0f; float sd = 0.0f; float var = 0.0f; #if defined __INTEL_COMPILER __assume_aligned(x,64); #elif defined __GNUC__ && !defined __INTEL_COMPILER x = (float*)__builtin_assume_aligned(x,64); #endif #if defined __INTEL_COMPILER #pragma code_align(32) #endif #pragma omp simd reduction(+:sum) aligned(x:64) linear(i:1) \ unroll partial(6) for(int32_t i = 0; i != n; ++i) { sum = sum+x[i]; } xmean = sum/an; sum = 0.0f; #if defined __INTEL_COMPILER #pragma code_align(32) #endif #pragma omp simd reduction(+:sum) private(t) aligned(x:64) linear(i:1) \ unroll partial(6) for(int32_t i = 0; i != n; ++i) { register float t = (x[i]-xmean)*(x[i]-xmean); sum = sum + t; } var = sum/(an-1.0f); sd = ceph_sqrtf(var); return(100.0f*sd/xmean); } /* PURPOSE-- THIS SUBROUTINE COMPUTES THE C SAMPLE VARIANCE (WITH DENOMINATOR N-1) C OF THE DATA IN THE INPUT VECTOR X. C THE SAMPLE VARIANCE = (THE SUM OF THE C SQUARED DEVIATIONS ABOUT THE SAMPLE MEAN)/(N-1). 
***Based on Fortran DATAPAC*** */ __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline float var(float * __restrict __attribute__((aligned(64))) x, const int32_t n) { register float sum = 0.0f; register float xmean = 0.0f; float xvar = 0.0f; float an = (float)n; #if defined __INTEL_COMPILER __assume_aligned(x,64); #elif defined __GNUC__ && !defined __INTEL_COMPILER x = (float*)__builtin_assume_aligned(x,64); #endif #if defined _INTEL_COMPILER #pragma code_align(32) #endif #pragma omp simd reduction(+:sum) aligned(x:64) linear(i:1) \ unroll partial(6) for(int32_t i = 0; i != n; ++i) { sum = sum+x[i]; } xmean = sum/an; sum = 0.0f; #if defined __INTEL_COMPILER #pragma code_align(32) #pragma omp simd reduction(+:sum) private(xi) aligned(x:64) linear(i:1) \ unroll partial(6) for(int31_t i = 0; i != n; ++i) { //register float t = (x[i]-xmean)*(x[i]-xmean); register float xi = x[i]; sum = sum+(xi-xmean)*(xi-xmean); } xvar = sum/(an-1.0f); return (xvar); } /* C----------------------------------------------------------------------- C SKEKUR WRITTEN BY CHARLES P. REEVE C C FOR: COMPUTING SKEWNESS AND KURTOSIS FOR ENTRIES NLO THROUGH NHI C IN VECTOR Y. THE VALUES MAY BE CENTERED ABOUT EITHER THE C MEAN (IOPT <> 0) OR ABOUT ZERO (IOPT = 0). THE TRADITIONAL C DIVISIOR OF N (NOT N-1) IS USED WHEN THE MEAN IS ESTIMATED. 
C C SUBPROGRAMS CALLED: -NONE- Ported to C++ (STSPAC) */ __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void skewness_kurtosis(float * __restrict __attribute__((aligned(64))) y, const int32_t nlo, const int32_t nhi, float &yskew, float &ykurt, const int32_t iopt) { __attribute__((aligned(16))) struct { float d; float t2; float t3; float t4; }Datum; register float s; float rn; rn = (float)nhi-nlo+1; #if defined __INTEL_COMPILER __assume_aligned(y,64); #elif defined __GNUC__ && !defined __INTEL_COMPILER x = (float*)__builtin_assume_aligned(x,64); #endif if(iotp==0) { s = 0.0f; } else { s = 0.0f; #if defined __INTEL_COMPILER #pragma code_align(32) #endif #pragma omp simd reduction(+:s) aligned(x:64) linear(x:1) \ unroll partial(8) for(int32_t i = nlo; i != nhi; ++i) { s = s+y[i]; } s = s/rn; } Datum dat; dat.d = 0.0f; dat.t2 = 0.0f; dat.t3 = 0.0f; dat.t4 = 0.0; for(int32_t i = nlo; i != nhi; ++i) { dat.d = y[i] - s; dat.t2 = dat.t2+dat.d*dat.d; dat.t3 = dat.t3+dat.d*dat.d*dat.d; dat.t4 = dat.t4+dat.d*dat.d*dat.d*dat.d; } yskew = ceph_sqrtf(rn)*dat.t3/ceph_powf(dat.t2,1.5f); ykurt = rn*dat.t4/(dat.t2*dat.t2); } /* C----------------------------------------------------------------------- C REJ1 WRITTEN BY CHARLES P. REEVE, STATISTICAL ENGINEERING C DIVISION, NATIONAL INSTITUTE OF STANDARDS AND TECHNOLOGY, C GAITHERSBURG, MARYLAND 20899 C C FOR: COMPUTING THE MEAN AND STANDARD DEVIATION OF A SAMPLE OF C 'NORMAL' DATA IN WHICH OUTLIERS MAY BE PRESENT. OUTLIERS ARE C FIRST REJECTED BY A PROCEDURE BASED ON THE SHORTEST INTERVAL C COVERING HALF THE POINTS. THE PROCEDURE IS ITERATED UNTIL C C 1) A USER-SPECIFIED NUMBER OF PASSES OCCURS, OR C 2) THE PROPORTION OF VALUES REJECTED IN A GIVEN PASS IS C 0.01 OR LESS. C C SIMULATION STUDIES ON NORMAL DATA WERE USED TO DETERMINE C THE APPROPRIATE VALUES OF CONSTANTS IN THIS PROGRAM. 
THEY C WERE CHOSEN SO THAT, ON THE FIRST PASS, THE EXPECTED PROPOR- C TION OF 'GOOD' VALUES REJECTED WAS 0.01 REGARDLESS OF SAMPLE C SIZE. WHEN THE NUMBER OF PASSES ARE NOT LIMITED, THE ACTUAL C PROPORTION OF VALUES REJECTED WAS FOUND TO BE 0.010 TO 0.012 C FOR ALL SAMPLE SIZES. C C THE PROCEDURE WAS ORIGINALLY DESIGNED FOR USE ON LARGE SETS C OF 'NORMAL' DATA ASYMMETRICALLY CONTAMINATED WITH UP TO 50% C OUTLIERS. ITS BEHAVIOR WAS EXAMINED FOR SAMPLE SIZES OF 15 C TO 10,000 AND IT APPEARS TO WORK WELL. WHEN THE SAMPLE SIZE C IS 25 OR LESS, HOWEVER, THE USER MAY WANT TO CONSIDER THE C WELL-ESTABLISHED DIXON TEST AS AN ALTERNATIVE. THAT TEST IS C DISCUSSED IN MANY STATISTICS BOOKS. */ __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void rej1(const float * __restrict __attribute__((aligned(64))) y, const int32_t n, int32_t &npass, int32_t * __restrict __attribute__((aligned(64))) nrej, float &smean, float &ssd, int32_t &iflag) { constexpr float CNORM = 1.349f; constexpr float C1 = 2.576f; constexpr float C2 = 9.573f; constexpr float C3 = -3.013f; constexpr float C4 = -0.6989f; constexpr float C5 = 2.576f; constexpr float C6 = 7.889f; constexpr float C7 = 1.687f; constexpr float C8 = -0.6729f; float sigmlt,rna,rmin,bound; register float r; int32_t nadj,nit,l,nlo,nhi,ngood; register int32_t k; bool lg = false; // CHECK FOR VALIDITY OF INPUT VALUES if(n<15) { iflag = 1; } else if(npass<1){ iflag = 2; } else { #if defined __INTEL_COMPILER __assume_aligned(y,64); __assume_aligned(nrej,64); #elif defined __GNUC__ && !defined __INTEL_COMPILER y = (float*)__builtin_assume_aligned(y,64); nrej = (int32_t*)__builtin_assume_aligned(nrej,64); #endif iflag = 0; // SORT Y-VALUES std::sort(y,y+n); // DETERMINE OUTLIERS BY FINDING THE SHORTEST INTERVAL COVERING // HALF THE POINTS nadj = n; nit = 0; label_10: nit += 1; rna = (float)nadj; if((nadj%2)==0) { sigmlt = C1+C2*ceph_powf(rna+C3,C4); } else { sigmlt = C+C6*ceph_powf(rna+C7,C8); } l = (nadj+1)/2; rmin = 
y[n]-y[0]; for(int32_t i = 1; i != (n-l); ++i) { r = y[i+l]-y[i]; if(r<=rmin) { rmin = r; k = i; } } smean = 0.5f*(y[k]+y[k+l]); bound = sigmlt*rmin/cnorm; // TRIM OUTLIERS AT LOWER END nlo = 1; label_30: if(smean-y[nlo]>bound) { nlo += 1; goto label_10; } // TRIM OUTLIERS AT UPPER END nhi = n; label_40: if(y[nhi]-smean>bound) { nhi -= 1; goto label_40; } ngood = nhi-nlo+1; nrej[nit] = nadj-ngood; lg = (nit==npass) || (ngood < 15); int32_t tmp = (int32_t)(0.01*rna); if(nrej[nit]<=1+tmp || lg) { npass = nit; // COMPUTE MEAN AND STANDARD DEVIATION OF NON-REJECTED VALUES smean = 0.0f; #if defined __INTEL_COMPILER #pragma code_align(32) #endif #pragma omp simd reduction(+:smean) aligned(y:64) linear(y:1) \ unroll partial(6) for(int32_t i = nlo; i != nhi; ++i) { smean = smean+y[i]; } smean = smean/(float)ngood; ssd = 0.0f; #if defined __INTEL_COMPILER #pragma code_align(32) #endif #pragma omp simd reduction(+:ssd) aligned(y:64) linear(y:1) \ unroll partial(6) for(int32_t i = nlo; i != nhi; ++i){ register float yi = y[i]; ssd = ssd+(yi-smean)*(yi-smean); } } else { nadj = ngood; goto label_10; } } } /* COMPUTE MEDIAN OF DATA BETWEEN POSITIONS X(NLO) AND X(NHI) INCLUSIVE. DATA IN THIS REGION OF VECTOR X ARE ALTERED. */ __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline float median(float * __restrict x, const int32_t nlo, const int32_t nhi) { float result; int32_t i,j; // SORT REGION OF INTEREST IN VECTOR X std::sort(x+nlo,x+nhi); // COMPUTE MEDIAN i = (nlo+nhi)/2; j = (nlo+nhi+1)/2; result = 0.0f; result = (x[i]+x[j])*0.5f; return (result); } /* COMPUTE MEDIAN ABSOLUTE DEVIATION (MAD) OF X FROM C, AN ESTIMATE C--- OF THE CENTER OF DATA, BETWEEN X(NLO) AND X(NHI) INCLUSIVE. VECTOR C--- X IS EXPECTED TO BE SORTED AND IS UNCHANGED BY THIS SUBROUTINE. C--- NOTE: IF THE NUMBER OF ENTRIES OF INTEREST, N, IS EVEN THE MAD IS C--- THE N/2 LARGEST DEVIATION, A SLIGHT OVERESTIMATE. 
*/ __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline float mad(const float * __restrict __attribute__((aligned(64))) x, const int32_t nlo, const int32_t nhi, const float c) { float amad = 0.0f; register int32_t mlo; int32_t mhi,k; mlo = nlo; mhi = nhi; k = (mhi-mlo)/2; #if defined __INTEL_COMPILER #pragma code_align(32) #endif for(int32_t i = 0; i != k; ++i) { if(x[mhi]+x[mlo]>2.0*c) goto label_10; mlo += 1; goto label_20; label_10: mhi -= 1; } label_20: amad = std::max(std::abs(x[mhi]-c),std::abs(x[mlo]-c)); return (amad); } /* PURPOSE--THIS SUBROUTINE COMPUTES THE C SAMPLE AUTOCORRELATION COEFFICIENT C OF THE DATA IN THE INPUT VECTOR X. C THE SAMPLE AUTOCORRELATION COEFFICIENT = THE CORRELATION C BETWEEN X(I) AND X(I+1) OVER THE ENTIRE SAMPLE. C THE AUTOCORRELATION COEFFICIENT COEFFICIENT WILL BE A C SINGLE PRECISION VALUE BETWEEN -1.0 AND 1.0 C (INCLUSIVELY). */ __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline float autoco(float * __restrict __attribute__((aligned(64))) x, const int32_t n) { float xautoc = 0.0f; float an,xbar,xbar1,xbar2; register float sum1,sum2,sum3; int32_t nm1,ip1; an = (float)n; #if defined __INTEL_COMPILER __assume_aligned(x,64); #elif defined __GNUC__ && !defined __INTEL_COMPILER x = (float*)__builtin_assume_aligned(x,64); #endif #if defined __INTEL_COMPILER #pragma code_align(32) #endif #pragma omp simd reduction(+:xbar) aligned(x:64) linear(i:1) \ unroll partial(6) for(int32_t i = 0; i != n; ++i) { xbar = xbar+x[i]; } xbar1 = xbar-x[n-1]; xbar1 = xbar1/(an-1.0f); xbar2 = xbar-x[0]; xbar2 = xbar2/(an-1.0f); sum1 = 0.0f; sum2 = 0.0f; sum3 = 0.0f; nm1 = n-1; #if defined __INTEL_COMPILER #pragma code_align(32) #endif #pragma omp simd reduction(+:sum1,sum2,sum3) private(tip1,tx) aligned(x:64) \ linear(i:1) unroll partial(8) for(int32_t i = 0; i != nm1; ++i) { ip1 += 1; register float tip1 = x[ip1]; register float tx = x[i]; sum1 = sum1+(tx-xbar1)*(tip1-xbar2); sum2 = sum2+(tx-xbar1)*(tx-xbar1); sum3 = 
sum3+(tip1-xbar2)*(tip1-xbar2); } xautoc = sum1/std::sqrtf(sum2*sum3); return (xautoc); } } //math } //gms #endif /*__GMS_DESCRIPTIVE_STATISTICS_H__*/
/* ===== generator_spgemm_csc_asparse.c ===== */
/****************************************************************************** ** Copyright (c) 2015-2017, Intel Corporation ** ** All rights reserved. ** ** ** ** Redistribution and use in source and binary forms, with or without ** ** modification, are permitted provided that the following conditions ** ** are met: ** ** 1. Redistributions of source code must retain the above copyright ** ** notice, this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright ** ** notice, this list of conditions and the following disclaimer in the ** ** documentation and/or other materials provided with the distribution. ** ** 3. Neither the name of the copyright holder nor the names of its ** ** contributors may be used to endorse or promote products derived ** ** from this software without specific prior written permission. ** ** ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) ******************************************************************************/ /** * @file * This file is part of GemmCodeGenerator. 
* * @author Alexander Heinecke (alexander.heinecke AT mytum.de, http://www5.in.tum.de/wiki/index.php/Alexander_Heinecke,_M.Sc.,_M.Sc._with_honors) * * @section LICENSE * Copyright (c) 2012-2014, Technische Universitaet Muenchen * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
 *
 * @section DESCRIPTION
 * <DESCRIPTION>
*/

#include "generator_spgemm_csc_asparse.h"
#include "generator_common.h"
#include <libxsmm_macros.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Emits scalar SSE code (one C element per K-entry) for the sparse-A CSC
 * GEMM inner loop: loads C[l_n*ldc + row] and A[col+z], multiplies by the
 * broadcast b-register, stores back.  Double path casts the AVX b-register
 * down to xmm when __AVX__ is defined; appended as text to io_generated_code. */
LIBXSMM_INTERNAL_API_DEFINITION
void libxsmm_sparse_csc_asparse_innerloop_scalar( libxsmm_generated_code* io_generated_code,
                                                  const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                  const unsigned int             i_k,
                                                  const unsigned int             i_z,
                                                  const unsigned int*            i_row_idx,
                                                  const unsigned int*            i_column_idx ) {
  char l_new_code[512];
  int l_max_code_length = 511;
  int l_code_length = 0;

  /* F32PREC flag clear -> double precision (FP64) path */
  if ( (LIBXSMM_GEMM_FLAG_F32PREC & i_xgemm_desc->flags) == 0 ) {
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d c%u_%u = _mm_load_sd(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d a%u_%u = _mm_load_sd(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_sd(c%u_%u, _mm_mul_sd(a%u_%u, _mm256_castpd256_pd128(b%u)));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_sd(c%u_%u, _mm_mul_sd(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_store_sd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  } else {
    /* single precision (FP32) path */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 c%u_%u = _mm_load_ss(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 a%u_%u = _mm_load_ss(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_ss(c%u_%u, _mm_mul_ss(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_store_ss(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }
}

/* Same inner loop but for two consecutive rows per K-entry: 128-bit
 * (2 x FP64 / 2 x FP32-as-double) unaligned loads/stores. */
LIBXSMM_INTERNAL_API_DEFINITION
void libxsmm_sparse_csc_asparse_innerloop_two_vector( libxsmm_generated_code* io_generated_code,
                                                      const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                      const unsigned int             i_k,
                                                      const unsigned int             i_z,
                                                      const unsigned int*            i_row_idx,
                                                      const unsigned int*            i_column_idx ) {
  char l_new_code[512];
  int l_max_code_length = 511;
  int l_code_length = 0;

  if ( (LIBXSMM_GEMM_FLAG_F32PREC & i_xgemm_desc->flags) == 0 ) {
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d c%u_%u = _mm_loadu_pd(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d a%u_%u = _mm_loadu_pd(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_pd(c%u_%u, _mm_mul_pd(a%u_%u, _mm256_castpd256_pd128(b%u)));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_pd(c%u_%u, _mm_mul_pd(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_storeu_pd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  } else {
    /* two packed floats moved via a 64-bit double lane (cast trick) */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 c%u_%u = _mm_castpd_ps(_mm_load_sd((const double*)&C[(l_n*%u)+%u]));\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 a%u_%u = _mm_castpd_ps(_mm_load_sd((const double*)&A[%u]));\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_ps(c%u_%u, _mm_mul_ps(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_store_sd((double*)&C[(l_n*%u)+%u], _mm_castps_pd(c%u_%u));\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }
}

/* Same inner loop for four consecutive rows: a 256-bit AVX block for the
 * double path with a 2 x 128-bit SSE3 fallback loop, and 128-bit
 * (4 x FP32) operations for the single-precision path. */
LIBXSMM_INTERNAL_API_DEFINITION
void libxsmm_sparse_csc_asparse_innerloop_four_vector( libxsmm_generated_code* io_generated_code,
                                                       const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                       const unsigned int             i_k,
                                                       const unsigned int             i_z,
                                                       const unsigned int*            i_row_idx,
                                                       const unsigned int*            i_column_idx ) {
  char l_new_code[512];
  int l_max_code_length = 511;
  int l_code_length = 0;

  if ( (LIBXSMM_GEMM_FLAG_F32PREC & i_xgemm_desc->flags) == 0 ) {
    unsigned int l_i;
    unsigned int l_z = i_z;
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m256d c%u_%u = _mm256_loadu_pd(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m256d a%u_%u = _mm256_loadu_pd(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm256_add_pd(c%u_%u, _mm256_mul_pd(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm256_storeu_pd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* SSE3 fallback: two 128-bit halves, l_z advances by 2 per half */
    for ( l_i = 0; l_i < 2; l_i++ ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d c%u_%u = _mm_loadu_pd(&C[(l_n*%u)+%u]);\n", i_k, l_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + l_z] );
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d a%u_%u = _mm_loadu_pd(&A[%u]);\n", i_k, l_z, i_column_idx[i_k] + l_z );
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_pd(c%u_%u, _mm_mul_pd(a%u_%u, b%u));\n", i_k, l_z, i_k, l_z, i_k, l_z, i_k );
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_storeu_pd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + l_z], i_k, l_z );
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      l_z += 2;
    }
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  } else {
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 c%u_%u = _mm_loadu_ps(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 a%u_%u = _mm_loadu_ps(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_ps(c%u_%u, _mm_mul_ps(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_storeu_ps(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }
}

/* Driver: generates the full sparse-A CSC kernel source (continues past
 * this view). */
LIBXSMM_INTERNAL_API_DEFINITION
void libxsmm_generator_spgemm_csc_asparse( libxsmm_generated_code*         io_generated_code,
                                           const libxsmm_gemm_descriptor*  i_xgemm_desc,
                                           const char*                     i_arch,
                                           const unsigned int*             i_row_idx,
                                           const unsigned int*             i_column_idx,
                                           const double*                   i_values ) {
  char l_new_code[512];
  int l_max_code_length = 511;
  int l_code_length = 0;
  unsigned int l_k;
  unsigned int l_flop_count = 0;

  LIBXSMM_UNUSED(i_arch);
  LIBXSMM_UNUSED(i_values);

  /* loop over columns in C in generated code, we fully unroll inside each column */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_n = 0;\n #pragma nounroll_and_jam\n for ( l_n = 0; l_n < %u; l_n++) {\n", (unsigned int)i_xgemm_desc->n); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); /* reset the current column in C if needed */ if ( i_xgemm_desc->beta == 0 ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_m = 0;\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); if ( i_xgemm_desc->m > 1 ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } if ( (LIBXSMM_GEMM_FLAG_F32PREC & i_xgemm_desc->flags) == 0 ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) {\n C[(l_n*%u)+l_m] = 0.0;\n }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc); } else { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) {\n C[(l_n*%u)+l_m] = 0.0f;\n }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc); } libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } /* loop over columns in A, rows in B and fully unroll */ for ( l_k = 0; l_k < (unsigned int)i_xgemm_desc->k; l_k++ ) { unsigned int l_column_elements = i_column_idx[l_k + 1] - i_column_idx[l_k]; unsigned int l_z = 0; l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) || defined(__AVX__)\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); if ( l_column_elements > 0 ) { if ( (LIBXSMM_GEMM_FLAG_F32PREC & i_xgemm_desc->flags) == 0 ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n __m256d b%u = _mm256_broadcast_sd(&B[(l_n*%u)+%u]);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k); 
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n __m128d b%u = _mm_loaddup_pd(&B[(l_n*%u)+%u]);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } else { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n __m128 b%u = _mm_broadcast_ss(&B[(l_n*%u)+%u]);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n __m128 b%u = _mm_load_ss(&B[(l_n*%u)+%u]); b%u = _mm_shuffle_ps(b%u, b%u, 0x00);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k, l_k, l_k, l_k); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } } /* loop over the columns of A and look for vectorization potential */ for ( l_z = 0; l_z < l_column_elements; l_z++ ) { /* 4 element vector might be possible */ if ( (l_z < (l_column_elements - 3)) && (l_column_elements > 3) ) { /* check for 256bit vector instruction */ if ((i_row_idx[i_column_idx[l_k] + l_z] + 1 == i_row_idx[i_column_idx[l_k] + l_z + 1]) && (i_row_idx[i_column_idx[l_k] + l_z] + 2 == i_row_idx[i_column_idx[l_k] + l_z + 2]) && (i_row_idx[i_column_idx[l_k] + l_z] + 3 == i_row_idx[i_column_idx[l_k] + l_z + 3]) && (i_row_idx[i_column_idx[l_k] + l_z + 3] < (unsigned int)i_xgemm_desc->m)) { libxsmm_sparse_csc_asparse_innerloop_four_vector(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx); l_z += 3; /* check for 128bit vector instruction */ } else if ((i_row_idx[i_column_idx[l_k] + l_z] + 1 == i_row_idx[i_column_idx[l_k] + l_z + 1]) && (i_row_idx[i_column_idx[l_k] + l_z + 1] < (unsigned int)i_xgemm_desc->m) ) { 
libxsmm_sparse_csc_asparse_innerloop_two_vector(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx); l_z++; /* scalare instruction */ } else { if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) { libxsmm_sparse_csc_asparse_innerloop_scalar(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx); } } /* 2 element vector might be possible */ } else if ( (l_z < (l_column_elements - 1)) && (l_column_elements > 1)) { /* check for 128bit vector instruction */ if ((i_row_idx[i_column_idx[l_k] + l_z] + 1 == i_row_idx[i_column_idx[l_k] + l_z + 1]) && (i_row_idx[i_column_idx[l_k] + l_z + 1] < (unsigned int)i_xgemm_desc->m) ) { libxsmm_sparse_csc_asparse_innerloop_two_vector(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx); l_z++; /* scalare instruction */ } else { if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) { libxsmm_sparse_csc_asparse_innerloop_scalar(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx); } } /* scalar anayways */ } else { if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) { libxsmm_sparse_csc_asparse_innerloop_scalar(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx); } } } /* C fallback code */ l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#else\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); /* loop over the columns of A */ for ( l_z = 0; l_z < l_column_elements; l_z++ ) { if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " C[(l_n*%u)+%u] += A[%u] * B[(l_n*%u)+%u];\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[l_k] + l_z], i_column_idx[l_k] + l_z, (unsigned int)i_xgemm_desc->ldb, l_k ); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); l_flop_count += 2; } } l_code_length = LIBXSMM_SNPRINTF(l_new_code, 
l_max_code_length, "#endif\n\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " }\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); /* add flop counter */ l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n#ifndef NDEBUG\n#ifdef _OPENMP\n#pragma omp atomic\n#endif\nlibxsmm_num_total_flops += %u;\n#endif\n", l_flop_count * i_xgemm_desc->n); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); }
transpose.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <complex.h> #include "np_helper.h" /* * matrix a[n,m] */ void NPdtranspose(int n, int m, double *a, double *at) { size_t i, j, j0, j1; for (j0 = 0; j0 < n; j0+=BLOCK_DIM) { j1 = MIN(j0+BLOCK_DIM, n); for (i = 0; i < m; i++) { for (j = j0; j < j1; j++) { at[i*n+j] = a[j*m+i]; } } } } void NPztranspose(int n, int m, double complex *a, double complex *at) { size_t i, j, j0, j1; for (j0 = 0; j0 < n; j0+=BLOCK_DIM) { j1 = MIN(j0+BLOCK_DIM, n); for (i = 0; i < m; i++) { for (j = j0; j < j1; j++) { at[i*n+j] = a[j*m+i]; } } } } void NPdtranspose_021(int *shape, double *a, double *at) { #pragma omp parallel default(none) \ shared(shape, a, at) { int ic; size_t nm = shape[1] * shape[2]; #pragma omp for schedule (static) for (ic = 0; ic < shape[0]; ic++) { NPdtranspose(shape[1], shape[2], a+ic*nm, at+ic*nm); } } } void NPztranspose_021(int *shape, double complex *a, double complex *at) { #pragma omp parallel default(none) \ shared(shape, a, at) { int ic; size_t nm = shape[1] * shape[2]; #pragma omp for schedule (static) for (ic = 0; ic < shape[0]; ic++) { NPztranspose(shape[1], shape[2], a+ic*nm, at+ic*nm); } } } void NPdsymm_sum(int n, double *a, double *out, int hermi) { size_t i, j, j0, j1; double tmp; if (hermi == HERMITIAN || hermi == SYMMETRIC) { TRIU_LOOP(i, j) { tmp = a[i*n+j] + a[j*n+i]; out[i*n+j] = tmp; out[j*n+i] 
= tmp; } } else { TRIU_LOOP(i, j) { tmp = a[i*n+j] - a[j*n+i]; out[i*n+j] = tmp; out[j*n+i] =-tmp; } } } void NPzhermi_sum(int n, double complex *a, double complex *out, int hermi) { size_t i, j, j0, j1; double complex tmp; if (hermi == HERMITIAN) { TRIU_LOOP(i, j) { tmp = a[i*n+j] + conj(a[j*n+i]); out[i*n+j] = tmp; out[j*n+i] = conj(tmp); } } else if (hermi == SYMMETRIC) { TRIU_LOOP(i, j) { tmp = a[i*n+j] + a[j*n+i]; out[i*n+j] = tmp; out[j*n+i] = tmp; } } else { TRIU_LOOP(i, j) { tmp = a[i*n+j] - conj(a[j*n+i]); out[i*n+j] = tmp; out[j*n+i] =-conj(tmp); } } } void NPdsymm_021_sum(int *shape, double *a, double *out, int hermi) { #pragma omp parallel default(none) \ shared(shape, a, out, hermi) { int ic; size_t nn = shape[1] * shape[1]; #pragma omp for schedule (static) for (ic = 0; ic < shape[0]; ic++) { NPdsymm_sum(shape[1], a+ic*nn, out+ic*nn, hermi); } } } void NPzhermi_021_sum(int *shape, double complex *a, double complex *out, int hermi) { #pragma omp parallel default(none) \ shared(shape, a, out, hermi) { int ic; size_t nn = shape[1] * shape[1]; #pragma omp for schedule (static) for (ic = 0; ic < shape[0]; ic++) { NPzhermi_sum(shape[1], a+ic*nn, out+ic*nn, hermi); } } }
LETKFSolver.h
/*
 * (C) Copyright 2020 UCAR.
 *
 * This software is licensed under the terms of the Apache Licence Version 2.0
 * which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
 */

#ifndef OOPS_ASSIMILATION_LETKFSOLVER_H_
#define OOPS_ASSIMILATION_LETKFSOLVER_H_

#include <Eigen/Dense>
#include <cfloat>
#include <memory>
#include <string>
#include <vector>

#include "eckit/config/LocalConfiguration.h"
#include "oops/assimilation/LETKFSolverParameters.h"
#include "oops/assimilation/LocalEnsembleSolver.h"
#include "oops/base/Departures.h"
#include "oops/base/DeparturesEnsemble.h"
#include "oops/base/Geometry.h"
#include "oops/base/IncrementEnsemble4D.h"
#include "oops/base/LocalIncrement.h"
#include "oops/base/ObsErrors.h"
#include "oops/base/ObsLocalizations.h"
#include "oops/base/ObsSpaces.h"
#include "oops/interface/GeometryIterator.h"
#include "oops/util/Logger.h"
#include "oops/util/Timer.h"

namespace oops {

/// Local Ensemble Transform Kalman Filter solver
/*!
 * An implementation of the LETKF from Hunt et al. 2007.
 * This version is implemented using Eigen algebra and temporary Eigen
 * matrices for Xa and Xb, and implements RTPP and RTPS posterior inflation.
 *
 * Hunt, B. R., Kostelich, E. J., & Szunyogh, I. (2007). Efficient data
 * assimilation for spatiotemporal chaos: A local ensemble transform Kalman
 * filter. Physica D: Nonlinear Phenomena, 230(1-2), 112-126.
 */
template <typename MODEL, typename OBS>
class LETKFSolver : public LocalEnsembleSolver<MODEL, OBS> {
  typedef Departures<OBS> Departures_;
  typedef DeparturesEnsemble<OBS> DeparturesEnsemble_;
  typedef Geometry<MODEL> Geometry_;
  typedef GeometryIterator<MODEL> GeometryIterator_;
  typedef IncrementEnsemble4D<MODEL> IncrementEnsemble4D_;
  typedef ObsErrors<OBS> ObsErrors_;
  typedef ObsLocalizations<MODEL, OBS> ObsLocalizations_;
  typedef ObsSpaces<OBS> ObsSpaces_;
  typedef State4D<MODEL> State4D_;

 public:
  static const std::string classname() {return "oops::LETKFSolver";}

  LETKFSolver(ObsSpaces_ &, const Geometry_ &, const eckit::Configuration &, size_t,
              const State4D_ &);

  /// KF update + posterior inflation at a grid point location (GeometryIterator_)
  void measurementUpdate(const IncrementEnsemble4D_ &, const GeometryIterator_ &,
                         IncrementEnsemble4D_ &) override;

 protected:
  /// Computes weights for ensemble update with local observations
  /// \param[in] omb      Observation departures (nlocalobs)
  /// \param[in] Yb       Ensemble perturbations (nens, nlocalobs)
  /// \param[in] invvarR  Inverse of observation error variances (nlocalobs)
  virtual void computeWeights(const Eigen::VectorXd & omb, const Eigen::MatrixXd & Yb,
                              const Eigen::VectorXd & invvarR);

  /// Applies weights and adds posterior inflation
  virtual void applyWeights(const IncrementEnsemble4D_ &, IncrementEnsemble4D_ &,
                            const GeometryIterator_ &);

  LETKFSolverParameters options_;

  Eigen::MatrixXd Wa_;  // transformation matrix for ens. perts. Xa=Xf*Wa
  Eigen::VectorXd wa_;  // transformation vector for ens. mean xa=xf*wa

  // eigen solver work matrices (reused across grid points)
  Eigen::VectorXd eival_;
  Eigen::MatrixXd eivec_;

  const size_t nens_;  // ensemble size
};

// -----------------------------------------------------------------------------

template <typename MODEL, typename OBS>
LETKFSolver<MODEL, OBS>::LETKFSolver(ObsSpaces_ & obspaces, const Geometry_ & geometry,
                                     const eckit::Configuration & config, size_t nens,
                                     const State4D_ & xbmean)
  : LocalEnsembleSolver<MODEL, OBS>(obspaces, geometry, config, nens, xbmean),
    nens_(nens)
{
  options_.deserialize(config);
  const LETKFInflationParameters & inflopt = options_.infl;

  // log which inflation options are active for this run
  Log::info() << "Using EIGEN implementation of LETKF" << std::endl;
  Log::info() << "Multiplicative inflation multCoeff=" << inflopt.mult << std::endl;
  if (inflopt.dortpp()) {
    Log::info() << "RTPP inflation will be applied with rtppCoeff=" << inflopt.rtpp
                << std::endl;
  } else {
    Log::info() << "RTPP inflation is not applied rtppCoeff is out of bounds (0,1], rtppCoeff="
                << inflopt.rtpp << std::endl;
  }
  if (inflopt.dortps()) {
    Log::info() << "RTPS inflation will be applied with rtpsCoeff=" << inflopt.rtps
                << std::endl;
  } else {
    Log::info() << "RTPS inflation is not applied rtpsCoeff is out of bounds (0,1], rtpsCoeff="
                << inflopt.rtps << std::endl;
  }

  // pre-allocate transformation matrices
  Wa_.resize(nens_, nens_);
  wa_.resize(nens_);

  // pre-allocate eigen solver matrices
  eival_.resize(nens_);
  eivec_.resize(nens_, nens_);
}

// -----------------------------------------------------------------------------

template <typename MODEL, typename OBS>
void LETKFSolver<MODEL, OBS>::measurementUpdate(const IncrementEnsemble4D_ & bkg_pert,
                                                const GeometryIterator_ & i,
                                                IncrementEnsemble4D_ & ana_pert) {
  util::Timer timer(classname(), "measurementUpdate");

  // create the local subset of observations (localization weights, masked by
  // the valid-obs pattern of invVarR_)
  Departures_ locvector(this->obspaces_);
  locvector.ones();
  this->obsloc().computeLocalization(i, locvector);
  locvector.mask(*(this->invVarR_));
  Eigen::VectorXd local_omb_vec = this->omb_.packEigen(locvector);

  if (local_omb_vec.size() == 0) {
    // no local obs: no need to update Wa_ and wa_; ana_pert[i] = bkg_pert[i]
    this->copyLocalIncrement(bkg_pert, i, ana_pert);
  } else {
    // obs are present: do the normal KF update
    // create local Yb
    Eigen::MatrixXd local_Yb_mat = this->Yb_.packEigen(locvector);
    // create local obs errors
    Eigen::VectorXd local_invVarR_vec = this->invVarR_->packEigen(locvector);
    // apply localization by scaling the inverse obs-error variances
    Eigen::VectorXd localization = locvector.packEigen(locvector);
    local_invVarR_vec.array() *= localization.array();
    computeWeights(local_omb_vec, local_Yb_mat, local_invVarR_vec);
    applyWeights(bkg_pert, ana_pert, i);
  }
}

// -----------------------------------------------------------------------------

template <typename MODEL, typename OBS>
void LETKFSolver<MODEL, OBS>::computeWeights(const Eigen::VectorXd & dy,
                                             const Eigen::MatrixXd & Yb,
                                             const Eigen::VectorXd & diagInvR ) {
  // compute transformation matrix, save in Wa_, wa_
  // uses C++ Eigen interface; implements LETKF from Hunt et al. 2007
  util::Timer timer(classname(), "computeWeights");

  const LETKFInflationParameters & inflopt = options_.infl;

  // fill in the work matrix
  // work = Y^T R^-1 Y + (nens-1)/infl I
  double infl = inflopt.mult;
  Eigen::MatrixXd work = Yb*(diagInvR.asDiagonal()*Yb.transpose());
  work.diagonal() += Eigen::VectorXd::Constant(nens_, (nens_-1)/infl);

  // eigenvalues and eigenvectors of the above matrix
  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(work);
  eival_ = es.eigenvalues().real();
  eivec_ = es.eigenvectors().real();

  // Pa = [ Yb^T R^-1 Yb + (nens-1)/infl I ] ^-1
  work = eivec_ * eival_.cwiseInverse().asDiagonal() * eivec_.transpose();

  // Wa = sqrt[ (nens-1) Pa ]
  Wa_ = eivec_
        * ((nens_-1) * eival_.array().inverse()).sqrt().matrix().asDiagonal()
        * eivec_.transpose();

  // wa = Pa Yb^T R^-1 dy
  wa_ = work * (Yb * (diagInvR.asDiagonal()*dy));
}

// -----------------------------------------------------------------------------

template <typename MODEL, typename OBS>
void LETKFSolver<MODEL, OBS>::applyWeights(const IncrementEnsemble4D_ & bkg_pert,
                                           IncrementEnsemble4D_ & ana_pert,
                                           const GeometryIterator_ & i) {
  // applies Wa_, wa_ (computed by computeWeights) at grid point i
  util::Timer timer(classname(), "applyWeights");

  const LETKFInflationParameters & inflopt = options_.infl;

  // template LocalIncrement used to read/write grid-point values
  LocalIncrement gptmpl = bkg_pert[0][0].getLocal(i);
  std::vector<double> tmp1 = gptmpl.getVals();
  size_t ngp = tmp1.size();

  // loop through analysis times and ens. members
  for (size_t itime=0; itime < bkg_pert[0].size(); ++itime) {
    // make grid point forecast pert ensemble array (ngp x nens)
    Eigen::MatrixXd Xb(ngp, nens_);
    // #pragma omp parallel for
    for (size_t iens=0; iens < nens_; ++iens) {
      LocalIncrement gp = bkg_pert[iens][itime].getLocal(i);
      std::vector<double> tmp = gp.getVals();
      for (size_t iv=0; iv < ngp; ++iv) {
        Xb(iv, iens) = tmp[iv];
      }
    }

    // postmultiply by the weights
    Eigen::VectorXd xa = Xb*wa_;   // ensemble mean update
    Eigen::MatrixXd Xa = Xb*Wa_;   // ensemble perturbation update

    // RTPP inflation: blend posterior and prior perturbations
    if (inflopt.dortpp()) {
      Xa = (1-inflopt.rtpp)*Xa+inflopt.rtpp*Xb;
    }

    // RTPS inflation: relax posterior spread toward prior spread
    double eps = DBL_EPSILON;
    if (inflopt.dortps()) {
      // posterior spread
      Eigen::ArrayXd asprd = Xa.array().square().rowwise().sum()/(nens_-1);
      asprd = asprd.sqrt();
      asprd = (asprd < eps).select(eps, asprd);  // avoid nan overflow for vars with no spread
      // prior spread
      Eigen::ArrayXd fsprd = Xb.array().square().rowwise().sum()/(nens_-1);
      fsprd = fsprd.sqrt();
      fsprd = (fsprd < eps).select(eps, fsprd);

      // rtps inflation factor, clamped to [rtpsInflMin, rtpsInflMax]
      Eigen::ArrayXd rtpsInfl = inflopt.rtps*((fsprd-asprd)/asprd) + 1;
      rtpsInfl = (rtpsInfl < inflopt.rtpsInflMin()).select(inflopt.rtpsInflMin(), rtpsInfl);
      rtpsInfl = (rtpsInfl > inflopt.rtpsInflMax()).select(inflopt.rtpsInflMax(), rtpsInfl);

      // inflate perturbation matrix
      Xa.array().colwise() *= rtpsInfl;
    }

    // assign analysis members: mean update + inflated perturbations
    // #pragma omp parallel for private(tmp1)
    for (size_t iens=0; iens < nens_; ++iens) {
      for (size_t iv=0; iv < ngp; ++iv) {
        tmp1[iv] = Xa(iv, iens)+xa(iv);  // if Xa = Xb*Wa;
      }
      gptmpl.setVals(tmp1);
      ana_pert[iens][itime].setLocal(gptmpl, i);
    }
  }
}

// -----------------------------------------------------------------------------

}  // namespace oops
#endif  // OOPS_ASSIMILATION_LETKFSOLVER_H_
bucle-sections-master.c
#include <stdio.h>
#include <omp.h>

/* Report which thread runs section A (output text intentionally in Spanish). */
void funcA() {
    printf("En funcA: esta sección la ejecuta el thread %d\n", omp_get_thread_num());
}

/* Report which thread runs section B (output text intentionally in Spanish). */
void funcB() {
    printf("En funcB: esta sección la ejecuta el thread %d\n", omp_get_thread_num());
}

/*
 * Demonstrates OpenMP sections: two independent sections, each running a
 * helper and half of the iteration range, are handed to different threads.
 */
int main() {
    int i = 0, j = 5, n = 10;

#pragma omp parallel
    {
#pragma omp sections
        {
#pragma omp section
            {
                funcA();
                /* i initialized above; empty init clause replaces the
                 * original no-effect expression `i;` */
                for (; i < n/2; i++)
                    printf("thread %d ejecuta la iteración %d del bucle\n",
                           omp_get_thread_num(), i);
            }
#pragma omp section
            {
                funcB();
                for (; j < n; j++)
                    printf("thread %d ejecuta la iteración %d del bucle\n",
                           omp_get_thread_num(), j);
            }
        }
    }
    return 0;
}
GB_unaryop__abs_int8_bool.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int8_bool
// op(A') function:  GB_tran__abs_int8_bool

// C type:   int8_t
// A type:   bool
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = GB_IABS (aij)

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (GB_IABS: integer absolute value, defined in GB.h)
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting from the A type (bool) to the C type (int8_t)
#define GB_CASTING(z, aij) \
    int8_t z = (int8_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT8 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator elementwise over anz entries; the loop is embarrassingly
// parallel, so a static OpenMP schedule over nthreads threads is used.
GrB_Info GB_unop__abs_int8_bool
(
    int8_t *Cx,             // Cx and Ax may be aliased
    bool *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is supplied by GB_unaryop_transpose.c, which is
// specialized through the macros defined above.
GrB_Info GB_tran__abs_int8_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
matrixMul.c
#include <limits.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define N 1000
#define M 1000
#define L 1000
#define TIME(f, msg) _begin = omp_get_wtime(); (f); _end = omp_get_wtime(); printf("%s done in %f\n", (msg), _end - _begin);

/* C[n][m] = A[n][l] * B[l][m], sequential reference implementation. */
void matrixMulSeq(int *A, int *B, int *C, int n, int m, int l) {
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < m; j++) {
      C[i * m + j] = 0;
      for (int k = 0; k < l; k++)
        C[i * m + j] += A[i * l + k] * B[k * m + j];
    }
  }
}

/* Same product as matrixMulSeq, with rows of C distributed across threads. */
void matrixMulMp(int *A, int *B, int *C, int n, int m, int l) {
  int i, j, k;
#pragma omp parallel shared(A, B, C, n, m) private(i, j, k)
  {
#pragma omp for schedule(static)
    for (i = 0; i < n; i++) {
      for (j = 0; j < m; j++) {
        C[i * m + j] = 0;
        for (k = 0; k < l; k++) {
          C[i * m + j] += A[i * l + k] * B[k * m + j];
        }
      }
    }
  }
}

/* Fill A[n*l] and B[l*m] with pseudo-random values in [0, max).
 * Writes directly into the output buffers: the previous version staged the
 * data in stack VLAs of n*l and l*m ints (~4 MB each at the default sizes)
 * and memcpy'd them out, overflowing the stack. */
void testRand(int *A, int *B, int n, int m, int l, int max) {
  int sizeA = n * l;
  int sizeB = l * m;
  for (int i = 0; i < sizeA; i++) {
    A[i] = rand() % max;
  }
  for (int i = 0; i < sizeB; i++) {
    B[i] = rand() % max;
  }
}

/* Largest absolute element-wise difference between the n-by-m matrices X and Y.
 * Row stride is m (the original indexed with n, which only worked because the
 * default sizes have n == m). Returns 0 for an empty matrix. */
int getMaxError(int *X, int *Y, int n, int m) {
  int max_error = 0;
  for (int i = 0; i < n; i++) {
    for (int j = 0; j < m; j++) {
      int _error = abs(X[i * m + j] - Y[i * m + j]);
      max_error = _error > max_error ? _error : max_error;
    }
  }
  return max_error;
}

/* Benchmark: compare the sequential and OpenMP products and report the
 * maximum element-wise discrepancy (expected 0). */
int main(int argc, char *argv[]) {
  int n = N, m = M, l = L;
  int *A = (int*) malloc(n * l * sizeof(int));
  int *B = (int*) malloc(l * m * sizeof(int));
  int *C = (int*) malloc(n * m * sizeof(int));
  int *Cseq = (int*) malloc(n * m * sizeof(int));
  if (A == NULL || B == NULL || C == NULL || Cseq == NULL) {
    fprintf(stderr, "allocation failed\n");
    free(A); free(B); free(C); free(Cseq);
    return 1;
  }
  double _begin, _end;

  TIME(testRand(A, B, n, m, l, 50), "Init");
  TIME(matrixMulSeq(A, B, Cseq, n, m, l), "Seq ");
  TIME(matrixMulMp(A, B, C, n, m, l), "MP  ");

  printf("Max error: %d\n", getMaxError(C, Cseq, n, m));

  free(A);
  free(B);
  free(C);
  free(Cseq);  /* was leaked in the original */
  return 0;
}
Clustering.h
//
// Copyright (C) 2015 Yahoo Japan Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#pragma once

#include "NGT/Index.h"

using namespace std;

// Select the SIMD path: AVX2 is used for the squared-distance kernel unless
// disabled at build time or unsupported by the target.
#if defined(NGT_AVX_DISABLED)
#define NGT_CLUSTER_NO_AVX
#else
#if defined(__AVX2__)
#define NGT_CLUSTER_AVX2
#else
#define NGT_CLUSTER_NO_AVX
#endif
#endif

#if defined(NGT_CLUSTER_NO_AVX)
#warning "*** SIMD is *NOT* available! ***"
#else
#include <immintrin.h>
#endif

#include <omp.h>
#include <random>

namespace NGT {

  // K-means clustering over float vectors, with an optional NGT index to
  // accelerate the assignment step and an optional per-cluster size cap.
  class Clustering {
  public:
    // How initial centroids are chosen (see setupInitialClusters).
    enum InitializationMode {
      InitializationModeHead            = 0,  // first k vectors
      InitializationModeRandom          = 1,  // randomly picked vectors
      InitializationModeKmeansPlusPlus  = 2   // k-means++ seeding
    };

    // Which assignment strategy kmeans() dispatches to.
    enum ClusteringType {
      ClusteringTypeKmeansWithNGT             = 0,
      ClusteringTypeKmeansWithoutNGT          = 1,
      ClusteringTypeKmeansWithIteration       = 2,
      ClusteringTypeKmeansWithNGTForCentroids = 3
    };

    // Membership record: (vector, centroid, distance).
    // NOTE: operator< is inverted (greater distance compares "less"), so
    // std::sort produces DESCENDING distance order; code below relies on
    // reverse iteration to visit entries in ascending distance.
    class Entry {
    public:
      Entry():vectorID(0), centroidID(0), distance(0.0) {}
      Entry(size_t vid, size_t cid, double d):vectorID(vid), centroidID(cid), distance(d) {}
      bool operator<(const Entry &e) const {return distance > e.distance;}
      uint32_t vectorID;
      uint32_t centroidID;
      double distance;
    };

    // (vector, distance) with natural ascending order; used in a
    // priority_queue so the farthest vector sits on top.
    class DescendingEntry {
    public:
      DescendingEntry(size_t vid, double d):vectorID(vid), distance(d) {}
      bool operator<(const DescendingEntry &e) const {return distance < e.distance;}
      size_t vectorID;
      double distance;
    };

    // A centroid plus its currently assigned members.
    class Cluster {
    public:
      Cluster(std::vector<float> &c):centroid(c), radius(0.0) {}
      Cluster(const Cluster &c) { *this = c; }
      Cluster &operator=(const Cluster &c) {
        members = c.members;
        centroid = c.centroid;
        radius = c.radius;
        return *this;
      }
      std::vector<Entry> members;
      std::vector<float> centroid;
      double radius;  // NOTE(review): initialized/copied but never updated in this header
    };

    Clustering(InitializationMode im = InitializationModeHead,
               ClusteringType ct = ClusteringTypeKmeansWithNGT,
               size_t mi = 100):clusteringType(ct), initializationMode(im), maximumIteration(mi) { initialize(); }

    // Default parameters for the NGT-assisted search path.
    void initialize() {
      epsilonFrom = 0.12;
      epsilonTo = epsilonFrom;
      epsilonStep = 0.04;
      resultSizeCoefficient = 5;
    }

    // Parse each token (via stod) into `vector`, replacing its contents.
    static void convert(std::vector<std::string> &strings, std::vector<float> &vector) {
      vector.clear();
      for (auto it = strings.begin(); it != strings.end(); ++it) {
        vector.push_back(stod(*it));
      }
    }

    // Split a whitespace/tab-separated line into a float vector.
    static void extractVector(const std::string &str, std::vector<float> &vec) {
      std::vector<std::string> tokens;
      NGT::Common::tokenize(str, tokens, " \t");
      convert(tokens, vec);
    }

    // Load one vector per line from a text file. All lines must share the
    // same dimensionality; throws on open failure or inconsistent input.
    static void loadVectors(const std::string &file, std::vector<std::vector<float> > &vectors) {
      std::ifstream is(file);
      if (!is) {
        throw std::runtime_error("loadVectors::Cannot open " + file );
      }
      std::string line;
      size_t prevdim = 0;
      while (getline(is, line)) {
        std::vector<float> v;
        extractVector(line, v);
        if (v.size() == 0) {
          std::stringstream msg;
          msg << "Clustering:loadVectors: Error! The dimensionality is zero." << std::endl;
          NGTThrowException(msg);
        }
        if (prevdim != 0 && prevdim != v.size()) {
          std::stringstream msg;
          msg << "Clustering:loadVectors: Error! The dimensionality is inconsist. " << prevdim << ":" <<v.size() << std::endl;
          NGTThrowException(msg);
        }
        vectors.push_back(v);
        prevdim = v.size();
      }
    }

    // Write vectors as tab-separated text, one vector per line.
    static void saveVectors(const std::string &file, std::vector<std::vector<float> > &vectors) {
      std::ofstream os(file);
      for (auto vit = vectors.begin(); vit != vectors.end(); ++vit) {
        std::vector<float> &v = *vit;
        for (auto it = v.begin(); it != v.end(); ++it) {
          os << std::setprecision(9) << (*it);
          if (it + 1 != v.end()) {
            os << "\t";
          }
        }
        os << std::endl;
      }
    }

    // Write one size_t per line.
    static void saveVector(const std::string &file, std::vector<size_t> &vectors) {
      std::ofstream os(file);
      for (auto vit = vectors.begin(); vit != vectors.end(); ++vit) {
        os << *vit << std::endl;
      }
    }

    // Load centroids (one per line) as clusters with empty member lists.
    // When numberOfClusters != 0, loading stops at that count and the
    // process exits if the file holds fewer centroids than requested.
    static void loadClusters(const std::string &file, std::vector<Cluster> &clusters, size_t numberOfClusters = 0) {
      std::ifstream is(file);
      if (!is) {
        throw std::runtime_error("loadClusters::Cannot open " + file);
      }
      std::string line;
      while (getline(is, line)) {
        std::vector<float> v;
        extractVector(line, v);
        clusters.push_back(v);
        if ((numberOfClusters != 0) && (clusters.size() >= numberOfClusters)) {
          break;
        }
      }
      if ((numberOfClusters != 0) && (clusters.size() < numberOfClusters)) {
        std::cerr << "initial cluster data are not enough. " << clusters.size() << ":" << numberOfClusters << std::endl;
        exit(1);
      }
    }

#if !defined(NGT_CLUSTER_NO_AVX)
    // Sum of squared differences, 8 floats per AVX iteration; a scalar tail
    // loop handles the size % 8 leftovers. Unaligned loads are used, so the
    // inputs need no particular alignment.
    static double sumOfSquares(float *a, float *b, size_t size) {
      __m256 sum = _mm256_setzero_ps();
      float *last = a + size;
      float *lastgroup = last - 7;
      while (a < lastgroup) {
        __m256 v = _mm256_sub_ps(_mm256_loadu_ps(a), _mm256_loadu_ps(b));
        sum = _mm256_add_ps(sum, _mm256_mul_ps(v, v));
        a += 8;
        b += 8;
      }
      __attribute__((aligned(32))) float f[8];
      _mm256_store_ps(f, sum);
      double s = f[0] + f[1] + f[2] + f[3] + f[4] + f[5] + f[6] + f[7];
      while (a < last) {
        double d = *a++ - *b++;
        s += d * d;
      }
      return s;
    }
#else // !defined(NGT_AVX_DISABLED) && defined(__AVX__)
    // Scalar fallback of the sum-of-squared-differences kernel.
    static double sumOfSquares(float *a, float *b, size_t size) {
      double csum = 0.0;
      float *x = a;
      float *y = b;
      for (size_t i = 0; i < size; i++) {
        double d = (double)*x++ - (double)*y++;
        csum += d * d;
      }
      return csum;
    }
#endif // !defined(NGT_AVX_DISABLED) && defined(__AVX__)

    // Euclidean (L2) distance between two vectors of equal dimension.
    static double distanceL2(std::vector<float> &vector1, std::vector<float> &vector2) {
      return sqrt(sumOfSquares(&vector1[0], &vector2[0], vector1.size()));
    }

    // Mean of per-row L2 distances between two equally sized vector sets.
    static double distanceL2(std::vector<std::vector<float> > &vector1, std::vector<std::vector<float> > &vector2) {
      assert(vector1.size() == vector2.size());
      double distance = 0.0;
      for (size_t i = 0; i < vector1.size(); i++) {
        distance += distanceL2(vector1[i], vector2[i]);
      }
      distance /= (double)vector1.size();
      return distance;
    }

    // Mean squared per-dimension difference between two vectors.
    static double meanSumOfSquares(std::vector<float> &vector1, std::vector<float> &vector2) {
      return sumOfSquares(&vector1[0], &vector2[0], vector1.size()) / (double)vector1.size();
    }

    // a -= b, element-wise.
    static void subtract(std::vector<float> &a, std::vector<float> &b) {
      assert(a.size() == b.size());
      auto bit = b.begin();
      for (auto ait = a.begin(); ait != a.end(); ++ait, ++bit) {
        *ait = *ait - *bit;
      }
    }

    // Seed clusters with the first `size` vectors (clamped to the data size).
    static void getInitialCentroidsFromHead(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters, size_t size) {
      size = size > vectors.size() ? vectors.size() : size;
      clusters.clear();
      for (size_t i = 0; i < size; i++) {
        clusters.push_back(Cluster(vectors[i]));
      }
    }

    // Seed clusters with `size` randomly chosen vectors (seed == 0 draws a
    // seed from random_device). Duplicate picks are possible.
    static void getInitialCentroidsRandomly(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters, size_t size, size_t seed) {
      clusters.clear();
      std::random_device rnd;
      if (seed == 0) {
        seed = rnd();
      }
      std::mt19937 mt(seed);
      for (size_t i = 0; i < size; i++) {
        size_t idx = mt() * vectors.size() / mt.max();
        if (idx >= size) {
          // NOTE(review): this retries when idx >= size (the cluster count),
          // not idx >= vectors.size(); as written only the first `size`
          // vectors can ever be selected -- confirm this is intentional.
          i--;
          continue;
        }
        clusters.push_back(Cluster(vectors[idx]));
      }
      assert(clusters.size() == size);
    }

    // k-means++ seeding: first centroid is uniform-random; each subsequent
    // centroid is drawn with probability proportional to the squared
    // distance to the nearest centroid chosen so far.
    static void getInitialCentroidsKmeansPlusPlus(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters, size_t size) {
      size = size > vectors.size() ? vectors.size() : size;
      clusters.clear();
      std::random_device rnd;
      std::mt19937 mt(rnd());
      size_t idx = (long long)mt() * (long long)vectors.size() / (long long)mt.max();
      clusters.push_back(Cluster(vectors[idx]));

      NGT::Timer timer;
      for (size_t k = 1; k < size; k++) {
        double sum = 0;
        std::priority_queue<DescendingEntry> sortedObjects;
        // get d^2 and sort
#pragma omp parallel for
        for (size_t vi = 0; vi < vectors.size(); vi++) {
          auto vit = vectors.begin() + vi;
          double mind = DBL_MAX;
          for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
            double d = distanceL2(*vit, (*cit).centroid);
            d *= d;
            if (d < mind) {
              mind = d;
            }
          }
          // heap push and the running total must be serialized
#pragma omp critical
          {
            sortedObjects.push(DescendingEntry(distance(vectors.begin(), vit), mind));
            sum += mind;
          }
        }
        // Roulette-wheel selection, walking the heap from farthest down.
        double l = (double)mt() / (double)mt.max() * sum;
        while (!sortedObjects.empty()) {
          sum -= sortedObjects.top().distance;
          if (l >= sum) {
            clusters.push_back(Cluster(vectors[sortedObjects.top().vectorID]));
            break;
          }
          sortedObjects.pop();
        }
      }
    }

    // Exhaustively assign every vector to its nearest centroid, optionally
    // under a per-cluster member cap. Entries are processed in ascending
    // distance; when the nearest cluster is full the entry is re-routed to
    // the nearest non-full cluster and the remaining entries re-sorted.
    static void assign(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters,
                       size_t clusterSize = std::numeric_limits<size_t>::max()) {
      // compute distances to the nearest clusters, and construct heap by the distances.
      NGT::Timer timer;
      timer.start();
      std::vector<Entry> sortedObjects(vectors.size());
#pragma omp parallel for
      for (size_t vi = 0; vi < vectors.size(); vi++) {
        auto vit = vectors.begin() + vi;
        {
          double mind = DBL_MAX;
          size_t mincidx = -1;
          for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
            double d = distanceL2(*vit, (*cit).centroid);
            if (d < mind) {
              mind = d;
              mincidx = distance(clusters.begin(), cit);
            }
          }
          sortedObjects[vi] = Entry(vi, mincidx, mind);
        }
      }
      std::sort(sortedObjects.begin(), sortedObjects.end());  // descending distance (Entry::operator<)
      // clear
      for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
        (*cit).members.clear();
      }
      // distribute objects to the nearest clusters in the same size constraint.
      for (auto soi = sortedObjects.rbegin(); soi != sortedObjects.rend();) {
        Entry &entry = *soi;
        if (entry.centroidID >= clusters.size()) {
          std::cerr << "Something wrong. " << entry.centroidID << ":" << clusters.size() << std::endl;
          soi++;
          continue;
        }
        if (clusters[entry.centroidID].members.size() < clusterSize) {
          clusters[entry.centroidID].members.push_back(entry);
          soi++;
        }
        else {
          // nearest cluster is full: re-route to the nearest cluster with
          // room, update the entry, and re-sort the not-yet-placed prefix
          double mind = DBL_MAX;
          size_t mincidx = -1;
          for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
            if ((*cit).members.size() >= clusterSize) {
              continue;
            }
            double d = distanceL2(vectors[entry.vectorID], (*cit).centroid);
            if (d < mind) {
              mind = d;
              mincidx = distance(clusters.begin(), cit);
            }
          }
          entry = Entry(entry.vectorID, mincidx, mind);
          int pt = distance(sortedObjects.rbegin(), soi);
          std::sort(sortedObjects.begin(), soi.base());
          soi = sortedObjects.rbegin() + pt;  // re-anchor the reverse iterator after sorting
          assert(pt == distance(sortedObjects.rbegin(), soi));
        }
      }
      moveFartherObjectsToEmptyClusters(clusters);
    }

    // Give every empty cluster one member by stealing the farthest member
    // (members.back(), thanks to the descending sort) of a cluster that has
    // at least two members. Throws if no donor exists.
    static void moveFartherObjectsToEmptyClusters(std::vector<Cluster> &clusters) {
      size_t emptyClusterCount = 0;
      for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
        if ((*cit).members.size() == 0) {
          emptyClusterCount++;
          double max = -DBL_MAX;
          auto maxit = clusters.begin();
          for (auto scit = clusters.begin(); scit != clusters.end(); ++scit) {
            if ((*scit).members.size() >= 2 && (*scit).members.back().distance > max) {
              maxit = scit;
              max = (*scit).members.back().distance;
            }
          }
          if (max == -DBL_MAX) {
            std::stringstream msg;
            msg << "Clustering::moveFartherObjectsToEmptyClusters: Not found max. ";
            for (auto scit = clusters.begin(); scit != clusters.end(); ++scit) {
              msg << distance(clusters.begin(), scit) << ":" << (*scit).members.size() << " ";
            }
            NGTThrowException(msg);
          }
          (*cit).members.push_back((*maxit).members.back());
          (*cit).members.back().centroidID = distance(clusters.begin(), cit);
          (*maxit).members.pop_back();
        }
      }
      // recount empty clusters; NOTE(review): result is not used afterwards
      emptyClusterCount = 0;
      for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
        if ((*cit).members.size() == 0) {
          emptyClusterCount++;
        }
      }
    }

    // Assignment step using an NGT index: each centroid searches the index
    // for its `resultSize` nearest objects, objects are then greedily
    // assigned closest-first, and any object missed by every search is
    // placed by exhaustive scan (looping until done when size-capped).
    static void assignWithNGT(NGT::Index &index, std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters,
                              size_t &resultSize, float epsilon = 0.12,
                              size_t clusterSize = std::numeric_limits<size_t>::max()) {
      size_t dataSize = vectors.size();
      assert(index.getObjectRepositorySize() - 1 == vectors.size());
      vector<vector<Entry> > results(clusters.size());
#pragma omp parallel for
      for (size_t ci = 0; ci < clusters.size(); ci++) {
        auto cit = clusters.begin() + ci;
        NGT::ObjectDistances objects;
        NGT::Object *query = 0;
        query = index.allocateObject((*cit).centroid);
        NGT::SearchContainer sc(*query);
        sc.setResults(&objects);
        sc.setEpsilon(epsilon);
        sc.setSize(resultSize);
        index.search(sc);
        results[ci].reserve(objects.size());
        for (size_t idx = 0; idx < objects.size(); idx++) {
          size_t oidx = objects[idx].id - 1;  // NGT object IDs are 1-origin
          results[ci].push_back(Entry(oidx, ci, objects[idx].distance));
        }
        index.deleteObject(query);
      }
      // flatten all per-centroid result lists into one array
      size_t resultCount = 0;
      for (auto ri = results.begin(); ri != results.end(); ++ri) {
        resultCount += (*ri).size();
      }
      vector<Entry> sortedDistances;
      sortedDistances.reserve(resultCount);
      for (auto ri = results.begin(); ri != results.end(); ++ri) {
        std::copy((*ri).begin(), (*ri).end(), std::back_inserter(sortedDistances));
      }
      vector<bool> assignedObjects(dataSize, false);
      sort(sortedDistances.begin(), sortedDistances.end());
      for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
        (*cit).members.clear();
      }
      // reverse iteration = ascending distance: greedily assign each object
      // once, skipping clusters that have reached the size cap
      size_t assignedObjectCount = 0;
      for (auto i = sortedDistances.rbegin(); i != sortedDistances.rend(); ++i) {
        size_t objectID = (*i).vectorID;
        size_t clusterID = (*i).centroidID;
        if (clusters[clusterID].members.size() >= clusterSize) {
          continue;
        }
        if (!assignedObjects[objectID]) {
          assignedObjects[objectID] = true;
          clusters[clusterID].members.push_back(*i);
          clusters[clusterID].members.back().centroidID = clusterID;
          assignedObjectCount++;
        }
      }
      //size_t notAssignedObjectCount = 0;
      vector<uint32_t> notAssignedObjectIDs;
      notAssignedObjectIDs.reserve(dataSize - assignedObjectCount);
      for (size_t idx = 0; idx < dataSize; idx++) {
        if (!assignedObjects[idx]) {
          notAssignedObjectIDs.push_back(idx);
        }
      }
      if (clusterSize < std::numeric_limits<size_t>::max()) {
        // size-capped: repeatedly offer each leftover object its closest
        // non-full clusters until every object is assigned
        do {
          vector<vector<Entry>> notAssignedObjects(notAssignedObjectIDs.size());
          // cap candidate clusters per object so total memory stays ~1 GiB
          size_t nOfClosestClusters = 1 * 1024 * 1024 * 1024 / 16 / (notAssignedObjectIDs.size() == 0 ? 1 : notAssignedObjectIDs.size());
#pragma omp parallel for
          for (size_t vi = 0; vi < notAssignedObjectIDs.size(); vi++) {
            auto vit = notAssignedObjectIDs.begin() + vi;
            if (assignedObjects[*vit]) {
              continue;
            }
            vector<Entry> ds;
            ds.reserve(clusters.size());
            for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
              if ((*cit).members.size() >= clusterSize) {
                continue;
              }
              double d = distanceL2(vectors[*vit], (*cit).centroid);
              ds.push_back(Entry(*vit, distance(clusters.begin(), cit), d));
            }
            sort(ds.begin(), ds.end());
            size_t topk = ds.size() < nOfClosestClusters ? ds.size() : nOfClosestClusters;
            // entries sort descending, so the closest `topk` are at the end
            std::copy(ds.end() - topk, ds.end(), std::back_inserter(notAssignedObjects[vi]));
          }
          sortedDistances.clear();
          for (auto i = notAssignedObjects.begin(); i != notAssignedObjects.end(); ++i) {
            std::copy((*i).begin(), (*i).end(), std::back_inserter(sortedDistances));
            vector<Entry> empty;
            (*i).swap(empty);  // release each list's capacity eagerly
          }
          sort(sortedDistances.begin(), sortedDistances.end());
          for (auto i = sortedDistances.rbegin(); i != sortedDistances.rend(); ++i) {
            size_t objectID = (*i).vectorID;
            size_t clusterID = (*i).centroidID;
            if (clusters[clusterID].members.size() >= clusterSize) {
              continue;
            }
            if (!assignedObjects[objectID]) {
              assignedObjects[objectID] = true;
              clusters[clusterID].members.push_back(*i);
              clusters[clusterID].members.back().centroidID = clusterID;
            }
          }
        } while (std::any_of(assignedObjects.begin(), assignedObjects.end(), [](bool x){ return !x; }));
      }
      else {
        // no size cap: brute-force each leftover object to its nearest centroid
        vector<Entry> notAssignedObjects(notAssignedObjectIDs.size());
#pragma omp parallel for
        for (size_t vi = 0; vi < notAssignedObjectIDs.size(); vi++) {
          auto vit = notAssignedObjectIDs.begin() + vi;
          {
            double mind = DBL_MAX;
            size_t mincidx = -1;
            for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
              double d = distanceL2(vectors[*vit], (*cit).centroid);
              if (d < mind) {
                mind = d;
                mincidx = distance(clusters.begin(), cit);
              }
            }
            notAssignedObjects[vi] = Entry(*vit, mincidx, mind); // Entry(vectorID, centroidID, distance)
          }
        }
        for (auto nroit = notAssignedObjects.begin(); nroit != notAssignedObjects.end(); ++nroit) {
          clusters[(*nroit).centroidID].members.push_back(*nroit);
        }
        moveFartherObjectsToEmptyClusters(clusters);
      }
    }

    // Recompute each centroid as the mean of its members. Returns the sum of
    // L2 displacements of all centroids (0 means converged). Aborts if any
    // cluster has no members.
    static double calculateCentroid(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters) {
      double distance = 0;
      size_t memberCount = 0;
      for (auto it = clusters.begin(); it != clusters.end(); ++it) {
        memberCount += (*it).members.size();
        if ((*it).members.size() != 0) {
          std::vector<float> mean(vectors[0].size(), 0.0);
          for (auto memit = (*it).members.begin(); memit != (*it).members.end(); ++memit) {
            auto mit = mean.begin();
            auto &v = vectors[(*memit).vectorID];
            for (auto vit = v.begin(); vit != v.end(); ++vit, ++mit) {
              *mit += *vit;
            }
          }
          for (auto mit = mean.begin(); mit != mean.end(); ++mit) {
            *mit /= (*it).members.size();
          }
          distance += distanceL2((*it).centroid, mean);
          (*it).centroid = mean;
        }
        else {
          cerr << "Clustering: Fatal Error. No member!" << endl;
          abort();
        }
      }
      return distance;
    }

    // Write cluster centroids as tab-separated text, one per line.
    static void saveClusters(const std::string &file, std::vector<Cluster> &clusters) {
      std::ofstream os(file);
      for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
        std::vector<float> &v = (*cit).centroid;
        for (auto it = v.begin(); it != v.end(); ++it) {
          os << std::setprecision(9) << (*it);
          if (it + 1 != v.end()) {
            os << "\t";
          }
        }
        os << std::endl;
      }
    }

    // Plain Lloyd iteration without any index.
    // NOTE(review): despite the double return type this returns `diff == 0`
    // (1.0 when converged within maximumIteration rounds, else 0.0).
    double kmeansWithoutNGT(std::vector<std::vector<float> > &vectors, size_t numberOfClusters, std::vector<Cluster> &clusters) {
      size_t clusterSize = std::numeric_limits<size_t>::max();
      if (clusterSizeConstraint) {
        clusterSize = ceil((double)vectors.size() / (double)numberOfClusters);
      }
      double diff = 0;
      for (size_t i = 0; i < maximumIteration; i++) {
        assign(vectors, clusters, clusterSize);
        // centroid is recomputed.
        // diff is distance between the current centroids and the previous centroids.
        diff = calculateCentroid(vectors, clusters);
        if (diff == 0) {
          break;
        }
      }
      return diff == 0;
    }

    // NGT-assisted Lloyd iteration at a fixed search epsilon. Returns the
    // final total centroid displacement (0 => converged); the displacement
    // of every iteration is recorded in diffHistory.
    double kmeansWithNGT(NGT::Index &index, std::vector<std::vector<float> > &vectors, size_t numberOfClusters, std::vector<Cluster> &clusters, float epsilon) {
      size_t clusterSize = std::numeric_limits<size_t>::max();
      if (clusterSizeConstraint) {
        clusterSize = ceil((double)vectors.size() / (double)numberOfClusters);
        for (size_t ci = 0; ci < clusters.size(); ci++) {
          clusters[ci].members.reserve(clusterSize);
        }
      }
      diffHistory.clear();
      NGT::Timer timer;
      timer.start();
      double diff = 0.0;
      size_t resultSize;
      resultSize = resultSizeCoefficient * vectors.size() / clusters.size();
      for (size_t i = 0; i < maximumIteration; i++) {
        assignWithNGT(index, vectors, clusters, resultSize, epsilon, clusterSize);
        // centroid is recomputed.
        // diff is distance between the current centroids and the previous centroids.
        std::vector<Cluster> prevClusters = clusters;  // NOTE(review): this copy is never read
        diff = calculateCentroid(vectors, clusters);
        timer.stop();
        timer.start();
        diffHistory.push_back(diff);
        if (diff == 0) {
          break;
        }
      }
      return diff;
    }

#ifndef NGT_SHARED_MEMORY_ALLOCATOR
    // Build a temporary in-memory ANNG index over `vectors`, then run the
    // NGT-assisted k-means with epsilonFrom.
    double kmeansWithNGT(std::vector<std::vector<float> > &vectors, size_t numberOfClusters, std::vector<Cluster> &clusters) {
      pid_t pid = getpid();
      std::stringstream str;
      str << "cluster-ngt." << pid;
      string database = str.str();
      string dataFile;
      size_t dataSize = 0;
      size_t dim = clusters.front().centroid.size();
      NGT::Property property;
      property.dimension = dim;
      property.graphType = NGT::Property::GraphType::GraphTypeANNG;
      property.objectType = NGT::Index::Property::ObjectType::Float;
      property.distanceType = NGT::Index::Property::DistanceType::DistanceTypeL2;
      // pack all vectors into one contiguous float buffer for bulk append
      float *data = new float[vectors.size() * dim];
      float *ptr = data;
      dataSize = vectors.size();
      for (auto vi = vectors.begin(); vi != vectors.end(); ++vi) {
        memcpy(ptr, &((*vi)[0]), dim * sizeof(float));
        ptr += dim;
      }
      size_t threadSize = 20;
      NGT::Index index(property);
      index.append(data, dataSize);
      index.createIndex(threadSize);
      // NOTE(review): `data` is never deleted here -- possible leak unless
      // NGT::Index::append takes ownership; verify against the NGT API.
      return kmeansWithNGT(index, vectors, numberOfClusters, clusters, epsilonFrom);
    }
#endif

    // Extract all objects from the index and cluster them, sweeping the
    // search epsilon from epsilonFrom to epsilonTo (step epsilonStep).
    double kmeansWithNGT(NGT::Index &index, size_t numberOfClusters, std::vector<Cluster> &clusters) {
      NGT::GraphIndex &graph = static_cast<NGT::GraphIndex&>(index.getIndex());
      NGT::ObjectSpace &os = graph.getObjectSpace();
      size_t size = os.getRepository().size();
      std::vector<std::vector<float> > vectors(size - 1);  // repository slot 0 is unused
      for (size_t idx = 1; idx < size; idx++) {
        try {
          os.getObject(idx, vectors[idx - 1]);
        } catch(...) {
          cerr << "Cannot get object " << idx << endl;
        }
      }
      double diff = DBL_MAX;
      clusters.clear();
      setupInitialClusters(vectors, numberOfClusters, clusters);
      for (float epsilon = epsilonFrom; epsilon <= epsilonTo; epsilon += epsilonStep) {
        diff = kmeansWithNGT(index, vectors, numberOfClusters, clusters, epsilon);
        if (diff == 0.0) {
          return diff;
        }
      }
      return diff;
    }

    // Cluster the contents of `index` and insert the resulting centroids
    // into `outIndex`, then build outIndex with 16 threads.
    double kmeansWithNGT(NGT::Index &index, size_t numberOfClusters, NGT::Index &outIndex) {
      std::vector<Cluster> clusters;
      double diff = kmeansWithNGT(index, numberOfClusters, clusters);
      for (auto i = clusters.begin(); i != clusters.end(); ++i) {
        outIndex.insert((*i).centroid);
      }
      outIndex.createIndex(16);
      return diff;
    }

    // Replace the on-disk index at `index`'s path with an index of its own
    // centroids: the original is renamed to <path>.tmp, clustered, the
    // centroids written under the original path, and the temp destroyed.
    double kmeansWithNGT(NGT::Index &index, size_t numberOfClusters) {
      NGT::Property prop;
      index.getProperty(prop);
      string path = index.getPath();
      index.save();
      index.close();
      string outIndexName = path;
      string inIndexName = path + ".tmp";
      std::rename(outIndexName.c_str(), inIndexName.c_str());
      NGT::Index::createGraphAndTree(outIndexName, prop);
      index.open(outIndexName);
      NGT::Index inIndex(inIndexName);
      double diff = kmeansWithNGT(inIndex, numberOfClusters, index);
      inIndex.close();
      NGT::Index::destroy(inIndexName);
      return diff;
    }

    // Same as above, opening the index by name and saving it afterwards.
    double kmeansWithNGT(string &indexName, size_t numberOfClusters) {
      NGT::Index inIndex(indexName);
      double diff = kmeansWithNGT(inIndex, numberOfClusters);
      inIndex.save();
      inIndex.close();
      return diff;
    }

    // Mean (over all vectors) of the mean squared error of each member
    // against its centroid. Asserts that every vector is a member.
    static double calculateMSE(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters) {
      double mse = 0.0;
      size_t count = 0;
      for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
        count += (*cit).members.size();
        for (auto mit = (*cit).members.begin(); mit != (*cit).members.end(); ++mit) {
          mse += meanSumOfSquares((*cit).centroid, vectors[(*mit).vectorID]);
        }
      }
      assert(vectors.size() == count);
      return mse / (double)vectors.size();
    }

    // Mean L2 distance of members to their centroids; warns (but proceeds)
    // if some vectors are not assigned to any cluster.
    static double calculateML2(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters) {
      double d = 0.0;
      size_t count = 0;
      for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) {
        count += (*cit).members.size();
        double localD= 0.0;  // NOTE(review): accumulated but never used
        for (auto mit = (*cit).members.begin(); mit != (*cit).members.end(); ++mit) {
          double distance = distanceL2((*cit).centroid, vectors[(*mit).vectorID]);
          d += distance;
          localD += distance;
        }
      }
      if (vectors.size() != count) {
        std::cerr << "Warning! vectors.size() != count" << std::endl;
      }
      return d / (double)vectors.size();
    }

    // Like calculateML2 but restricted to the listed centroid IDs; still
    // normalized by the TOTAL vector count, not the restricted member count.
    static double calculateML2FromSpecifiedCentroids(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters, std::vector<size_t> &centroidIds) {
      double d = 0.0;
      size_t count = 0;
      for (auto it = centroidIds.begin(); it != centroidIds.end(); ++it) {
        Cluster &cluster = clusters[(*it)];
        count += cluster.members.size();
        for (auto mit = cluster.members.begin(); mit != cluster.members.end(); ++mit) {
          d += distanceL2(cluster.centroid, vectors[(*mit).vectorID]);
        }
      }
      return d / (double)vectors.size();
    }

    // Seed initial centroids according to initializationMode; no-op when
    // `clusters` is already populated (e.g. loaded via loadClusters).
    void setupInitialClusters(std::vector<std::vector<float> > &vectors, size_t numberOfClusters, std::vector<Cluster> &clusters) {
      if (clusters.empty()) {
        switch (initializationMode) {
        case InitializationModeHead:
          {
            getInitialCentroidsFromHead(vectors, clusters, numberOfClusters);
            break;
          }
        case InitializationModeRandom:
          {
            getInitialCentroidsRandomly(vectors, clusters, numberOfClusters, 0);
            break;
          }
        case InitializationModeKmeansPlusPlus:
          {
            getInitialCentroidsKmeansPlusPlus(vectors, clusters, numberOfClusters);
            break;
          }
        default:
          std::cerr << "proper initMode is not specified." << std::endl;
          exit(1);
        }
      }
    }

    // Entry point: seed centroids and dispatch on clusteringType.
    bool kmeans(std::vector<std::vector<float> > &vectors, size_t numberOfClusters, std::vector<Cluster> &clusters) {
      setupInitialClusters(vectors, numberOfClusters, clusters);
      switch (clusteringType) {
      case ClusteringTypeKmeansWithoutNGT:
        return kmeansWithoutNGT(vectors, numberOfClusters, clusters);
        break;
#ifndef NGT_SHARED_MEMORY_ALLOCATOR
      case ClusteringTypeKmeansWithNGT:
        return kmeansWithNGT(vectors, numberOfClusters, clusters);
        break;
#endif
      default:
        cerr << "kmeans::fatal error!. invalid clustering type. " << clusteringType << endl;
        abort();
        break;
      }
    }

    // Re-run assignment and print a quality metric for the clustering:
    // mode 'e' => MSE, '2' (or anything else) => ML2.
    static void evaluate(std::vector<std::vector<float> > &vectors, std::vector<Cluster> &clusters, char mode,
                         std::vector<size_t> centroidIds = std::vector<size_t>()) {
      size_t clusterSize = std::numeric_limits<size_t>::max();
      assign(vectors, clusters, clusterSize);

      std::cout << "The number of vectors=" << vectors.size() << std::endl;
      std::cout << "The number of centroids=" << clusters.size() << std::endl;
      if (centroidIds.size() == 0) {
        switch (mode) {
        case 'e':
          std::cout << "MSE=" << calculateMSE(vectors, clusters) << std::endl;
          break;
        case '2':
        default:
          std::cout << "ML2=" << calculateML2(vectors, clusters) << std::endl;
          break;
        }
      }
      else {
        switch (mode) {
        case 'e':
          // MSE over a centroid subset is not implemented
          break;
        case '2':
        default:
          std::cout << "ML2=" << calculateML2FromSpecifiedCentroids(vectors, clusters, centroidIds) << std::endl;
          break;
        }
      }
    }

    ClusteringType clusteringType;          // which kmeans variant kmeans() runs
    InitializationMode initializationMode;  // centroid seeding strategy
    size_t numberOfClusters;
    bool clusterSizeConstraint;             // cap clusters at ceil(n/k) members
    size_t maximumIteration;                // Lloyd iteration limit
    float epsilonFrom;                      // NGT search epsilon sweep: start...
    float epsilonTo;                        // ...end...
    float epsilonStep;                      // ...and step
    size_t resultSizeCoefficient;           // multiplier for per-centroid search result size
    vector<double> diffHistory;             // per-iteration centroid displacement (NGT path)
  };
}
editmesh_utils.c
/* * ***** BEGIN GPL LICENSE BLOCK ***** * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * The Original Code is Copyright (C) 2004 by Blender Foundation. * All rights reserved. * * The Original Code is: all of this file. * * Contributor(s): Joseph Eagar * * ***** END GPL LICENSE BLOCK ***** */ /** \file blender/editors/mesh/editmesh_utils.c * \ingroup edmesh */ #include "MEM_guardedalloc.h" #include "DNA_mesh_types.h" #include "DNA_object_types.h" #include "DNA_key_types.h" #include "BLI_math.h" #include "BLI_alloca.h" #include "BLI_buffer.h" #include "BLI_kdtree.h" #include "BLI_listbase.h" #include "BKE_DerivedMesh.h" #include "BKE_context.h" #include "BKE_global.h" #include "BKE_depsgraph.h" #include "BKE_main.h" #include "BKE_mesh.h" #include "BKE_mesh_mapping.h" #include "BKE_report.h" #include "BKE_editmesh.h" #include "BKE_editmesh_bvh.h" #include "BKE_object.h" /* XXX. only for EDBM_mesh_ensure_valid_dm_hack() which will be removed */ #include "WM_api.h" #include "WM_types.h" #include "ED_mesh.h" #include "ED_screen.h" #include "ED_view3d.h" #include "mesh_intern.h" /* own include */ /* mesh backup implementation. This would greatly benefit from some sort of binary diffing * just as the undo stack would. 
So leaving this as an interface for further work */ BMBackup EDBM_redo_state_store(BMEditMesh *em) { BMBackup backup; backup.bmcopy = BM_mesh_copy(em->bm); return backup; } void EDBM_redo_state_restore(BMBackup backup, BMEditMesh *em, int recalctess) { BMesh *tmpbm; if (!em || !backup.bmcopy) return; BM_mesh_data_free(em->bm); tmpbm = BM_mesh_copy(backup.bmcopy); *em->bm = *tmpbm; MEM_freeN(tmpbm); tmpbm = NULL; if (recalctess) BKE_editmesh_tessface_calc(em); } void EDBM_redo_state_free(BMBackup *backup, BMEditMesh *em, int recalctess) { if (em && backup->bmcopy) { BM_mesh_data_free(em->bm); *em->bm = *backup->bmcopy; } else if (backup->bmcopy) { BM_mesh_data_free(backup->bmcopy); } if (backup->bmcopy) MEM_freeN(backup->bmcopy); backup->bmcopy = NULL; if (recalctess && em) BKE_editmesh_tessface_calc(em); } /* hack to workaround multiple operators being called within the same event loop without an update * see: [#31811] */ void EDBM_mesh_ensure_valid_dm_hack(Scene *scene, BMEditMesh *em) { if ((((ID *)em->ob->data)->tag & LIB_TAG_ID_RECALC) || (em->ob->recalc & OB_RECALC_DATA)) { /* since we may not have done selection flushing */ if ((em->ob->recalc & OB_RECALC_DATA) == 0) { DAG_id_tag_update(&em->ob->id, OB_RECALC_DATA); } BKE_object_handle_update(G.main->eval_ctx, scene, em->ob); } } void EDBM_mesh_normals_update(BMEditMesh *em) { BM_mesh_normals_update(em->bm); } void EDBM_mesh_clear(BMEditMesh *em) { /* clear bmesh */ BM_mesh_clear(em->bm); /* free derived meshes */ BKE_editmesh_free_derivedmesh(em); /* free tessellation data */ em->tottri = 0; if (em->looptris) { MEM_freeN(em->looptris); em->looptris = NULL; } } void EDBM_stats_update(BMEditMesh *em) { const char iter_types[3] = {BM_VERTS_OF_MESH, BM_EDGES_OF_MESH, BM_FACES_OF_MESH}; BMIter iter; BMElem *ele; int *tots[3]; int i; tots[0] = &em->bm->totvertsel; tots[1] = &em->bm->totedgesel; tots[2] = &em->bm->totfacesel; em->bm->totvertsel = em->bm->totedgesel = em->bm->totfacesel = 0; for (i = 0; i < 3; i++) 
{ ele = BM_iter_new(&iter, em->bm, iter_types[i], NULL); for ( ; ele; ele = BM_iter_step(&iter)) { if (BM_elem_flag_test(ele, BM_ELEM_SELECT)) { (*tots[i])++; } } } } DerivedMesh *EDBM_mesh_deform_dm_get(BMEditMesh *em) { return ((em->derivedFinal != NULL) && (em->derivedFinal->type == DM_TYPE_EDITBMESH) && (em->derivedFinal->deformedOnly != false)) ? em->derivedFinal : NULL; } bool EDBM_op_init(BMEditMesh *em, BMOperator *bmop, wmOperator *op, const char *fmt, ...) { BMesh *bm = em->bm; va_list list; va_start(list, fmt); if (!BMO_op_vinitf(bm, bmop, BMO_FLAG_DEFAULTS, fmt, list)) { BKE_reportf(op->reports, RPT_ERROR, "Parse error in %s", __func__); va_end(list); return false; } if (!em->emcopy) em->emcopy = BKE_editmesh_copy(em); em->emcopyusers++; va_end(list); return true; } /* returns 0 on error, 1 on success. executes and finishes a bmesh operator */ bool EDBM_op_finish(BMEditMesh *em, BMOperator *bmop, wmOperator *op, const bool do_report) { const char *errmsg; BMO_op_finish(em->bm, bmop); if (BMO_error_get(em->bm, &errmsg, NULL)) { BMEditMesh *emcopy = em->emcopy; if (do_report) { BKE_report(op->reports, RPT_ERROR, errmsg); } EDBM_mesh_free(em); *em = *emcopy; MEM_freeN(emcopy); em->emcopyusers = 0; em->emcopy = NULL; /* when copying, tessellation isn't to for faster copying, * but means we need to re-tessellate here */ if (em->looptris == NULL) { BKE_editmesh_tessface_calc(em); } return false; } else { em->emcopyusers--; if (em->emcopyusers < 0) { printf("warning: em->emcopyusers was less than zero.\n"); } if (em->emcopyusers <= 0) { BKE_editmesh_free(em->emcopy); MEM_freeN(em->emcopy); em->emcopy = NULL; } return true; } } bool EDBM_op_callf(BMEditMesh *em, wmOperator *op, const char *fmt, ...) 
/* Body of #EDBM_op_callf (signature on the previous line): parse 'fmt' into a
 * bmesh operator, execute it, and finish it, reporting parse errors on 'op'. */
{
	BMesh *bm = em->bm;
	BMOperator bmop;
	va_list list;

	va_start(list, fmt);

	if (!BMO_op_vinitf(bm, &bmop, BMO_FLAG_DEFAULTS, fmt, list)) {
		/* format-string parse failure: nothing was executed, report and abort */
		BKE_reportf(op->reports, RPT_ERROR, "Parse error in %s", __func__);
		va_end(list);
		return false;
	}

	/* backup copy, used by EDBM_op_finish() to restore the mesh if the operator errors */
	if (!em->emcopy)
		em->emcopy = BKE_editmesh_copy(em);
	em->emcopyusers++;

	BMO_op_exec(bm, &bmop);

	va_end(list);
	return EDBM_op_finish(em, &bmop, op, true);
}

/**
 * Run a bmesh operator (as #EDBM_op_callf does) and additionally select the
 * elements written to the operator's \a select_slot_out output slot.
 *
 * \param select_extend: when false, the existing selection is cleared first.
 */
bool EDBM_op_call_and_selectf(BMEditMesh *em, wmOperator *op,
                              const char *select_slot_out, const bool select_extend,
                              const char *fmt, ...)
{
	BMOpSlot *slot_select_out;
	BMesh *bm = em->bm;
	BMOperator bmop;
	va_list list;
	char hflag;

	va_start(list, fmt);

	if (!BMO_op_vinitf(bm, &bmop, BMO_FLAG_DEFAULTS, fmt, list)) {
		BKE_reportf(op->reports, RPT_ERROR, "Parse error in %s", __func__);
		va_end(list);
		return false;
	}

	/* backup copy for error recovery in EDBM_op_finish() */
	if (!em->emcopy)
		em->emcopy = BKE_editmesh_copy(em);
	em->emcopyusers++;

	BMO_op_exec(bm, &bmop);

	/* the output slot's element subtype decides which of the
	 * vert/edge/face header flags may be selected */
	slot_select_out = BMO_slot_get(bmop.slots_out, select_slot_out);
	hflag = slot_select_out->slot_subtype.elem & BM_ALL_NOLOOP;
	BLI_assert(hflag != 0);

	if (select_extend == false) {
		BM_mesh_elem_hflag_disable_all(em->bm, BM_VERT | BM_EDGE | BM_FACE, BM_ELEM_SELECT, false);
	}

	BMO_slot_buffer_hflag_enable(em->bm, bmop.slots_out, select_slot_out, hflag, BM_ELEM_SELECT, true);

	va_end(list);
	return EDBM_op_finish(em, &bmop, op, true);
}

/* as #EDBM_op_callf but without error reporting (no wmOperator is involved) */
bool EDBM_op_call_silentf(BMEditMesh *em, const char *fmt, ...)
{
	BMesh *bm = em->bm;
	BMOperator bmop;
	va_list list;

	va_start(list, fmt);

	if (!BMO_op_vinitf(bm, &bmop, BMO_FLAG_DEFAULTS, fmt, list)) {
		/* silent variant: no report on parse failure */
		va_end(list);
		return false;
	}

	if (!em->emcopy)
		em->emcopy = BKE_editmesh_copy(em);
	em->emcopyusers++;

	BMO_op_exec(bm, &bmop);

	va_end(list);
	return EDBM_op_finish(em, &bmop, NULL, false);
}

/* copy the editmesh select mode into the scene tool settings and notify the UI */
void EDBM_selectmode_to_scene(bContext *C)
{
	Scene *scene = CTX_data_scene(C);
	Object *obedit = CTX_data_edit_object(C);
	BMEditMesh *em = BKE_editmesh_from_object(obedit);

	if (!em)
		return;

	scene->toolsettings->selectmode = em->selectmode;

	/* Request redraw of header buttons (to show new select mode) */
	WM_event_add_notifier(C, NC_SCENE | ND_TOOLSETTINGS, scene);
}

/* build a new edit-bmesh for 'ob' from its mesh data, replacing any previous one */
void EDBM_mesh_make(ToolSettings *ts, Object *ob, const bool add_key_index)
{
	Mesh *me = ob->data;
	BMesh *bm;

	/* legacy meshes may still carry tessfaces only; convert before building the bmesh */
	if (UNLIKELY(!me->mpoly && me->totface)) {
		BKE_mesh_convert_mfaces_to_mpolys(me);
	}

	bm = BKE_mesh_to_bmesh(
	        me, ob, add_key_index,
	        &((struct BMeshCreateParams){.use_toolflags = true,}));

	if (me->edit_btmesh) {
		/* this happens when switching shape keys */
		EDBM_mesh_free(me->edit_btmesh);
		MEM_freeN(me->edit_btmesh);
	}

	/* currently executing operators re-tessellates, so we can avoid doing here
	 * but at some point it may need to be added back. */
#if 0
	me->edit_btmesh = BKE_editmesh_create(bm, true);
#else
	me->edit_btmesh = BKE_editmesh_create(bm, false);
#endif

	me->edit_btmesh->selectmode = me->edit_btmesh->bm->selectmode = ts->selectmode;
	me->edit_btmesh->mat_nr = (ob->actcol > 0) ? ob->actcol - 1 : 0;
	me->edit_btmesh->ob = ob;

	/* we need to flush selection because the mode may have changed from when last in editmode */
	EDBM_selectmode_flush(me->edit_btmesh);
}

/**
 * \warning This can invalidate the #DerivedMesh cache of other objects (for linked duplicates).
* Most callers should run #DAG_id_tag_update on \a ob->data, see: T46738, T46913 */ void EDBM_mesh_load(Object *ob) { Mesh *me = ob->data; BMesh *bm = me->edit_btmesh->bm; /* Workaround for T42360, 'ob->shapenr' should be 1 in this case. * however this isn't synchronized between objects at the moment. */ if (UNLIKELY((ob->shapenr == 0) && (me->key && !BLI_listbase_is_empty(&me->key->block)))) { bm->shapenr = 1; } BM_mesh_bm_to_me(bm, me, (&(struct BMeshToMeshParams){0})); #ifdef USE_TESSFACE_DEFAULT BKE_mesh_tessface_calc(me); #endif /* Free derived mesh. usually this would happen through depsgraph but there * are exceptions like file save that will not cause this, and we want to * avoid ending up with an invalid derived mesh then. * * Do it for all objects which shares the same mesh datablock, since their * derived meshes might also be referencing data which was just freed, * * Annoying enough, but currently seems most efficient way to avoid access * of freed data on scene update, especially in cases when there are dependency * cycles. */ for (Object *other_object = G.main->object.first; other_object != NULL; other_object = other_object->id.next) { if (other_object->data == ob->data) { BKE_object_free_derived_caches(other_object); } } } /** * Should only be called on the active editmesh, otherwise call #BKE_editmesh_free */ void EDBM_mesh_free(BMEditMesh *em) { /* These tables aren't used yet, so it's not strictly necessary * to 'end' them (with 'e' param) but if someone tries to start * using them, having these in place will save a lot of pain */ ED_mesh_mirror_spatial_table(NULL, NULL, NULL, NULL, 'e'); ED_mesh_mirror_topo_table(NULL, NULL, 'e'); BKE_editmesh_free(em); } void EDBM_selectmode_flush_ex(BMEditMesh *em, const short selectmode) { BM_mesh_select_mode_flush_ex(em->bm, selectmode); } void EDBM_selectmode_flush(BMEditMesh *em) { EDBM_selectmode_flush_ex(em, em->selectmode); } void EDBM_deselect_flush(BMEditMesh *em) { /* function below doesnt use. 
just do this to keep the values in sync */ em->bm->selectmode = em->selectmode; BM_mesh_deselect_flush(em->bm); } void EDBM_select_flush(BMEditMesh *em) { /* function below doesnt use. just do this to keep the values in sync */ em->bm->selectmode = em->selectmode; BM_mesh_select_flush(em->bm); } void EDBM_select_more(BMEditMesh *em, const bool use_face_step) { BMOperator bmop; const bool use_faces = (em->selectmode == SCE_SELECT_FACE); BMO_op_initf(em->bm, &bmop, BMO_FLAG_DEFAULTS, "region_extend geom=%hvef use_contract=%b use_faces=%b use_face_step=%b", BM_ELEM_SELECT, false, use_faces, use_face_step); BMO_op_exec(em->bm, &bmop); /* don't flush selection in edge/vertex mode */ BMO_slot_buffer_hflag_enable(em->bm, bmop.slots_out, "geom.out", BM_ALL_NOLOOP, BM_ELEM_SELECT, use_faces ? true : false); BMO_op_finish(em->bm, &bmop); EDBM_selectmode_flush(em); } void EDBM_select_less(BMEditMesh *em, const bool use_face_step) { BMOperator bmop; const bool use_faces = (em->selectmode == SCE_SELECT_FACE); BMO_op_initf(em->bm, &bmop, BMO_FLAG_DEFAULTS, "region_extend geom=%hvef use_contract=%b use_faces=%b use_face_step=%b", BM_ELEM_SELECT, true, use_faces, use_face_step); BMO_op_exec(em->bm, &bmop); /* don't flush selection in edge/vertex mode */ BMO_slot_buffer_hflag_disable(em->bm, bmop.slots_out, "geom.out", BM_ALL_NOLOOP, BM_ELEM_SELECT, use_faces ? 
true : false); BMO_op_finish(em->bm, &bmop); EDBM_selectmode_flush(em); /* only needed for select less, ensure we don't have isolated elements remaining */ BM_mesh_select_mode_clean(em->bm); } void EDBM_flag_disable_all(BMEditMesh *em, const char hflag) { BM_mesh_elem_hflag_disable_all(em->bm, BM_VERT | BM_EDGE | BM_FACE, hflag, false); } void EDBM_flag_enable_all(BMEditMesh *em, const char hflag) { BM_mesh_elem_hflag_enable_all(em->bm, BM_VERT | BM_EDGE | BM_FACE, hflag, true); } /** * Return a new UVVertMap from the editmesh */ UvVertMap *BM_uv_vert_map_create( BMesh *bm, const float limit[2], const bool use_select, const bool use_winding) { BMVert *ev; BMFace *efa; BMLoop *l; BMIter iter, liter; /* vars from original func */ UvVertMap *vmap; UvMapVert *buf; /* MTexPoly *tf; */ /* UNUSED */ MLoopUV *luv; unsigned int a; int totverts, i, totuv, totfaces; const int cd_loop_uv_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPUV); bool *winding = NULL; BLI_buffer_declare_static(vec2f, tf_uv_buf, BLI_BUFFER_NOP, BM_DEFAULT_NGON_STACK_SIZE); BM_mesh_elem_index_ensure(bm, BM_VERT | BM_FACE); totfaces = bm->totface; totverts = bm->totvert; totuv = 0; /* generate UvMapVert array */ BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) { if ((use_select == false) || BM_elem_flag_test(efa, BM_ELEM_SELECT)) { totuv += efa->len; } } if (totuv == 0) { return NULL; } vmap = (UvVertMap *)MEM_callocN(sizeof(*vmap), "UvVertMap"); if (!vmap) { return NULL; } vmap->vert = (UvMapVert **)MEM_callocN(sizeof(*vmap->vert) * totverts, "UvMapVert_pt"); buf = vmap->buf = (UvMapVert *)MEM_callocN(sizeof(*vmap->buf) * totuv, "UvMapVert"); if (use_winding) { winding = MEM_callocN(sizeof(*winding) * totfaces, "winding"); } if (!vmap->vert || !vmap->buf) { BKE_mesh_uv_vert_map_free(vmap); return NULL; } BM_ITER_MESH_INDEX (efa, &iter, bm, BM_FACES_OF_MESH, a) { if ((use_select == false) || BM_elem_flag_test(efa, BM_ELEM_SELECT)) { float (*tf_uv)[2]; if (use_winding) { tf_uv = (float 
(*)[2])BLI_buffer_reinit_data(&tf_uv_buf, vec2f, efa->len); } BM_ITER_ELEM_INDEX(l, &liter, efa, BM_LOOPS_OF_FACE, i) { buf->tfindex = i; buf->f = a; buf->separate = 0; buf->next = vmap->vert[BM_elem_index_get(l->v)]; vmap->vert[BM_elem_index_get(l->v)] = buf; buf++; if (use_winding) { luv = BM_ELEM_CD_GET_VOID_P(l, cd_loop_uv_offset); copy_v2_v2(tf_uv[i], luv->uv); } } if (use_winding) { winding[a] = cross_poly_v2(tf_uv, efa->len) > 0; } } } /* sort individual uvs for each vert */ BM_ITER_MESH_INDEX (ev, &iter, bm, BM_VERTS_OF_MESH, a) { UvMapVert *newvlist = NULL, *vlist = vmap->vert[a]; UvMapVert *iterv, *v, *lastv, *next; float *uv, *uv2, uvdiff[2]; while (vlist) { v = vlist; vlist = vlist->next; v->next = newvlist; newvlist = v; efa = BM_face_at_index(bm, v->f); /* tf = CustomData_bmesh_get(&bm->pdata, efa->head.data, CD_MTEXPOLY); */ /* UNUSED */ l = BM_iter_at_index(bm, BM_LOOPS_OF_FACE, efa, v->tfindex); luv = BM_ELEM_CD_GET_VOID_P(l, cd_loop_uv_offset); uv = luv->uv; lastv = NULL; iterv = vlist; while (iterv) { next = iterv->next; efa = BM_face_at_index(bm, iterv->f); /* tf = CustomData_bmesh_get(&bm->pdata, efa->head.data, CD_MTEXPOLY); */ /* UNUSED */ l = BM_iter_at_index(bm, BM_LOOPS_OF_FACE, efa, iterv->tfindex); luv = BM_ELEM_CD_GET_VOID_P(l, cd_loop_uv_offset); uv2 = luv->uv; sub_v2_v2v2(uvdiff, uv2, uv); if (fabsf(uvdiff[0]) < limit[0] && fabsf(uvdiff[1]) < limit[1] && (!use_winding || winding[iterv->f] == winding[v->f])) { if (lastv) lastv->next = next; else vlist = next; iterv->next = newvlist; newvlist = iterv; } else { lastv = iterv; } iterv = next; } newvlist->separate = 1; } vmap->vert[a] = newvlist; } if (use_winding) { MEM_freeN(winding); } BLI_buffer_free(&tf_uv_buf); return vmap; } UvMapVert *BM_uv_vert_map_at_index(UvVertMap *vmap, unsigned int v) { return vmap->vert[v]; } /* A specialized vert map used by stitch operator */ UvElementMap *BM_uv_element_map_create( BMesh *bm, const bool selected, const bool use_winding, const bool 
do_islands) { BMVert *ev; BMFace *efa; BMLoop *l; BMIter iter, liter; /* vars from original func */ UvElementMap *element_map; UvElement *buf; bool *winding; BLI_buffer_declare_static(vec2f, tf_uv_buf, BLI_BUFFER_NOP, BM_DEFAULT_NGON_STACK_SIZE); MLoopUV *luv; int totverts, totfaces, i, totuv, j; const int cd_loop_uv_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPUV); BM_mesh_elem_index_ensure(bm, BM_VERT | BM_FACE); totfaces = bm->totface; totverts = bm->totvert; totuv = 0; /* generate UvElement array */ BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) { if (!selected || BM_elem_flag_test(efa, BM_ELEM_SELECT)) { totuv += efa->len; } } if (totuv == 0) { return NULL; } element_map = (UvElementMap *)MEM_callocN(sizeof(*element_map), "UvElementMap"); element_map->totalUVs = totuv; element_map->vert = (UvElement **)MEM_callocN(sizeof(*element_map->vert) * totverts, "UvElementVerts"); buf = element_map->buf = (UvElement *)MEM_callocN(sizeof(*element_map->buf) * totuv, "UvElement"); if (use_winding) { winding = MEM_mallocN(sizeof(*winding) * totfaces, "winding"); } BM_ITER_MESH_INDEX (efa, &iter, bm, BM_FACES_OF_MESH, j) { if (use_winding) { winding[j] = false; } if (!selected || BM_elem_flag_test(efa, BM_ELEM_SELECT)) { float (*tf_uv)[2]; if (use_winding) { tf_uv = (float (*)[2])BLI_buffer_reinit_data(&tf_uv_buf, vec2f, efa->len); } BM_ITER_ELEM_INDEX (l, &liter, efa, BM_LOOPS_OF_FACE, i) { buf->l = l; buf->separate = 0; buf->island = INVALID_ISLAND; buf->tfindex = i; buf->next = element_map->vert[BM_elem_index_get(l->v)]; element_map->vert[BM_elem_index_get(l->v)] = buf; if (use_winding) { luv = BM_ELEM_CD_GET_VOID_P(l, cd_loop_uv_offset); copy_v2_v2(tf_uv[i], luv->uv); } buf++; } if (use_winding) { winding[j] = cross_poly_v2(tf_uv, efa->len) > 0; } } } /* sort individual uvs for each vert */ BM_ITER_MESH_INDEX (ev, &iter, bm, BM_VERTS_OF_MESH, i) { UvElement *newvlist = NULL, *vlist = element_map->vert[i]; UvElement *iterv, *v, *lastv, *next; float *uv, *uv2, 
uvdiff[2]; while (vlist) { v = vlist; vlist = vlist->next; v->next = newvlist; newvlist = v; l = v->l; luv = BM_ELEM_CD_GET_VOID_P(l, cd_loop_uv_offset); uv = luv->uv; lastv = NULL; iterv = vlist; while (iterv) { next = iterv->next; l = iterv->l; luv = BM_ELEM_CD_GET_VOID_P(l, cd_loop_uv_offset); uv2 = luv->uv; sub_v2_v2v2(uvdiff, uv2, uv); if (fabsf(uvdiff[0]) < STD_UV_CONNECT_LIMIT && fabsf(uvdiff[1]) < STD_UV_CONNECT_LIMIT && (!use_winding || winding[BM_elem_index_get(iterv->l->f)] == winding[BM_elem_index_get(v->l->f)])) { if (lastv) lastv->next = next; else vlist = next; iterv->next = newvlist; newvlist = iterv; } else { lastv = iterv; } iterv = next; } newvlist->separate = 1; } element_map->vert[i] = newvlist; } if (use_winding) { MEM_freeN(winding); } if (do_islands) { unsigned int *map; BMFace **stack; int stacksize = 0; UvElement *islandbuf; /* island number for faces */ int *island_number = NULL; int nislands = 0, islandbufsize = 0; /* map holds the map from current vmap->buf to the new, sorted map */ map = MEM_mallocN(sizeof(*map) * totuv, "uvelement_remap"); stack = MEM_mallocN(sizeof(*stack) * bm->totface, "uv_island_face_stack"); islandbuf = MEM_callocN(sizeof(*islandbuf) * totuv, "uvelement_island_buffer"); island_number = MEM_mallocN(sizeof(*island_number) * totfaces, "uv_island_number_face"); copy_vn_i(island_number, totfaces, INVALID_ISLAND); /* at this point, every UvElement in vert points to a UvElement sharing the same vertex. Now we should sort uv's in islands. 
*/ for (i = 0; i < totuv; i++) { if (element_map->buf[i].island == INVALID_ISLAND) { element_map->buf[i].island = nislands; stack[0] = element_map->buf[i].l->f; island_number[BM_elem_index_get(stack[0])] = nislands; stacksize = 1; while (stacksize > 0) { efa = stack[--stacksize]; BM_ITER_ELEM (l, &liter, efa, BM_LOOPS_OF_FACE) { UvElement *element, *initelement = element_map->vert[BM_elem_index_get(l->v)]; for (element = initelement; element; element = element->next) { if (element->separate) initelement = element; if (element->l->f == efa) { /* found the uv corresponding to our face and vertex. Now fill it to the buffer */ element->island = nislands; map[element - element_map->buf] = islandbufsize; islandbuf[islandbufsize].l = element->l; islandbuf[islandbufsize].separate = element->separate; islandbuf[islandbufsize].tfindex = element->tfindex; islandbuf[islandbufsize].island = nislands; islandbufsize++; for (element = initelement; element; element = element->next) { if (element->separate && element != initelement) break; if (island_number[BM_elem_index_get(element->l->f)] == INVALID_ISLAND) { stack[stacksize++] = element->l->f; island_number[BM_elem_index_get(element->l->f)] = nislands; } } break; } } } } nislands++; } } MEM_freeN(island_number); /* remap */ for (i = 0; i < bm->totvert; i++) { /* important since we may do selection only. 
Some of these may be NULL */ if (element_map->vert[i]) element_map->vert[i] = &islandbuf[map[element_map->vert[i] - element_map->buf]]; } element_map->islandIndices = MEM_callocN(sizeof(*element_map->islandIndices) * nislands, "UvElementMap_island_indices"); j = 0; for (i = 0; i < totuv; i++) { UvElement *element = element_map->buf[i].next; if (element == NULL) islandbuf[map[i]].next = NULL; else islandbuf[map[i]].next = &islandbuf[map[element - element_map->buf]]; if (islandbuf[i].island != j) { j++; element_map->islandIndices[j] = i; } } MEM_freeN(element_map->buf); element_map->buf = islandbuf; element_map->totalIslands = nislands; MEM_freeN(stack); MEM_freeN(map); } BLI_buffer_free(&tf_uv_buf); return element_map; } void BM_uv_vert_map_free(UvVertMap *vmap) { if (vmap) { if (vmap->vert) MEM_freeN(vmap->vert); if (vmap->buf) MEM_freeN(vmap->buf); MEM_freeN(vmap); } } void BM_uv_element_map_free(UvElementMap *element_map) { if (element_map) { if (element_map->vert) MEM_freeN(element_map->vert); if (element_map->buf) MEM_freeN(element_map->buf); if (element_map->islandIndices) MEM_freeN(element_map->islandIndices); MEM_freeN(element_map); } } UvElement *BM_uv_element_get(UvElementMap *map, BMFace *efa, BMLoop *l) { UvElement *element; element = map->vert[BM_elem_index_get(l->v)]; for (; element; element = element->next) if (element->l->f == efa) return element; return NULL; } /* last_sel, use em->act_face otherwise get the last selected face in the editselections * at the moment, last_sel is mainly useful for making sure the space image dosnt flicker */ MTexPoly *EDBM_mtexpoly_active_get(BMEditMesh *em, BMFace **r_act_efa, const bool sloppy, const bool selected) { BMFace *efa = NULL; if (!EDBM_mtexpoly_check(em)) return NULL; efa = BM_mesh_active_face_get(em->bm, sloppy, selected); if (efa) { if (r_act_efa) *r_act_efa = efa; return CustomData_bmesh_get(&em->bm->pdata, efa->head.data, CD_MTEXPOLY); } if (r_act_efa) *r_act_efa = NULL; return NULL; } /* can we edit 
UV's for this mesh?*/ bool EDBM_mtexpoly_check(BMEditMesh *em) { /* some of these checks could be a touch overkill */ return em && em->bm->totface && CustomData_has_layer(&em->bm->pdata, CD_MTEXPOLY) && CustomData_has_layer(&em->bm->ldata, CD_MLOOPUV); } bool EDBM_vert_color_check(BMEditMesh *em) { /* some of these checks could be a touch overkill */ return em && em->bm->totface && CustomData_has_layer(&em->bm->ldata, CD_MLOOPCOL); } static BMVert *cache_mirr_intptr_as_bmvert(intptr_t *index_lookup, int index) { intptr_t eve_i = index_lookup[index]; return (eve_i == -1) ? NULL : (BMVert *)eve_i; } /** * Mirror editing API, usage: * * \code{.c} * EDBM_verts_mirror_cache_begin(em, ...); * * BM_ITER_MESH (v, &iter, em->bm, BM_VERTS_OF_MESH) { * v_mirror = EDBM_verts_mirror_get(em, v); * e_mirror = EDBM_verts_mirror_get_edge(em, e); * f_mirror = EDBM_verts_mirror_get_face(em, f); * } * * EDBM_verts_mirror_cache_end(em); * \endcode */ /* BM_SEARCH_MAXDIST is too big, copied from 2.6x MOC_THRESH, should become a * preference */ #define BM_SEARCH_MAXDIST_MIRR 0.00002f #define BM_CD_LAYER_ID "__mirror_index" /** * \param em Editmesh. * \param use_self Allow a vertex to point to its self (middle verts). * \param use_select Restrict to selected verts. * \param use_topology Use topology mirror. * \param maxdist Distance for close point test. * \param r_index Optional array to write into, as an alternative to a customdata layer (length of total verts). 
*/ void EDBM_verts_mirror_cache_begin_ex(BMEditMesh *em, const int axis, const bool use_self, const bool use_select, /* extra args */ const bool use_topology, float maxdist, int *r_index) { Mesh *me = (Mesh *)em->ob->data; BMesh *bm = em->bm; BMIter iter; BMVert *v; int cd_vmirr_offset; int i; const float maxdist_sq = SQUARE(maxdist); /* one or the other is used depending if topo is enabled */ KDTree *tree = NULL; MirrTopoStore_t mesh_topo_store = {NULL, -1, -1, -1}; BM_mesh_elem_table_ensure(bm, BM_VERT); if (r_index == NULL) { const char *layer_id = BM_CD_LAYER_ID; em->mirror_cdlayer = CustomData_get_named_layer_index(&bm->vdata, CD_PROP_INT, layer_id); if (em->mirror_cdlayer == -1) { BM_data_layer_add_named(bm, &bm->vdata, CD_PROP_INT, layer_id); em->mirror_cdlayer = CustomData_get_named_layer_index(&bm->vdata, CD_PROP_INT, layer_id); } cd_vmirr_offset = CustomData_get_n_offset(&bm->vdata, CD_PROP_INT, em->mirror_cdlayer - CustomData_get_layer_index(&bm->vdata, CD_PROP_INT)); bm->vdata.layers[em->mirror_cdlayer].flag |= CD_FLAG_TEMPORARY; } BM_mesh_elem_index_ensure(bm, BM_VERT); if (use_topology) { ED_mesh_mirrtopo_init(me, NULL, -1, &mesh_topo_store, true); } else { tree = BLI_kdtree_new(bm->totvert); BM_ITER_MESH_INDEX (v, &iter, bm, BM_VERTS_OF_MESH, i) { BLI_kdtree_insert(tree, i, v->co); } BLI_kdtree_balance(tree); } #define VERT_INTPTR(_v, _i) r_index ? 
&r_index[_i] : BM_ELEM_CD_GET_VOID_P(_v, cd_vmirr_offset); BM_ITER_MESH_INDEX (v, &iter, bm, BM_VERTS_OF_MESH, i) { BLI_assert(BM_elem_index_get(v) == i); /* temporary for testing, check for selection */ if (use_select && !BM_elem_flag_test(v, BM_ELEM_SELECT)) { /* do nothing */ } else { BMVert *v_mirr; int *idx = VERT_INTPTR(v, i); if (use_topology) { v_mirr = cache_mirr_intptr_as_bmvert(mesh_topo_store.index_lookup, i); } else { int i_mirr; float co[3]; copy_v3_v3(co, v->co); co[axis] *= -1.0f; v_mirr = NULL; i_mirr = BLI_kdtree_find_nearest(tree, co, NULL); if (i_mirr != -1) { BMVert *v_test = BM_vert_at_index(bm, i_mirr); if (len_squared_v3v3(co, v_test->co) < maxdist_sq) { v_mirr = v_test; } } } if (v_mirr && (use_self || (v_mirr != v))) { const int i_mirr = BM_elem_index_get(v_mirr); *idx = i_mirr; idx = VERT_INTPTR(v_mirr, i_mirr); *idx = i; } else { *idx = -1; } } } #undef VERT_INTPTR if (use_topology) { ED_mesh_mirrtopo_free(&mesh_topo_store); } else { BLI_kdtree_free(tree); } } void EDBM_verts_mirror_cache_begin(BMEditMesh *em, const int axis, const bool use_self, const bool use_select, const bool use_topology) { EDBM_verts_mirror_cache_begin_ex(em, axis, use_self, use_select, /* extra args */ use_topology, BM_SEARCH_MAXDIST_MIRR, NULL); } BMVert *EDBM_verts_mirror_get(BMEditMesh *em, BMVert *v) { const int *mirr = CustomData_bmesh_get_layer_n(&em->bm->vdata, v->head.data, em->mirror_cdlayer); BLI_assert(em->mirror_cdlayer != -1); /* invalid use */ if (mirr && *mirr >= 0 && *mirr < em->bm->totvert) { if (!em->bm->vtable) { printf("err: should only be called between " "EDBM_verts_mirror_cache_begin and EDBM_verts_mirror_cache_end"); return NULL; } return em->bm->vtable[*mirr]; } return NULL; } BMEdge *EDBM_verts_mirror_get_edge(BMEditMesh *em, BMEdge *e) { BMVert *v1_mirr = EDBM_verts_mirror_get(em, e->v1); if (v1_mirr) { BMVert *v2_mirr = EDBM_verts_mirror_get(em, e->v2); if (v2_mirr) { return BM_edge_exists(v1_mirr, v2_mirr); } } return NULL; } BMFace 
*EDBM_verts_mirror_get_face(BMEditMesh *em, BMFace *f) { BMVert **v_mirr_arr = BLI_array_alloca(v_mirr_arr, f->len); BMLoop *l_iter, *l_first; unsigned int i = 0; l_iter = l_first = BM_FACE_FIRST_LOOP(f); do { if ((v_mirr_arr[i++] = EDBM_verts_mirror_get(em, l_iter->v)) == NULL) { return NULL; } } while ((l_iter = l_iter->next) != l_first); return BM_face_exists(v_mirr_arr, f->len); } void EDBM_verts_mirror_cache_clear(BMEditMesh *em, BMVert *v) { int *mirr = CustomData_bmesh_get_layer_n(&em->bm->vdata, v->head.data, em->mirror_cdlayer); BLI_assert(em->mirror_cdlayer != -1); /* invalid use */ if (mirr) { *mirr = -1; } } void EDBM_verts_mirror_cache_end(BMEditMesh *em) { em->mirror_cdlayer = -1; } void EDBM_verts_mirror_apply(BMEditMesh *em, const int sel_from, const int sel_to) { BMIter iter; BMVert *v; BLI_assert((em->bm->vtable != NULL) && ((em->bm->elem_table_dirty & BM_VERT) == 0)); BM_ITER_MESH (v, &iter, em->bm, BM_VERTS_OF_MESH) { if (BM_elem_flag_test(v, BM_ELEM_SELECT) == sel_from) { BMVert *mirr = EDBM_verts_mirror_get(em, v); if (mirr) { if (BM_elem_flag_test(mirr, BM_ELEM_SELECT) == sel_to) { copy_v3_v3(mirr->co, v->co); mirr->co[0] *= -1.0f; } } } } } /* swap is 0 or 1, if 1 it hides not selected */ void EDBM_mesh_hide(BMEditMesh *em, bool swap) { BMIter iter; BMElem *ele; int itermode; char hflag_swap = swap ? BM_ELEM_SELECT : 0; if (em == NULL) return; if (em->selectmode & SCE_SELECT_VERTEX) itermode = BM_VERTS_OF_MESH; else if (em->selectmode & SCE_SELECT_EDGE) itermode = BM_EDGES_OF_MESH; else itermode = BM_FACES_OF_MESH; BM_ITER_MESH (ele, &iter, em->bm, itermode) { if (BM_elem_flag_test(ele, BM_ELEM_SELECT) ^ hflag_swap) BM_elem_hide_set(em->bm, ele, true); } EDBM_selectmode_flush(em); /* original hide flushing comment (OUTDATED): * hide happens on least dominant select mode, and flushes up, not down! 
(helps preventing errors in subsurf) */ /* - vertex hidden, always means edge is hidden too * - edge hidden, always means face is hidden too * - face hidden, only set face hide * - then only flush back down what's absolute hidden */ } void EDBM_mesh_reveal(BMEditMesh *em) { const char iter_types[3] = {BM_VERTS_OF_MESH, BM_EDGES_OF_MESH, BM_FACES_OF_MESH}; const bool sels[3] = { (em->selectmode & SCE_SELECT_VERTEX) != 0, (em->selectmode & SCE_SELECT_EDGE) != 0, (em->selectmode & SCE_SELECT_FACE) != 0, }; int i; /* Use tag flag to remember what was hidden before all is revealed. * BM_ELEM_HIDDEN --> BM_ELEM_TAG */ #pragma omp parallel for schedule(static) if (em->bm->totvert + em->bm->totedge + em->bm->totface >= BM_OMP_LIMIT) for (i = 0; i < 3; i++) { BMIter iter; BMElem *ele; BM_ITER_MESH (ele, &iter, em->bm, iter_types[i]) { BM_elem_flag_set(ele, BM_ELEM_TAG, BM_elem_flag_test(ele, BM_ELEM_HIDDEN)); } } /* Reveal everything */ EDBM_flag_disable_all(em, BM_ELEM_HIDDEN); /* Select relevant just-revealed elements */ for (i = 0; i < 3; i++) { BMIter iter; BMElem *ele; if (!sels[i]) { continue; } BM_ITER_MESH (ele, &iter, em->bm, iter_types[i]) { if (BM_elem_flag_test(ele, BM_ELEM_TAG)) { BM_elem_select_set(em->bm, ele, true); } } } EDBM_selectmode_flush(em); /* hidden faces can have invalid normals */ EDBM_mesh_normals_update(em); } /* so many tools call these that we better make it a generic function. */ void EDBM_update_generic(BMEditMesh *em, const bool do_tessface, const bool is_destructive) { Object *ob = em->ob; /* order of calling isn't important */ DAG_id_tag_update(ob->data, OB_RECALC_DATA); WM_main_add_notifier(NC_GEOM | ND_DATA, ob->data); if (do_tessface) { BKE_editmesh_tessface_calc(em); } if (is_destructive) { /* TODO. we may be able to remove this now! 
- Campbell */ // BM_mesh_elem_table_free(em->bm, BM_ALL_NOLOOP); } else { /* in debug mode double check we didn't need to recalculate */ BLI_assert(BM_mesh_elem_table_check(em->bm) == true); } /* don't keep stale derivedMesh data around, see: [#38872] */ BKE_editmesh_free_derivedmesh(em); #ifdef DEBUG { BMEditSelection *ese; for (ese = em->bm->selected.first; ese; ese = ese->next) { BLI_assert(BM_elem_flag_test(ese->ele, BM_ELEM_SELECT)); } } #endif } /* poll call for mesh operators requiring a view3d context */ int EDBM_view3d_poll(bContext *C) { if (ED_operator_editmesh(C) && ED_operator_view3d_active(C)) return 1; return 0; } BMElem *EDBM_elem_from_selectmode(BMEditMesh *em, BMVert *eve, BMEdge *eed, BMFace *efa) { BMElem *ele = NULL; if ((em->selectmode & SCE_SELECT_VERTEX) && eve) { ele = (BMElem *)eve; } else if ((em->selectmode & SCE_SELECT_EDGE) && eed) { ele = (BMElem *)eed; } else if ((em->selectmode & SCE_SELECT_FACE) && efa) { ele = (BMElem *)efa; } return ele; } /** * Used when we want to store a single index for any vert/edge/face. * * Intended for use with operators. 
*/ int EDBM_elem_to_index_any(BMEditMesh *em, BMElem *ele) { BMesh *bm = em->bm; int index = BM_elem_index_get(ele); if (ele->head.htype == BM_VERT) { BLI_assert(!(bm->elem_index_dirty & BM_VERT)); } else if (ele->head.htype == BM_EDGE) { BLI_assert(!(bm->elem_index_dirty & BM_EDGE)); index += bm->totvert; } else if (ele->head.htype == BM_FACE) { BLI_assert(!(bm->elem_index_dirty & BM_FACE)); index += bm->totvert + bm->totedge; } else { BLI_assert(0); } return index; } BMElem *EDBM_elem_from_index_any(BMEditMesh *em, int index) { BMesh *bm = em->bm; if (index < bm->totvert) { return (BMElem *)BM_vert_at_index_find_or_table(bm, index); } index -= bm->totvert; if (index < bm->totedge) { return (BMElem *)BM_edge_at_index_find_or_table(bm, index); } index -= bm->totedge; if (index < bm->totface) { return (BMElem *)BM_face_at_index_find_or_table(bm, index); } return NULL; } /* -------------------------------------------------------------------- */ /* BMBVH functions */ // XXX #if 0 //BMESH_TODO: not implemented yet int BMBVH_VertVisible(BMBVHTree *tree, BMEdge *e, RegionView3D *r3d) { } #endif static BMFace *edge_ray_cast(struct BMBVHTree *tree, const float co[3], const float dir[3], float *r_hitout, BMEdge *e) { BMFace *f = BKE_bmbvh_ray_cast(tree, co, dir, 0.0f, NULL, r_hitout, NULL); if (f && BM_edge_in_face(e, f)) return NULL; return f; } static void scale_point(float c1[3], const float p[3], const float s) { sub_v3_v3(c1, p); mul_v3_fl(c1, s); add_v3_v3(c1, p); } bool BMBVH_EdgeVisible(struct BMBVHTree *tree, BMEdge *e, ARegion *ar, View3D *v3d, Object *obedit) { BMFace *f; float co1[3], co2[3], co3[3], dir1[3], dir2[3], dir3[3]; float origin[3], invmat[4][4]; float epsilon = 0.01f; float end[3]; const float mval_f[2] = {ar->winx / 2.0f, ar->winy / 2.0f}; ED_view3d_win_to_segment(ar, v3d, mval_f, origin, end, false); invert_m4_m4(invmat, obedit->obmat); mul_m4_v3(invmat, origin); copy_v3_v3(co1, e->v1->co); mid_v3_v3v3(co2, e->v1->co, e->v2->co); copy_v3_v3(co3, 
e->v2->co); scale_point(co1, co2, 0.99); scale_point(co3, co2, 0.99); /* ok, idea is to generate rays going from the camera origin to the * three points on the edge (v1, mid, v2)*/ sub_v3_v3v3(dir1, origin, co1); sub_v3_v3v3(dir2, origin, co2); sub_v3_v3v3(dir3, origin, co3); normalize_v3_length(dir1, epsilon); normalize_v3_length(dir2, epsilon); normalize_v3_length(dir3, epsilon); /* offset coordinates slightly along view vectors, to avoid * hitting the faces that own the edge.*/ add_v3_v3v3(co1, co1, dir1); add_v3_v3v3(co2, co2, dir2); add_v3_v3v3(co3, co3, dir3); normalize_v3(dir1); normalize_v3(dir2); normalize_v3(dir3); /* do three samplings: left, middle, right */ f = edge_ray_cast(tree, co1, dir1, NULL, e); if (f && !edge_ray_cast(tree, co2, dir2, NULL, e)) return true; else if (f && !edge_ray_cast(tree, co3, dir3, NULL, e)) return true; else if (!f) return true; return false; }
GB_binop__bclr_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bclr_uint16) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__bclr_uint16) // A.*B function (eWiseMult): GB (_AemultB_03__bclr_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_uint16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((node)) // C+=B function (dense accum): GB (_Cdense_accumB__bclr_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__bclr_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_uint16) // C=scalar+B GB (_bind1st__bclr_uint16) // C=scalar+B' GB (_bind1st_tran__bclr_uint16) // C=A+scalar GB (_bind2nd__bclr_uint16) // C=A'+scalar GB (_bind2nd_tran__bclr_uint16) // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = GB_BITCLR (aij, bij, uint16_t, 16) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define 
GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_BITCLR (x, y, uint16_t, 16) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BCLR || GxB_NO_UINT16 || GxB_NO_BCLR_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bclr_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bclr_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bclr_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((node)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bclr_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bclr_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t 
*restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bclr_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bclr_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bclr_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bclr_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = Bx [p] ; Cx [p] = GB_BITCLR (x, bij, uint16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bclr_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = Ax [p] ; Cx [p] = GB_BITCLR (aij, y, uint16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = GB_BITCLR (x, aij, uint16_t, 16) ; \ } GrB_Info GB (_bind1st_tran__bclr_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = GB_BITCLR (aij, y, uint16_t, 16) ; \ } GrB_Info GB (_bind2nd_tran__bclr_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
c-omp.c
/* This file contains routines to construct GNU OpenMP constructs,
   called from parsing in the C and C++ front ends.

   Copyright (C) 2005 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>,
                 Diego Novillo <dnovillo@redhat.com>.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 2, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "function.h"
#include "c-common.h"
#include "toplev.h"
#include "tree-gimple.h"
#include "bitmap.h"
#include "langhooks.h"


/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  Returns the statement added to the current
   statement list.  */

tree
c_finish_omp_master (tree stmt)
{
  return add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
}

/* Complete a #pragma omp critical construct.  STMT is the structured-block
   that follows the pragma, NAME is the identifier in the pragma, or null
   if it was omitted.  */

tree
c_finish_omp_critical (tree body, tree name)
{
  tree stmt = make_node (OMP_CRITICAL);
  TREE_TYPE (stmt) = void_type_node;
  OMP_CRITICAL_BODY (stmt) = body;
  OMP_CRITICAL_NAME (stmt) = name;
  return add_stmt (stmt);
}

/* Complete a #pragma omp ordered construct.  STMT is the structured-block
   that follows the pragma.  */

tree
c_finish_omp_ordered (tree stmt)
{
  return add_stmt (build1 (OMP_ORDERED, void_type_node, stmt));
}


/* Complete a #pragma omp barrier construct.  Emitted as a call to the
   GOMP runtime's barrier built-in.  */

void
c_finish_omp_barrier (void)
{
  tree x;

  x = built_in_decls[BUILT_IN_GOMP_BARRIER];
  x = build_function_call_expr (x, NULL);
  add_stmt (x);
}


/* Complete a #pragma omp atomic construct.  The expression to be
   implemented atomically is LHS code= RHS.  The value returned is
   either error_mark_node (if the construct was erroneous) or an
   OMP_ATOMIC node which should be added to the current statement tree
   with add_stmt.  */

tree
c_finish_omp_atomic (enum tree_code code, tree lhs, tree rhs)
{
  tree x, type, addr;

  if (lhs == error_mark_node || rhs == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error ("invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }

  /* ??? Validate that rhs does not overlap lhs.  */

  /* Take and save the address of the lhs.  From then on we'll reference
     it via indirection.  */
  addr = build_unary_op (ADDR_EXPR, lhs, 0);
  if (addr == error_mark_node)
    return error_mark_node;
  addr = save_expr (addr);
  if (TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || TREE_CODE (TREE_OPERAND (addr, 0)) != VAR_DECL))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr), NULL);
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  lhs = build_indirect_ref (addr, NULL);

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  x = build_modify_expr (lhs, code, rhs);
  if (x == error_mark_node)
    return error_mark_node;
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  /* Keep only the canonicalized RHS; the OMP_ATOMIC node carries the saved
     address and the RHS, and expansion rebuilds the load/store pair.  */
  rhs = TREE_OPERAND (x, 1);

  /* Punt the actual generation of atomic operations to common code.  */
  return build2 (OMP_ATOMIC, void_type_node, addr, rhs);
}


/* Complete a #pragma omp flush construct.  We don't do anything with the
   variable list that the syntax allows.  Emitted as a call to the
   synchronize built-in (a full memory barrier).  */

void
c_finish_omp_flush (void)
{
  tree x;

  x = built_in_decls[BUILT_IN_SYNCHRONIZE];
  x = build_function_call_expr (x, NULL);
  add_stmt (x);
}


/* Check and canonicalize #pragma omp for increment expression.
   Helper function for c_finish_omp_for.

   EXP is the increment value being examined, DECL the iteration
   variable.  Returns the increment amount expressed as a value to be
   added to DECL (DECL itself contributes zero), or error_mark_node if
   EXP is not of the required "DECL plus/minus invariant" shape.  */

static tree
check_omp_for_incr_expr (tree exp, tree decl)
{
  tree t;

  /* Reject narrower-than-DECL arithmetic: the increment must be computed
     in at least DECL's precision.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    case NOP_EXPR:
      /* Look through conversions, converting the result back.  */
      t = check_omp_for_incr_expr (TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_convert (TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      /* DECL may only appear on the left of a subtraction.  */
      t = check_omp_for_incr_expr (TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2 (MINUS_EXPR, TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      /* DECL may appear on either side of an addition.  */
      t = check_omp_for_incr_expr (TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2 (PLUS_EXPR, TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
        return fold_build2 (PLUS_EXPR, TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    default:
      break;
    }

  return error_mark_node;
}

/* Validate and emit code for the OpenMP directive #pragma omp for.
   INIT, COND, INCR, BODY and PRE_BODY are the five basic elements
   of the loop (initialization expression, controlling predicate, increment
   expression, body of the loop and statements to go before the loop).
   DECL is the iteration variable.

   Returns the OMP_FOR statement added to the statement list, or NULL if
   any of the loop elements failed validation (after diagnosing).  */

tree
c_finish_omp_for (location_t locus, tree decl, tree init, tree cond,
		  tree incr, tree body, tree pre_body)
{
  location_t elocus = locus;
  bool fail = false;

  if (EXPR_HAS_LOCATION (init))
    elocus = EXPR_LOCATION (init);

  /* Validate the iteration variable.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
    {
      error ("%Hinvalid type for iteration variable %qE", &elocus, decl);
      fail = true;
    }
  if (TYPE_UNSIGNED (TREE_TYPE (decl)))
    warning (0, "%Hiteration variable %qE is unsigned", &elocus, decl);

  /* In the case of "for (int i = 0...)", init will be a decl.  It should
     have a DECL_INITIAL that we can turn into an assignment.  */
  if (init == decl)
    {
      elocus = DECL_SOURCE_LOCATION (decl);

      init = DECL_INITIAL (decl);
      if (init == NULL)
	{
	  error ("%H%qE is not initialized", &elocus, decl);
	  /* Use a dummy init so validation can continue.  */
	  init = integer_zero_node;
	  fail = true;
	}

      init = build_modify_expr (decl, NOP_EXPR, init);
      SET_EXPR_LOCATION (init, elocus);
    }
  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
  gcc_assert (TREE_OPERAND (init, 0) == decl);

  if (cond == NULL_TREE)
    {
      error ("%Hmissing controlling predicate", &elocus);
      fail = true;
    }
  else
    {
      bool cond_ok = false;

      if (EXPR_HAS_LOCATION (cond))
	elocus = EXPR_LOCATION (cond);

      if (TREE_CODE (cond) == LT_EXPR
	  || TREE_CODE (cond) == LE_EXPR
	  || TREE_CODE (cond) == GT_EXPR
	  || TREE_CODE (cond) == GE_EXPR)
	{
	  tree op0 = TREE_OPERAND (cond, 0);
	  tree op1 = TREE_OPERAND (cond, 1);

	  /* 2.5.1.  The comparison in the condition is computed in the type
	     of DECL, otherwise the behavior is undefined.

	     For example:
	     long n; int i;
	     i < n;

	     according to ISO will be evaluated as:
	     (long)i < n;

	     We want to force:
	     i < (int)n;  */
	  if (TREE_CODE (op0) == NOP_EXPR
	      && decl == TREE_OPERAND (op0, 0))
	    {
	      TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
	      TREE_OPERAND (cond, 1) = fold_build1 (NOP_EXPR, TREE_TYPE (decl),
						    TREE_OPERAND (cond, 1));
	    }
	  else if (TREE_CODE (op1) == NOP_EXPR
		   && decl == TREE_OPERAND (op1, 0))
	    {
	      TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
	      TREE_OPERAND (cond, 0) = fold_build1 (NOP_EXPR, TREE_TYPE (decl),
						    TREE_OPERAND (cond, 0));
	    }

	  /* Canonicalize so that DECL is the left operand, swapping the
	     comparison direction if it was on the right.  */
	  if (decl == TREE_OPERAND (cond, 0))
	    cond_ok = true;
	  else if (decl == TREE_OPERAND (cond, 1))
	    {
	      TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));
	      TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
	      TREE_OPERAND (cond, 0) = decl;
	      cond_ok = true;
	    }
	}

      if (!cond_ok)
	{
	  error ("%Hinvalid controlling predicate", &elocus);
	  fail = true;
	}
    }

  if (incr == NULL_TREE)
    {
      error ("%Hmissing increment expression", &elocus);
      fail = true;
    }
  else
    {
      bool incr_ok = false;

      if (EXPR_HAS_LOCATION (incr))
	elocus = EXPR_LOCATION (incr);

      /* Check all the valid increment expressions: v++, v--, ++v, --v,
	 v = v + incr, v = incr + v and v = v - incr.  */
      switch (TREE_CODE (incr))
	{
	case POSTINCREMENT_EXPR:
	case PREINCREMENT_EXPR:
	case POSTDECREMENT_EXPR:
	case PREDECREMENT_EXPR:
	  incr_ok = (TREE_OPERAND (incr, 0) == decl);
	  break;

	case MODIFY_EXPR:
	  if (TREE_OPERAND (incr, 0) != decl)
	    break;
	  if (TREE_OPERAND (incr, 1) == decl)
	    break;
	  if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
	      && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
		  || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
	    incr_ok = true;
	  else if (TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
		   && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
	    incr_ok = true;
	  else
	    {
	      /* Not in simple form: try to canonicalize the RHS into
		 "DECL + invariant" via check_omp_for_incr_expr.  */
	      tree t = check_omp_for_incr_expr (TREE_OPERAND (incr, 1), decl);
	      if (t != error_mark_node)
		{
		  incr_ok = true;
		  t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
		  incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
		}
	    }
	  break;

	default:
	  break;
	}
      if (!incr_ok)
	{
	  error ("%Hinvalid increment expression", &elocus);
	  fail = true;
	}
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (OMP_FOR);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = init;
      OMP_FOR_COND (t) = cond;
      OMP_FOR_INCR (t) = incr;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;

      SET_EXPR_LOCATION (t, locus);
      return add_stmt (t);
    }
}


/* Divide CLAUSES into two lists: those that apply to a parallel construct,
   and those that apply to a work-sharing construct.  Place the results in
   *PAR_CLAUSES and *WS_CLAUSES respectively.  In addition, add a nowait
   clause to the work-sharing list.  */

void
c_split_parallel_clauses (tree clauses, tree *par_clauses, tree *ws_clauses)
{
  tree next;

  *par_clauses = NULL;
  /* The nowait is added up front; the clauses below are pushed ahead of
     it, so it ends up last on the work-sharing list.  */
  *ws_clauses = build_omp_clause (OMP_CLAUSE_NOWAIT);

  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_LASTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_DEFAULT:
	  OMP_CLAUSE_CHAIN (clauses) = *par_clauses;
	  *par_clauses = clauses;
	  break;

	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_ORDERED:
	  OMP_CLAUSE_CHAIN (clauses) = *ws_clauses;
	  *ws_clauses = clauses;
	  break;

	default:
	  gcc_unreachable ();
	}
    }
}

/* True if OpenMP sharing attribute of DECL is predetermined.  */

enum omp_clause_default_kind
c_omp_predetermined_sharing (tree decl)
{
  /* Variables with const-qualified type having no mutable member
     are predetermined shared.  */
  if (TREE_READONLY (decl))
    return OMP_CLAUSE_DEFAULT_SHARED;

  return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
}
propagatorStaticFunctions.h
#ifndef PROPAGATOR_STATIC_FUNCTIONS_H #define PROPAGATOR_STATIC_FUNCTIONS_H #define MIN(x,y) ((x)<(y)?(x):(y)) template<class Type> #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline void applyFirstDerivatives2D_PlusHalf( const long freeSurface, const long nx, const long nz, const long nthread, const Type c8_1, const Type c8_2, const Type c8_3, const Type c8_4, const Type invDx, const Type invDz, const Type * __restrict__ const inX, const Type * __restrict__ const inZ, Type * __restrict__ outX, Type * __restrict__ outZ, const long BX_2D, const long BZ_2D) { const long nx4 = nx - 4; const long nz4 = nz - 4; // zero output array #pragma omp parallel for collapse(2) num_threads(nthread) schedule(static) for (long bx = 0; bx < nx; bx += BX_2D) { for (long bz = 0; bz < nz; bz += BZ_2D) { const long kxmax = MIN(bx + BX_2D, nx); const long kzmax = MIN(bz + BZ_2D, nz); for (long kx = bx; kx < kxmax; kx++) { #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { outX[kx * nz + kz] = 0; outZ[kx * nz + kz] = 0; } } } } // interior #pragma omp parallel for collapse(2) num_threads(nthread) schedule(static) for (long bx = 4; bx < nx4; bx += BX_2D) { for (long bz = 4; bz < nz4; bz += BZ_2D) { /* cache blocking */ const long kxmax = MIN(bx + BX_2D, nx4); const long kzmax = MIN(bz + BZ_2D, nz4); for (long kx = bx; kx < kxmax; kx++) { #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const Type stencilDx = c8_1 * (- inX[(kx+0) * nz + kz] + inX[(kx+1) * nz + kz]) + c8_2 * (- inX[(kx-1) * nz + kz] + inX[(kx+2) * nz + kz]) + c8_3 * (- inX[(kx-2) * nz + kz] + inX[(kx+3) * nz + kz]) + c8_4 * (- inX[(kx-3) * nz + kz] + inX[(kx+4) * nz + kz]); const Type stencilDz = c8_1 * (- inZ[kx * nz + (kz+0)] + inZ[kx * nz + (kz+1)]) + c8_2 * (- inZ[kx * nz + (kz-1)] + inZ[kx * nz + (kz+2)]) + c8_3 * (- inZ[kx * nz + (kz-2)] + inZ[kx * nz + (kz+3)]) + c8_4 * (- inZ[kx * nz + (kz-3)] + inZ[kx * nz + (kz+4)]); outX[kx * nz + kz] = invDx 
* stencilDx; outZ[kx * nz + kz] = invDz * stencilDz; } } } } // roll on free surface if (freeSurface) { #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 4; kx < nx4; kx++) { // kz = 0 -- 1/2 cells below free surface for Z derivative, at free surface for X/Y derivative // X and Y derivatives are identically zero // [kx * nz + 0] { const Type stencilDPz0 = c8_1 * (- inZ[kx * nz + 0] + inZ[kx * nz + 1]) + c8_2 * (+ inZ[kx * nz + 1] + inZ[kx * nz + 2]) + c8_3 * (+ inZ[kx * nz + 2] + inZ[kx * nz + 3]) + c8_4 * (+ inZ[kx * nz + 3] + inZ[kx * nz + 4]); const long k = kx * nz + 0; outX[k] = 0; outZ[k] = invDz * stencilDPz0; } // kz = 1 -- 1 1/2 cells below free surface for Z derivative, 1 cells below for X/Y derivative // [kx * nz + 1] { const Type stencilDPx1 = c8_1 * (- inX[(kx+0) * nz + 1] + inX[(kx+1) * nz + 1]) + c8_2 * (- inX[(kx-1) * nz + 1] + inX[(kx+2) * nz + 1]) + c8_3 * (- inX[(kx-2) * nz + 1] + inX[(kx+3) * nz + 1]) + c8_4 * (- inX[(kx-3) * nz + 1] + inX[(kx+4) * nz + 1]); const Type stencilDPz1 = c8_1 * (- inZ[kx * nz + 1] + inZ[kx * nz + 2]) + c8_2 * (- inZ[kx * nz + 0] + inZ[kx * nz + 3]) + c8_3 * (+ inZ[kx * nz + 1] + inZ[kx * nz + 4]) + c8_4 * (+ inZ[kx * nz + 2] + inZ[kx * nz + 5]); const long k = kx * nz + 1; outX[k] = invDx * stencilDPx1; outZ[k] = invDz * stencilDPz1; } // kz = 2 -- 2 1/2 cells below free surface for Z derivative, 2 cells below for X/Y derivative // [kx * nz + 2] { const Type stencilDPx2 = c8_1 * (- inX[(kx+0) * nz + 2] + inX[(kx+1) * nz + 2]) + c8_2 * (- inX[(kx-1) * nz + 2] + inX[(kx+2) * nz + 2]) + c8_3 * (- inX[(kx-2) * nz + 2] + inX[(kx+3) * nz + 2]) + c8_4 * (- inX[(kx-3) * nz + 2] + inX[(kx+4) * nz + 2]); const Type stencilDPz2 = c8_1 * (- inZ[kx * nz + 2] + inZ[kx * nz + 3]) + c8_2 * (- inZ[kx * nz + 1] + inZ[kx * nz + 4]) + c8_3 * (- inZ[kx * nz + 0] + inZ[kx * nz + 5]) + c8_4 * (+ inZ[kx * nz + 1] + inZ[kx * nz + 6]); const long k = kx * nz + 2; outX[k] = invDx * stencilDPx2; outZ[k] = invDz * 
stencilDPz2; } // kz = 3 -- 3 1/2 cells below free surface for Z derivative, 3 cells below for X/Y derivative // [kx * nz + 3] { const Type stencilDPx3 = c8_1 * (- inX[(kx+0) * nz + 3] + inX[(kx+1) * nz + 3]) + c8_2 * (- inX[(kx-1) * nz + 3] + inX[(kx+2) * nz + 3]) + c8_3 * (- inX[(kx-2) * nz + 3] + inX[(kx+3) * nz + 3]) + c8_4 * (- inX[(kx-3) * nz + 3] + inX[(kx+4) * nz + 3]); const Type stencilDPz3 = c8_1 * (- inZ[kx * nz + 3] + inZ[kx * nz + 4]) + c8_2 * (- inZ[kx * nz + 2] + inZ[kx * nz + 5]) + c8_3 * (- inZ[kx * nz + 1] + inZ[kx * nz + 6]) + c8_4 * (- inZ[kx * nz + 0] + inZ[kx * nz + 7]); const long k = kx * nz + 3; outX[k] = invDx * stencilDPx3; outZ[k] = invDz * stencilDPz3; } } } } template<class Type> #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline void applyFirstDerivatives2D_MinusHalf( const long freeSurface, const long nx, const long nz, const long nthread, const Type c8_1, const Type c8_2, const Type c8_3, const Type c8_4, const Type invDx, const Type invDz, const Type * __restrict__ const inX, const Type * __restrict__ const inZ, Type * __restrict__ outX, Type * __restrict__ outZ, const long BX_2D, const long BZ_2D) { const long nx4 = nx - 4; const long nz4 = nz - 4; // zero output array #pragma omp parallel for collapse(2) num_threads(nthread) schedule(static) for (long bx = 0; bx < nx; bx += BX_2D) { for (long bz = 0; bz < nz; bz += BZ_2D) { const long kxmax = MIN(bx + BX_2D, nx); const long kzmax = MIN(bz + BZ_2D, nz); for (long kx = bx; kx < kxmax; kx++) { #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { outX[kx * nz + kz] = 0; outZ[kx * nz + kz] = 0; } } } } // interior #pragma omp parallel for collapse(2) num_threads(nthread) schedule(static) for (long bx = 4; bx < nx4; bx += BX_2D) { for (long bz = 4; bz < nz4; bz += BZ_2D) { /* cache blocking */ const long kxmax = MIN(bx + BX_2D, nx4); const long kzmax = MIN(bz + BZ_2D, nz4); for (long kx = bx; kx < kxmax; kx++) { #pragma 
omp simd for (long kz = bz; kz < kzmax; kz++) { const Type stencilDx = c8_1 * (- inX[(kx-1) * nz + kz] + inX[(kx+0) * nz + kz]) + c8_2 * (- inX[(kx-2) * nz + kz] + inX[(kx+1) * nz + kz]) + c8_3 * (- inX[(kx-3) * nz + kz] + inX[(kx+2) * nz + kz]) + c8_4 * (- inX[(kx-4) * nz + kz] + inX[(kx+3) * nz + kz]); const Type stencilDz = c8_1 * (- inZ[kx * nz + (kz-1)] + inZ[kx * nz + (kz+0)]) + c8_2 * (- inZ[kx * nz + (kz-2)] + inZ[kx * nz + (kz+1)]) + c8_3 * (- inZ[kx * nz + (kz-3)] + inZ[kx * nz + (kz+2)]) + c8_4 * (- inZ[kx * nz + (kz-4)] + inZ[kx * nz + (kz+3)]); outX[kx * nz + kz] = invDx * stencilDx; outZ[kx * nz + kz] = invDz * stencilDz; } } } } // roll on free surface if (freeSurface) { #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 4; kx < nx4; kx++) { // kz = 0 -- at the free surface -- p = 0 // [kx * nz + 0] { const long k = kx * nz + 0; outX[k] = 0; outZ[k] = 0; } // kz = 1 -- one cell below the free surface // [kx * nz + 1] { const Type stencilDPx1 = c8_1 * (- inX[(kx-1) * nz + 1] + inX[(kx+0) * nz + 1]) + c8_2 * (- inX[(kx-2) * nz + 1] + inX[(kx+1) * nz + 1]) + c8_3 * (- inX[(kx-3) * nz + 1] + inX[(kx+2) * nz + 1]) + c8_4 * (- inX[(kx-4) * nz + 1] + inX[(kx+3) * nz + 1]); const Type stencilDPz1 = c8_1 * (- inZ[kx * nz + 0] + inZ[kx * nz + 1]) + c8_2 * (- inZ[kx * nz + 0] + inZ[kx * nz + 2]) + c8_3 * (- inZ[kx * nz + 1] + inZ[kx * nz + 3]) + c8_4 * (- inZ[kx * nz + 2] + inZ[kx * nz + 4]); const long k = kx * nz + 1; outX[k] = invDx * stencilDPx1; outZ[k] = invDz * stencilDPz1; } // kz = 2 -- two cells below the free surface // [kx * nz + 2] { const Type stencilDPx2 = c8_1 * (- inX[(kx-1) * nz + 2] + inX[(kx+0) * nz + 2]) + c8_2 * (- inX[(kx-2) * nz + 2] + inX[(kx+1) * nz + 2]) + c8_3 * (- inX[(kx-3) * nz + 2] + inX[(kx+2) * nz + 2]) + c8_4 * (- inX[(kx-4) * nz + 2] + inX[(kx+3) * nz + 2]); const Type stencilDPz2 = c8_1 * (- inZ[kx * nz + 1] + inZ[kx * nz + 2]) + c8_2 * (- inZ[kx * nz + 0] + inZ[kx * nz + 3]) + c8_3 * (- inZ[kx * 
nz + 0] + inZ[kx * nz + 4]) + c8_4 * (- inZ[kx * nz + 1] + inZ[kx * nz + 5]); const long k = kx * nz + 2; outX[k] = invDx * stencilDPx2; outZ[k] = invDz * stencilDPz2; } // kz = 3 -- three cells below the free surface // [kx * nz + 3] { const Type stencilDPx3 = c8_1 * (- inX[(kx-1) * nz + 3] + inX[(kx+0) * nz + 3]) + c8_2 * (- inX[(kx-2) * nz + 3] + inX[(kx+1) * nz + 3]) + c8_3 * (- inX[(kx-3) * nz + 3] + inX[(kx+2) * nz + 3]) + c8_4 * (- inX[(kx-4) * nz + 3] + inX[(kx+3) * nz + 3]); const Type stencilDPz3 = c8_1 * (- inZ[kx * nz + 2] + inZ[kx * nz + 3]) + c8_2 * (- inZ[kx * nz + 1] + inZ[kx * nz + 4]) + c8_3 * (- inZ[kx * nz + 0] + inZ[kx * nz + 5]) + c8_4 * (- inZ[kx * nz + 0] + inZ[kx * nz + 6]); const long k = kx * nz + 3; outX[k] = invDx * stencilDPx3; outZ[k] = invDz * stencilDPz3; } } } } template<class Type> #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline void applyFirstDerivatives3D_PlusHalf( const long freeSurface, const long nx, const long ny, const long nz, const long nthread, const Type c8_1, const Type c8_2, const Type c8_3, const Type c8_4, const Type invDx, const Type invDy, const Type invDz, Type * __restrict__ inX, Type * __restrict__ inY, Type * __restrict__ inZ, Type * __restrict__ outX, Type * __restrict__ outY, Type * __restrict__ outZ, const long BX_3D, const long BY_3D, const long BZ_3D) { const long nx4 = nx - 4; const long ny4 = ny - 4; const long nz4 = nz - 4; const long nynz = ny * nz; // zero output array: note only the annulus that is in the absorbing boundary needs to be zeroed for (long k = 0; k < 4; k++) { #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 0; kx < nx; kx++) { #pragma omp simd for (long ky = 0; ky < ny; ky++) { long kindex1 = kx * ny * nz + ky * nz + k; long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k); outX[kindex1] = outX[kindex2] = 0; outY[kindex1] = outY[kindex2] = 0; outZ[kindex1] = outZ[kindex2] = 0; } } #pragma omp 
parallel for num_threads(nthread) schedule(static) for (long kx = 0; kx < nx; kx++) { #pragma omp simd for (long kz = 0; kz < nz; kz++) { long kindex1 = kx * ny * nz + k * nz + kz; long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz; outX[kindex1] = outX[kindex2] = 0; outY[kindex1] = outY[kindex2] = 0; outZ[kindex1] = outZ[kindex2] = 0; } } #pragma omp parallel for num_threads(nthread) schedule(static) for (long ky = 0; ky < ny; ky++) { #pragma omp simd for (long kz = 0; kz < nz; kz++) { long kindex1 = k * ny * nz + ky * nz + kz; long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz; outX[kindex1] = outX[kindex2] = 0; outY[kindex1] = outY[kindex2] = 0; outZ[kindex1] = outZ[kindex2] = 0; } } } // interior #pragma omp parallel for collapse(3) num_threads(nthread) schedule(static) for (long bx = 4; bx < nx4; bx += BX_3D) { for (long by = 4; by < ny4; by += BY_3D) { for (long bz = 4; bz < nz4; bz += BZ_3D) { const long kxmax = MIN(bx + BX_3D, nx4); const long kymax = MIN(by + BY_3D, ny4); const long kzmax = MIN(bz + BZ_3D, nz4); for (long kx = bx; kx < kxmax; kx++) { const long kxnynz = kx * nynz; for (long ky = by; ky < kymax; ky++) { const long kynz = ky * nz; const long kxnynz_kynz = kxnynz + kynz; #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long kynz_kz = + kynz + kz; const Type stencilDx = c8_1 * (- inX[(kx+0) * nynz + kynz_kz] + inX[(kx+1) * nynz + kynz_kz]) + c8_2 * (- inX[(kx-1) * nynz + kynz_kz] + inX[(kx+2) * nynz + kynz_kz]) + c8_3 * (- inX[(kx-2) * nynz + kynz_kz] + inX[(kx+3) * nynz + kynz_kz]) + c8_4 * (- inX[(kx-3) * nynz + kynz_kz] + inX[(kx+4) * nynz + kynz_kz]); const Type stencilDy = c8_1 * (- inY[kxnynz + (ky+0) * nz + kz] + inY[kxnynz + (ky+1) * nz + kz]) + c8_2 * (- inY[kxnynz + (ky-1) * nz + kz] + inY[kxnynz + (ky+2) * nz + kz]) + c8_3 * (- inY[kxnynz + (ky-2) * nz + kz] + inY[kxnynz + (ky+3) * nz + kz]) + c8_4 * (- inY[kxnynz + (ky-3) * nz + kz] + inY[kxnynz + (ky+4) * nz + kz]); const Type stencilDz = c8_1 * (- inZ[kxnynz_kynz 
+ (kz+0)] + inZ[kxnynz_kynz + (kz+1)]) + c8_2 * (- inZ[kxnynz_kynz + (kz-1)] + inZ[kxnynz_kynz + (kz+2)]) + c8_3 * (- inZ[kxnynz_kynz + (kz-2)] + inZ[kxnynz_kynz + (kz+3)]) + c8_4 * (- inZ[kxnynz_kynz + (kz-3)] + inZ[kxnynz_kynz + (kz+4)]); outX[kxnynz_kynz + kz] = invDx * stencilDx; outY[kxnynz_kynz + kz] = invDy * stencilDy; outZ[kxnynz_kynz + kz] = invDz * stencilDz; } } } } } } // roll on free surface if (freeSurface) { #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 4; kx < nx4; kx++) { const long kxnynz = kx * nynz; #pragma omp simd for (long ky = 4; ky < ny4; ky++) { const long kynz = ky * nz; const long kxnynz_kynz = kxnynz + kynz; // kz = 0 -- 1/2 cells below free surface for Z derivative, at free surface for X/Y derivative // X and Y derivatives are identically zero const Type stencilDz0 = c8_1 * (- inZ[kxnynz_kynz + 0] + inZ[kxnynz_kynz + 1]) + c8_2 * (+ inZ[kxnynz_kynz + 1] + inZ[kxnynz_kynz + 2]) + c8_3 * (+ inZ[kxnynz_kynz + 2] + inZ[kxnynz_kynz + 3]) + c8_4 * (+ inZ[kxnynz_kynz + 3] + inZ[kxnynz_kynz + 4]); outX[kxnynz_kynz + 0] = 0; outY[kxnynz_kynz + 0] = 0; outZ[kxnynz_kynz + 0] = invDz * stencilDz0; // kz = 1 -- 1 1/2 cells below free surface for Z derivative, 1 cells below for X/Y derivative const Type stencilDx1 = c8_1 * (- inX[(kx+0) * nynz + kynz + 1] + inX[(kx+1) * nynz + kynz + 1]) + c8_2 * (- inX[(kx-1) * nynz + kynz + 1] + inX[(kx+2) * nynz + kynz + 1]) + c8_3 * (- inX[(kx-2) * nynz + kynz + 1] + inX[(kx+3) * nynz + kynz + 1]) + c8_4 * (- inX[(kx-3) * nynz + kynz + 1] + inX[(kx+4) * nynz + kynz + 1]); const Type stencilDy1 = c8_1 * (- inY[kxnynz + (ky+0) * nz + 1] + inY[kxnynz + (ky+1) * nz + 1]) + c8_2 * (- inY[kxnynz + (ky-1) * nz + 1] + inY[kxnynz + (ky+2) * nz + 1]) + c8_3 * (- inY[kxnynz + (ky-2) * nz + 1] + inY[kxnynz + (ky+3) * nz + 1]) + c8_4 * (- inY[kxnynz + (ky-3) * nz + 1] + inY[kxnynz + (ky+4) * nz + 1]); const Type stencilDz1 = c8_1 * (- inZ[kxnynz_kynz + 1] + inZ[kxnynz_kynz + 2]) + c8_2 * (- 
inZ[kxnynz_kynz + 0] + inZ[kxnynz_kynz + 3]) + c8_3 * (+ inZ[kxnynz_kynz + 1] + inZ[kxnynz_kynz + 4]) + c8_4 * (+ inZ[kxnynz_kynz + 2] + inZ[kxnynz_kynz + 5]); outX[kxnynz_kynz + 1] = invDx * stencilDx1; outY[kxnynz_kynz + 1] = invDy * stencilDy1; outZ[kxnynz_kynz + 1] = invDz * stencilDz1; // kz = 2 -- 2 1/2 cells below free surface for Z derivative, 2 cells below for X/Y derivative const Type stencilDx2 = c8_1 * (- inX[(kx+0) * nynz + kynz + 2] + inX[(kx+1) * nynz + kynz + 2]) + c8_2 * (- inX[(kx-1) * nynz + kynz + 2] + inX[(kx+2) * nynz + kynz + 2]) + c8_3 * (- inX[(kx-2) * nynz + kynz + 2] + inX[(kx+3) * nynz + kynz + 2]) + c8_4 * (- inX[(kx-3) * nynz + kynz + 2] + inX[(kx+4) * nynz + kynz + 2]); const Type stencilDy2 = c8_1 * (- inY[kxnynz + (ky+0) * nz + 2] + inY[kxnynz + (ky+1) * nz + 2]) + c8_2 * (- inY[kxnynz + (ky-1) * nz + 2] + inY[kxnynz + (ky+2) * nz + 2]) + c8_3 * (- inY[kxnynz + (ky-2) * nz + 2] + inY[kxnynz + (ky+3) * nz + 2]) + c8_4 * (- inY[kxnynz + (ky-3) * nz + 2] + inY[kxnynz + (ky+4) * nz + 2]); const Type stencilDz2 = c8_1 * (- inZ[kxnynz_kynz + 2] + inZ[kxnynz_kynz + 3]) + c8_2 * (- inZ[kxnynz_kynz + 1] + inZ[kxnynz_kynz + 4]) + c8_3 * (- inZ[kxnynz_kynz + 0] + inZ[kxnynz_kynz + 5]) + c8_4 * (+ inZ[kxnynz_kynz + 1] + inZ[kxnynz_kynz + 6]); outX[kxnynz_kynz + 2] = invDx * stencilDx2; outY[kxnynz_kynz + 2] = invDy * stencilDy2; outZ[kxnynz_kynz + 2] = invDz * stencilDz2; // kz = 3 -- 3 1/2 cells below free surface for Z derivative, 3 cells below for X/Y derivative const Type stencilDx3 = c8_1 * (- inX[(kx+0) * nynz + kynz + 3] + inX[(kx+1) * nynz + kynz + 3]) + c8_2 * (- inX[(kx-1) * nynz + kynz + 3] + inX[(kx+2) * nynz + kynz + 3]) + c8_3 * (- inX[(kx-2) * nynz + kynz + 3] + inX[(kx+3) * nynz + kynz + 3]) + c8_4 * (- inX[(kx-3) * nynz + kynz + 3] + inX[(kx+4) * nynz + kynz + 3]); const Type stencilDy3 = c8_1 * (- inY[kxnynz + (ky+0) * nz + 3] + inY[kxnynz + (ky+1) * nz + 3]) + c8_2 * (- inY[kxnynz + (ky-1) * nz + 3] + inY[kxnynz + (ky+2) * nz 
+ 3]) + c8_3 * (- inY[kxnynz + (ky-2) * nz + 3] + inY[kxnynz + (ky+3) * nz + 3]) + c8_4 * (- inY[kxnynz + (ky-3) * nz + 3] + inY[kxnynz + (ky+4) * nz + 3]); const Type stencilDz3 = c8_1 * (- inZ[kxnynz_kynz + 3] + inZ[kxnynz_kynz + 4]) + c8_2 * (- inZ[kxnynz_kynz + 2] + inZ[kxnynz_kynz + 5]) + c8_3 * (- inZ[kxnynz_kynz + 1] + inZ[kxnynz_kynz + 6]) + c8_4 * (- inZ[kxnynz_kynz + 0] + inZ[kxnynz_kynz + 7]); outX[kxnynz_kynz + 3] = invDx * stencilDx3; outY[kxnynz_kynz + 3] = invDy * stencilDy3; outZ[kxnynz_kynz + 3] = invDz * stencilDz3; } } } } template<class Type> #if defined(__FUNCTION_CLONES__) __attribute__((target_clones("avx","avx2","avx512f","default"))) #endif inline void applyFirstDerivatives3D_MinusHalf( const long freeSurface, const long nx, const long ny, const long nz, const long nthread, const Type c8_1, const Type c8_2, const Type c8_3, const Type c8_4, const Type invDx, const Type invDy, const Type invDz, Type * __restrict__ inX, Type * __restrict__ inY, Type * __restrict__ inZ, Type * __restrict__ outX, Type * __restrict__ outY, Type * __restrict__ outZ, const long BX_3D, const long BY_3D, const long BZ_3D) { const long nx4 = nx - 4; const long ny4 = ny - 4; const long nz4 = nz - 4; const long nynz = ny * nz; // zero output array: note only the annulus that is in the absorbing boundary needs to be zeroed for (long k = 0; k < 4; k++) { #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 0; kx < nx; kx++) { #pragma omp simd for (long ky = 0; ky < ny; ky++) { long kindex1 = kx * ny * nz + ky * nz + k; long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k); outX[kindex1] = outX[kindex2] = 0; outY[kindex1] = outY[kindex2] = 0; outZ[kindex1] = outZ[kindex2] = 0; } } #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 0; kx < nx; kx++) { #pragma omp simd for (long kz = 0; kz < nz; kz++) { long kindex1 = kx * ny * nz + k * nz + kz; long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz; outX[kindex1] = 
outX[kindex2] = 0; outY[kindex1] = outY[kindex2] = 0; outZ[kindex1] = outZ[kindex2] = 0; } } #pragma omp parallel for num_threads(nthread) schedule(static) for (long ky = 0; ky < ny; ky++) { #pragma omp simd for (long kz = 0; kz < nz; kz++) { long kindex1 = k * ny * nz + ky * nz + kz; long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz; outX[kindex1] = outX[kindex2] = 0; outY[kindex1] = outY[kindex2] = 0; outZ[kindex1] = outZ[kindex2] = 0; } } } // interior #pragma omp parallel for collapse(3) num_threads(nthread) schedule(static) for (long bx = 4; bx < nx4; bx += BX_3D) { for (long by = 4; by < ny4; by += BY_3D) { for (long bz = 4; bz < nz4; bz += BZ_3D) { const long kxmax = MIN(bx + BX_3D, nx4); const long kymax = MIN(by + BY_3D, ny4); const long kzmax = MIN(bz + BZ_3D, nz4); for (long kx = bx; kx < kxmax; kx++) { const long kxnynz = kx * nynz; for (long ky = by; ky < kymax; ky++) { const long kynz = ky * nz; const long kxnynz_kynz = kxnynz + kynz; #pragma omp simd for (long kz = bz; kz < kzmax; kz++) { const long kynz_kz = + kynz + kz; const Type stencilDx = c8_1 * (- inX[(kx-1) * nynz + kynz_kz] + inX[(kx+0) * nynz + kynz_kz]) + c8_2 * (- inX[(kx-2) * nynz + kynz_kz] + inX[(kx+1) * nynz + kynz_kz]) + c8_3 * (- inX[(kx-3) * nynz + kynz_kz] + inX[(kx+2) * nynz + kynz_kz]) + c8_4 * (- inX[(kx-4) * nynz + kynz_kz] + inX[(kx+3) * nynz + kynz_kz]); const Type stencilDy = c8_1 * (- inY[kxnynz + (ky-1) * nz + kz] + inY[kxnynz + (ky+0) * nz + kz]) + c8_2 * (- inY[kxnynz + (ky-2) * nz + kz] + inY[kxnynz + (ky+1) * nz + kz]) + c8_3 * (- inY[kxnynz + (ky-3) * nz + kz] + inY[kxnynz + (ky+2) * nz + kz]) + c8_4 * (- inY[kxnynz + (ky-4) * nz + kz] + inY[kxnynz + (ky+3) * nz + kz]); const Type stencilDz = c8_1 * (- inZ[kxnynz_kynz + (kz-1)] + inZ[kxnynz_kynz + (kz+0)]) + c8_2 * (- inZ[kxnynz_kynz + (kz-2)] + inZ[kxnynz_kynz + (kz+1)]) + c8_3 * (- inZ[kxnynz_kynz + (kz-3)] + inZ[kxnynz_kynz + (kz+2)]) + c8_4 * (- inZ[kxnynz_kynz + (kz-4)] + inZ[kxnynz_kynz + (kz+3)]); 
outX[kxnynz_kynz + kz] = invDx * stencilDx; outY[kxnynz_kynz + kz] = invDy * stencilDy; outZ[kxnynz_kynz + kz] = invDz * stencilDz; } } } } } } // roll on free surface if (freeSurface) { #pragma omp parallel for num_threads(nthread) schedule(static) for (long kx = 4; kx < nx4; kx++) { const long kxnynz = kx * nynz; #pragma omp simd for (long ky = 4; ky < ny4; ky++) { const long kynz = ky * nz; const long kxnynz_kynz = kxnynz + kynz; // kz = 0 -- at the free surface -- p = 0 outX[kxnynz_kynz + 0] = 0; outY[kxnynz_kynz + 0] = 0; outZ[kxnynz_kynz + 0] = 0; // kz = 1 -- one cell below the free surface const Type stencilDx1 = c8_1 * (- inX[(kx-1) * nynz + kynz + 1] + inX[(kx+0) * nynz + kynz + 1]) + c8_2 * (- inX[(kx-2) * nynz + kynz + 1] + inX[(kx+1) * nynz + kynz + 1]) + c8_3 * (- inX[(kx-3) * nynz + kynz + 1] + inX[(kx+2) * nynz + kynz + 1]) + c8_4 * (- inX[(kx-4) * nynz + kynz + 1] + inX[(kx+3) * nynz + kynz + 1]); const Type stencilDy1 = c8_1 * (- inY[kxnynz + (ky-1) * nz + 1] + inY[kxnynz + (ky+0) * nz + 1]) + c8_2 * (- inY[kxnynz + (ky-2) * nz + 1] + inY[kxnynz + (ky+1) * nz + 1]) + c8_3 * (- inY[kxnynz + (ky-3) * nz + 1] + inY[kxnynz + (ky+2) * nz + 1]) + c8_4 * (- inY[kxnynz + (ky-4) * nz + 1] + inY[kxnynz + (ky+3) * nz + 1]); const Type stencilDz1 = c8_1 * (- inZ[kxnynz_kynz + 0] + inZ[kxnynz_kynz + 1]) + c8_2 * (- inZ[kxnynz_kynz + 0] + inZ[kxnynz_kynz + 2]) + c8_3 * (- inZ[kxnynz_kynz + 1] + inZ[kxnynz_kynz + 3]) + c8_4 * (- inZ[kxnynz_kynz + 2] + inZ[kxnynz_kynz + 4]); outX[kxnynz_kynz + 1] = invDx * stencilDx1; outY[kxnynz_kynz + 1] = invDy * stencilDy1; outZ[kxnynz_kynz + 1] = invDz * stencilDz1; // kz = 2 -- two cells below the free surface const Type stencilDx2 = c8_1 * (- inX[(kx-1) * nynz + kynz + 2] + inX[(kx+0) * nynz + kynz + 2]) + c8_2 * (- inX[(kx-2) * nynz + kynz + 2] + inX[(kx+1) * nynz + kynz + 2]) + c8_3 * (- inX[(kx-3) * nynz + kynz + 2] + inX[(kx+2) * nynz + kynz + 2]) + c8_4 * (- inX[(kx-4) * nynz + kynz + 2] + inX[(kx+3) * nynz + kynz + 
2]); const Type stencilDy2 = c8_1 * (- inY[kxnynz + (ky-1) * nz + 2] + inY[kxnynz + (ky+0) * nz + 2]) + c8_2 * (- inY[kxnynz + (ky-2) * nz + 2] + inY[kxnynz + (ky+1) * nz + 2]) + c8_3 * (- inY[kxnynz + (ky-3) * nz + 2] + inY[kxnynz + (ky+2) * nz + 2]) + c8_4 * (- inY[kxnynz + (ky-4) * nz + 2] + inY[kxnynz + (ky+3) * nz + 2]); const Type stencilDz2 = c8_1 * (- inZ[kxnynz_kynz + 1] + inZ[kxnynz_kynz + 2]) + c8_2 * (- inZ[kxnynz_kynz + 0] + inZ[kxnynz_kynz + 3]) + c8_3 * (- inZ[kxnynz_kynz + 0] + inZ[kxnynz_kynz + 4]) + c8_4 * (- inZ[kxnynz_kynz + 1] + inZ[kxnynz_kynz + 5]); outX[kxnynz_kynz + 2] = invDx * stencilDx2; outY[kxnynz_kynz + 2] = invDy * stencilDy2; outZ[kxnynz_kynz + 2] = invDz * stencilDz2; // kz = 3 -- three cells below the free surface const Type stencilDx3 = c8_1 * (- inX[(kx-1) * nynz + kynz + 3] + inX[(kx+0) * nynz + kynz + 3]) + c8_2 * (- inX[(kx-2) * nynz + kynz + 3] + inX[(kx+1) * nynz + kynz + 3]) + c8_3 * (- inX[(kx-3) * nynz + kynz + 3] + inX[(kx+2) * nynz + kynz + 3]) + c8_4 * (- inX[(kx-4) * nynz + kynz + 3] + inX[(kx+3) * nynz + kynz + 3]); const Type stencilDy3 = c8_1 * (- inY[kxnynz + (ky-1) * nz + 3] + inY[kxnynz + (ky+0) * nz + 3]) + c8_2 * (- inY[kxnynz + (ky-2) * nz + 3] + inY[kxnynz + (ky+1) * nz + 3]) + c8_3 * (- inY[kxnynz + (ky-3) * nz + 3] + inY[kxnynz + (ky+2) * nz + 3]) + c8_4 * (- inY[kxnynz + (ky-4) * nz + 3] + inY[kxnynz + (ky+3) * nz + 3]); const Type stencilDz3 = c8_1 * (- inZ[kxnynz_kynz + 2] + inZ[kxnynz_kynz + 3]) + c8_2 * (- inZ[kxnynz_kynz + 1] + inZ[kxnynz_kynz + 4]) + c8_3 * (- inZ[kxnynz_kynz + 0] + inZ[kxnynz_kynz + 5]) + c8_4 * (- inZ[kxnynz_kynz + 0] + inZ[kxnynz_kynz + 6]); outX[kxnynz_kynz + 3] = invDx * stencilDx3; outY[kxnynz_kynz + 3] = invDy * stencilDy3; outZ[kxnynz_kynz + 3] = invDz * stencilDz3; } } } } #endif
GB_binop__isne_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// It is the ISNE (is-not-equal, result as float 0/1) operator specialized
// for fp32 operands; all kernel bodies come from the shared *_template.c
// files, driven by the GB_* macros defined below.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB_AaddB__isne_fp32
// A.*B function (eWiseMult): GB_AemultB__isne_fp32
// A*D function (colscale): GB_AxD__isne_fp32
// D*A function (rowscale): GB_DxB__isne_fp32
// C+=B function (dense accum): GB_Cdense_accumB__isne_fp32
// C+=b function (dense accum): GB_Cdense_accumb__isne_fp32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isne_fp32
// C=scalar+B GB_bind1st__isne_fp32
// C=scalar+B' GB_bind1st_tran__isne_fp32
// C=A+scalar GB_bind2nd__isne_fp32
// C=A'+scalar GB_bind2nd_tran__isne_fp32

// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = (aij != bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    float bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: the ISNE comparison, result stored in a float (0 or 1)
#define GB_BINOP(z, x, y, i, j) \
    z = (x != y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNE || GxB_NO_FP32 || GxB_NO_ISNE_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISNE is not in that list, so this variant is compiled out.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__isne_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__isne_fp32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__isne_fp32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (generator artifact)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__isne_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__isne_fp32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// release the ek_slice workspaces allocated by the template
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}

GrB_Info GB_AaddB__isne_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice workspaces; allocated inside the template, freed by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__isne_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice workspaces; allocated inside the template, freed by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__isne_fp32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,   // bitmap of B; entry present if Bb[p]!=0
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        float bij = Bx [p] ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__isne_fp32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,   // bitmap of A; entry present if Ab[p]!=0
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        float aij = Ax [p] ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    float aij = Ax [pA] ; \
    Cx [pC] = (x != aij) ; \
}

GrB_Info GB_bind1st_tran__isne_fp32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE to its file-wide definition
    #undef GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    float aij = Ax [pA] ; \
    Cx [pC] = (aij != y) ; \
}

GrB_Info GB_bind2nd_tran__isne_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Mapping.h
//===--------- Mapping.h - OpenMP device runtime mapping helpers -- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Declarations for querying the thread/warp/block/kernel topology of the
// executing OpenMP target device, and the SPMD-vs-generic execution mode.
//
//===----------------------------------------------------------------------===//

#ifndef OMPTARGET_MAPPING_H
#define OMPTARGET_MAPPING_H

#include "Types.h"

namespace _OMP {

namespace mapping {

#pragma omp declare target

// Upper bound on threads per team, shared with the host plugins.
inline constexpr uint32_t MaxThreadsPerTeam = 1024;

#pragma omp end declare target

/// Initialize the mapping machinery.
void init(bool IsSPMD);

/// Return true if the kernel is executed in SPMD mode.
bool isSPMDMode();

/// Return true if the kernel is executed in generic mode.
bool isGenericMode();

/// Return true if the executing thread is the main thread in generic mode.
/// These functions will lookup state and it is required that this is OK for
/// the thread and location. See also `isInitialThreadInLevel0` for a stateless
/// alternative for certain situations, e.g. during initialization.
bool isMainThreadInGenericMode();
bool isMainThreadInGenericMode(bool IsSPMD);

/// Return true if this thread is the initial thread in parallel level 0.
///
/// The thread for which this returns true should be used for single threaded
/// initialization tasks. We pick a special thread to ensure there are no
/// races between the initialization and the first read of initialized state.
bool isInitialThreadInLevel0(bool IsSPMD);

/// Return true if the executing thread has the lowest Id of the active threads
/// in the warp.
bool isLeaderInWarp();

/// Return a mask describing all active threads in the warp.
LaneMaskTy activemask();

/// Return a mask describing all threads with a smaller Id in the warp.
LaneMaskTy lanemaskLT();

/// Return a mask describing all threads with a larger Id in the warp.
LaneMaskTy lanemaskGT();

/// Return the thread Id in the warp, in [0, getWarpSize()).
uint32_t getThreadIdInWarp();

/// Return the thread Id in the block, in [0, getBlockSize()).
uint32_t getThreadIdInBlock();

/// Return the warp id in the block.
uint32_t getWarpId();

/// Return the warp size, thus number of threads in the warp.
uint32_t getWarpSize();

/// Return the number of warps in the block.
uint32_t getNumberOfWarpsInBlock();

/// Return the block Id in the kernel, in [0, getKernelSize()).
uint32_t getBlockId();

/// Return the block size, thus number of threads in the block.
uint32_t getBlockSize();

/// Return the number of blocks in the kernel.
uint32_t getNumberOfBlocks();

/// Return the kernel size, thus number of threads in the kernel.
uint32_t getKernelSize();

/// Return the number of processing elements on the device.
uint32_t getNumberOfProcessorElements();

} // namespace mapping

} // namespace _OMP

#endif
window_layer.c
/********************************************************************[libaroma]*
 * Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *______________________________________________________________________________
 *
 * Filename    : window_layer.c
 * Description : multi layer window
 *
 * + This is part of libaroma, an embedded ui toolkit.
 * + 07/04/15 - Author(s): Ahmad Amarullah
 *
 */
#ifndef __libaroma_window_layer_c__
#define __libaroma_window_layer_c__
#include <aroma_internal.h>
#include "ui_internal.h"

#ifdef __cplusplus
extern "C" {
#endif

/* max touch x dp to trigger sidebar */
#define _LIBAROMA_LAYER_SIDEBAR_TOUCH_DP 16

/* max touch x dp to trigger pulldown */
#define _LIBAROMA_LAYER_PULLDOWN_TOUCH_DP 16

/* root window client data */
typedef struct{
  LIBAROMA_WINDOWP win;                 /* root window this layer belongs to */
  LIBAROMA_WINDOWP sidebar;             /* sidebar sub-window (may be NULL) */
  LIBAROMA_WINDOWP pulldown;            /* pulldown sub-window (may be NULL) */
  byte sidebar_showed;                  /* 0=hidden, 1=sliding, 2=fully open */
  byte pulldown_showed;                 /* same states as sidebar_showed */
  int sidebar_xpos;                     /* current sidebar slide x offset */
  int sidebar_req_x;                    /* requested sidebar x (animation) */
  int pulldown_ypos;                    /* current pulldown slide y offset */
  int pulldown_req_y;                   /* requested pulldown y (animation) */
  int sidebar_velocity;
  int pulldown_velocity;
  byte pulldown_drawtype;               /* e.g. LIBAROMA_PULLDOWN_CLEAN */
  byte redraw;                          /* set when a repaint is needed */
  LIBAROMA_MUTEX mutex;                 /* guards all fields + tdc */
  LIBAROMA_CANVASP tdc;                 /* offscreen canvas in layered mode */
  byte on_direct_canvas;                /* 1: draw straight into win->dc */
  byte (*ori_ui_thread)(LIBAROMA_WINDOWP); /* saved ui thread, restored on free */
  byte pd_touched;
  byte sb_touched;
  int touch_x;
  int touch_y;
  byte pd_allow_scroll;
  byte sb_allow_scroll;
  long pd_client_touch_start;
  long sb_client_touch_start;
  LIBAROMA_MSG pd_pretouched_msg;
  LIBAROMA_MSG sb_pretouched_msg;
  LIBAROMA_CONTROLP pd_pretouched;
  LIBAROMA_CONTROLP sb_pretouched;
  LIBAROMA_WINDOW_SIDEBAR_SLIDE_CB slide_cb;           /* sidebar slide hook */
  LIBAROMA_WINDOW_PULLDOWN_SLIDE_CB pulldown_slide_cb; /* pulldown slide hook */
  LIBAROMA_FLING pd_fling;
  LIBAROMA_FLING sb_fling;
} _LIBAROMA_WINDOW_LAYER, *_LIBAROMA_WINDOW_LAYERP;

/* window handler - forward declarations */
byte _libaroma_window_layer_sync(LIBAROMA_WINDOWP win, int x,int y,int w,int h);
void _libaroma_window_layer_postfree(LIBAROMA_WINDOWP win);
LIBAROMA_CANVASP _libaroma_window_layer_control_draw_begin(
    LIBAROMA_WINDOWP win,LIBAROMA_CONTROLP cctl
);
byte _libaroma_window_layer_message_hooker(
    LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg, dwordp retval
);
byte _libaroma_window_layer_updatedc(LIBAROMA_WINDOWP win);

/* handler vtable installed on layered root windows */
static LIBAROMA_WINDOW_HANDLER _libaroma_window_layer_handler={
  prefree:NULL,
  postfree:_libaroma_window_layer_postfree,
  updatebg:NULL,
  invalidate:NULL,
  sync:_libaroma_window_layer_sync,
  message_hooker:_libaroma_window_layer_message_hooker,
  control_draw_flush:NULL,
  control_erasebg:NULL,
  control_isvisible:NULL,
  control_draw_begin:_libaroma_window_layer_control_draw_begin
};

/*
 * Function    : _libaroma_window_layer_check
 * Return Value: _LIBAROMA_WINDOW_LAYERP
 * Descriptions: get root window client data
 *               (NULL unless win is a root window using the layer handler)
 */
static inline _LIBAROMA_WINDOW_LAYERP _libaroma_window_layer_check(
    LIBAROMA_WINDOWP win){
  if (!win){
    return NULL;
  }
  if (win->parent!=NULL){
    return NULL;
  }
  if (win->handler!=&_libaroma_window_layer_handler){
    return NULL;
  }
  return (_LIBAROMA_WINDOW_LAYERP) win->client_data;
} /* End of _libaroma_window_layer_check */

/*
 * Function    : _libaroma_window_layer_postfree
 * Return Value: void
 * Descriptions: postfree client_data
 *               deactivates/free sidebar and pulldown, restores the original
 *               ui thread, and releases the offscreen canvas and mutex
 */
void _libaroma_window_layer_postfree(LIBAROMA_WINDOWP win){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win);
  if (!me){
    return;
  }
  if (me->sidebar){
    if (me->sidebar->active){
      /* broadcast inactive message to sidebar children before freeing */
      me->sidebar->active=0;
      LIBAROMA_MSG msgia;
      msgia.msg=LIBAROMA_MSG_WIN_INACTIVE;
      int i;
      for (i=0;i<me->sidebar->childn;i++){
        if (me->sidebar->childs[i]->handler->message){
          me->sidebar->childs[i]->handler->message(
            me->sidebar->childs[i], &msgia
          );
        }
      }
    }
    libaroma_mutex_lock(me->mutex);
    libaroma_window_free(me->sidebar);
    libaroma_mutex_unlock(me->mutex);
  }
  if (me->pulldown){
    if (me->pulldown->active){
      /* broadcast inactive message to pulldown children before freeing */
      me->pulldown->active=0;
      LIBAROMA_MSG msgia;
      msgia.msg=LIBAROMA_MSG_WIN_INACTIVE;
      int i;
      for (i=0;i<me->pulldown->childn;i++){
        if (me->pulldown->childs[i]->handler->message){
          me->pulldown->childs[i]->handler->message(
            me->pulldown->childs[i], &msgia
          );
        }
      }
    }
    libaroma_mutex_lock(me->mutex);
    libaroma_window_free(me->pulldown);
    libaroma_mutex_unlock(me->mutex);
  }
  libaroma_mutex_lock(me->mutex);
  if (me->tdc){
    libaroma_canvas_free(me->tdc);
  }
  /* restore the window to its pre-layer state */
  win->ui_thread=me->ori_ui_thread;
  win->handler = NULL;
  win->client_data = NULL;
  libaroma_mutex_unlock(me->mutex);
  libaroma_mutex_free(me->mutex);
  free(me);
} /* End of _libaroma_window_layer_postfree */

/*
 * Function    : _libaroma_window_layer_updatedc
 * Return Value: byte
 * Descriptions: update nondirect dc
 *               composites tdc + sidebar/pulldown layers into win->dc;
 *               no-op while on_direct_canvas is set
 */
byte _libaroma_window_layer_updatedc(LIBAROMA_WINDOWP win){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win);
  if (!me){
    return 0;
  }
  if (win->active!=1){
    return 0;
  }
  libaroma_mutex_lock(me->mutex);
  if (!me->on_direct_canvas){
    if (me->tdc){
      byte sidebar_draw=0;
      byte pulldown_draw=0;
      if (win->active==1){
        if (me->sidebar_showed){
          if (me->sidebar){
            if (me->sidebar_xpos>0){
              /* sidebar slides in from the left; draw its visible strip */
              libaroma_draw_ex(
                win->dc,
                me->sidebar->dc,
                0, me->sidebar->y,
                me->sidebar->w-me->sidebar_xpos, 0,
                me->sidebar_xpos, me->sidebar->h,
                0,0xff
              );
              /* dim the remaining client area; dim strength follows the
                 slide progress (245 .. 95) */
              libaroma_draw_ex(win->dc,me->tdc,
                me->sidebar_xpos,0,
                me->sidebar_xpos,0,
                win->dc->w-me->sidebar_xpos,win->dc->h,
                LIBAROMA_DRAW_TO_BLACK,
                245-(150*me->sidebar_xpos/me->sidebar->w)
              );
              sidebar_draw=1;
            }
          }
        }
        if (me->pulldown_showed){
          if (me->pulldown){
            if (me->pulldown_ypos>0){
              libaroma_draw_ex( //draw pulldown window
                win->dc, //dest
                me->pulldown->dc, //src
                0, //destx
                (me->pulldown_drawtype==LIBAROMA_PULLDOWN_CLEAN)?
                  0:me->pulldown->y, //desty
                0, //srcx
                (me->pulldown_drawtype==LIBAROMA_PULLDOWN_CLEAN)?
                  0:(me->pulldown->h-me->pulldown_ypos), //srcy
                win->dc->w, //width
                (me->pulldown_drawtype==LIBAROMA_PULLDOWN_CLEAN)?
                  me->pulldown_ypos:me->pulldown->h, //height
                0,0xff //usealpha, level
              );
              if ((win->dc->h - me->pulldown_ypos) > 0){
                libaroma_draw_ex( //draw alpha section
                  win->dc,me->tdc, //dest, src
                  0,me->pulldown_ypos, //destx, desty
                  0,me->pulldown_ypos, //srcx, srcy
                  win->dc->w, win->dc->h-me->pulldown_ypos, //width, height
                  (me->pulldown_drawtype==LIBAROMA_PULLDOWN_CLEAN)?
                    0:LIBAROMA_DRAW_TO_BLACK, //alphaflags
                  (me->pulldown_drawtype==LIBAROMA_PULLDOWN_CLEAN)?
                    255:(245-(150*me->pulldown_ypos/me->pulldown->h))//alphalevel
                );
              }
              //printf("drawing window at x %d, y %d (srcx %d, srcy %d) with w %d, h %d\n",
              //	0, me->pulldown->y, 0, me->pulldown->h-me->pulldown_ypos,
              //	win->dc->w, me->pulldown->h);
              //printf("pulldown has width %d, height %d, x %d and y %d\n",
              //	win->dc->w, me->pulldown->h, 0, me->pulldown_ypos);
              pulldown_draw=1;
            }
          }
        }
      }
      if (!sidebar_draw && !pulldown_draw){
        /* no overlay visible: plain copy of the offscreen canvas */
        libaroma_draw(win->dc,me->tdc,0,0,0);
      }
    }
  }
  libaroma_mutex_unlock(me->mutex);
  return 1;
} /* End of _libaroma_window_layer_updatedc */

/*
 * Function    : _libaroma_window_layer_sync
 * Return Value: byte
 * Descriptions: window sync
 *               recomposites layers then syncs the dirty rect with the wm
 */
byte _libaroma_window_layer_sync(LIBAROMA_WINDOWP win, int x,int y,int w,int h){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win);
  if (!me){
    return 0;
  }
  if (!win->lock_sync){
    if (!libaroma_window_isactive(win)){
      ALOGW("libaroma_window_sync win is not active window");
      return 0;
    }
    if (win->dc==NULL){
      ALOGW("window_invalidate dc is null");
      return 0;
    }
    _libaroma_window_layer_updatedc(win);
    /* sync workspace */
    libaroma_wm_sync(win->x+x,win->y+y,w,h);
  }
  return 1;
} /* End of _libaroma_window_layer_sync */

/*
 * Function    : _libaroma_window_layer_control_draw_begin
 * Return Value: LIBAROMA_CANVASP
 * Descriptions: get canvas for child control
 *               area canvas is taken from win->dc in direct mode,
 *               otherwise from the offscreen tdc; caller frees it
 */
LIBAROMA_CANVASP _libaroma_window_layer_control_draw_begin(
    LIBAROMA_WINDOWP win,LIBAROMA_CONTROLP cctl
){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win);
  if (!me){
    return NULL;
  }
  LIBAROMA_CANVASP c=NULL;
  libaroma_mutex_lock(me->mutex);
  LIBAROMA_CANVASP dc = (me->on_direct_canvas)?win->dc:me->tdc;
  if (!dc){
    libaroma_mutex_unlock(me->mutex);
    return NULL;
  }
  c = libaroma_canvas_area(
    dc, cctl->x, cctl->y, cctl->w, cctl->h
  );
  libaroma_mutex_unlock(me->mutex);
  return c;
} /* End of _libaroma_window_layer_control_draw_begin */

/*
 * Function    : libaroma_window_layer_direct_canvas
 * Return Value: byte
 * Descriptions: set as direct canvas
 *               state=1: drop tdc and draw straight into win->dc;
 *               state=0: allocate tdc and snapshot win->dc into it
 */
byte libaroma_window_layer_direct_canvas(LIBAROMA_WINDOWP win, byte state){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win);
  if (!me){
    return 0;
  }
  libaroma_mutex_lock(me->mutex);
  if (state){
    if (!me->on_direct_canvas){
      /* flush pending offscreen content before dropping it */
      if ((win->dc)&&(me->tdc)) {
        libaroma_draw(win->dc,me->tdc,0,0,0);
      }
      if (me->tdc){
        libaroma_canvas_free(me->tdc);
        me->tdc=NULL;
      }
      me->on_direct_canvas=1;
    }
  }
  else{
    if (me->on_direct_canvas){
      if (win->dc){
        if (!me->tdc){
          me->tdc = libaroma_canvas(
            win->dc->w, win->dc->h
          );
        }
        if (me->tdc) {
          libaroma_draw(me->tdc,win->dc,0,0,0);
        }
      }
      me->on_direct_canvas=0;
    }
  }
  libaroma_mutex_unlock(me->mutex);
  return 1;
} /* End of libaroma_window_layer_direct_canvas */

/*
 * Function    : _libaroma_window_layer_set_sidebar_pos
 * Return Value: byte
 * Descriptions: set sidebar position
 *               x is clamped to [0, sidebar->w]; x==0 hides the sidebar and
 *               returns to direct canvas mode; x==w marks it fully open.
 *               NOTE: mutex is temporarily released around calls that take
 *               it themselves (message_hooker/direct_canvas) -- state may
 *               change in those windows.
 */
byte _libaroma_window_layer_set_sidebar_pos(LIBAROMA_WINDOWP win, int x){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win);
  if (!me){
    return 0;
  }
  libaroma_mutex_lock(me->mutex);
  if (!me->sidebar){
    libaroma_mutex_unlock(me->mutex);
    return 0;
  }
  if (x>0){
    if (x>me->sidebar->w){
      x=me->sidebar->w;
    }
    if (!me->sidebar_showed){
      if (!me->sidebar->active){
        /* activate sidebar */
        LIBAROMA_MSG msgr;
        dword rv=0;
        msgr.msg=LIBAROMA_MSG_WIN_MEASURED;
        libaroma_mutex_unlock(me->mutex);
        me->sidebar->handler->message_hooker(me->sidebar,&msgr,&rv);
        msgr.msg=LIBAROMA_MSG_WIN_ACTIVE;
        me->sidebar->handler->message_hooker(me->sidebar,&msgr,&rv);
        libaroma_window_invalidate(me->sidebar,0);
        libaroma_mutex_lock(me->mutex);
      }
      libaroma_mutex_unlock(me->mutex);
      libaroma_window_layer_direct_canvas(win,0);
      libaroma_mutex_lock(me->mutex);
      me->sidebar_showed=1;
    }
    if (me->sidebar_xpos!=x){
      me->redraw=1;
    }
    if (x==me->sidebar->w){
      /* fully open */
      if (me->sidebar_showed!=2){
        me->sidebar_showed=2;
      }
    }
    else if (me->sidebar_showed==2){
      me->sidebar_showed=1;
    }
    me->sidebar_xpos=x;
  }
  else{
    if (me->sidebar_showed){
      me->sidebar_showed=0;
      libaroma_mutex_unlock(me->mutex);
      libaroma_window_layer_direct_canvas(win,1);
      libaroma_mutex_lock(me->mutex);
    }
    if (me->sidebar_xpos!=0){
      me->redraw=1;
    }
    me->sidebar_xpos=0;
  }
  if (me->slide_cb){
    /* notify client of slide progress */
    me->slide_cb(me->sidebar, me->sidebar_xpos, me->sidebar->w);
  }
  libaroma_mutex_unlock(me->mutex);
  return 1;
} /* End of _libaroma_window_layer_set_sidebar_pos */

/*
 * Function    : _libaroma_window_layer_set_pulldown_pos
 * Return Value: byte
 * Descriptions: set pulldown position
 *               mirror of the sidebar variant with y/height instead of
 *               x/width (function continues beyond this chunk)
 */
byte _libaroma_window_layer_set_pulldown_pos(LIBAROMA_WINDOWP win, int y){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win);
  if (!me){
    return 0;
  }
  libaroma_mutex_lock(me->mutex);
  if (!me->pulldown){
    libaroma_mutex_unlock(me->mutex);
    return 0;
  }
  if (y>0){
    if (y>me->pulldown->h){
      /* NOTE(review): debug printf left in production path -- consider ALOGW */
      printf("Touch Y %d is greater than window height %d!\n", y, me->pulldown->h);
      y=me->pulldown->h;
    }
    if (!me->pulldown_showed){
      if (!me->pulldown->active){
        /* activate pulldown */
        LIBAROMA_MSG msgr;
        dword rv=0;
        msgr.msg=LIBAROMA_MSG_WIN_MEASURED;
        libaroma_mutex_unlock(me->mutex);
        me->pulldown->handler->message_hooker(me->pulldown,&msgr,&rv);
        msgr.msg=LIBAROMA_MSG_WIN_ACTIVE;
        me->pulldown->handler->message_hooker(me->pulldown,&msgr,&rv);
        libaroma_window_invalidate(me->pulldown,0);
        libaroma_mutex_lock(me->mutex);
      }
      libaroma_mutex_unlock(me->mutex);
      libaroma_window_layer_direct_canvas(win,0);
      libaroma_mutex_lock(me->mutex);
      me->pulldown_showed=1;
    }
    if (me->pulldown_ypos!=y){
      me->redraw=1;
    }
    /* NOTE(review): sidebar path compares x==me->sidebar->w here; comparing
       y against ->y (window origin) rather than ->h looks suspicious --
       verify intended "fully open" condition */
    if (y==me->pulldown->y){
      if (me->pulldown_showed!=2){
me->pulldown_showed=2; } } else if (me->pulldown_showed==2){ me->pulldown_showed=1; } me->pulldown_ypos=y; } else{ if (me->pulldown_showed){ me->pulldown_showed=0; libaroma_mutex_unlock(me->mutex); libaroma_window_layer_direct_canvas(win,1); libaroma_mutex_lock(me->mutex); } if (me->pulldown_ypos!=0){ me->redraw=1; } me->pulldown_ypos=0; } if (me->pulldown_slide_cb){ me->pulldown_slide_cb(me->pulldown, me->pulldown_ypos, me->pulldown->h); } libaroma_mutex_unlock(me->mutex); return 1; } /* End of _libaroma_window_layer_set_pulldown_pos */ byte _libaroma_window_layer_message_hooker( LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg, dwordp retval){ _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win); if (!me){ return 0; } if (win->active==1){ if (!me->pulldown_showed && me->sidebar_showed==2){ if (me->sidebar->handler->message_hooker( me->sidebar, msg, retval )){ return 1; } }/* if (me->sidebar_showed==2){ if (me->sidebar){ } }*/ else if (me->pulldown_showed==2){ if (me->pulldown){ if (me->pulldown->handler->message_hooker( me->pulldown, msg, retval )){ return 1; } } } else if (me->pd_touched==10){ /* cancel any touch event */ if (msg->msg==LIBAROMA_MSG_TOUCH){ return 1; } } else if (me->pulldown || me->sidebar){ if (me->pulldown){ switch (msg->msg){ case LIBAROMA_MSG_TOUCH: { int x = msg->x; int y = msg->y; libaroma_window_calculate_pos(win,NULL,&x,&y); if (msg->state==LIBAROMA_HID_EV_STATE_DOWN){ if (y<libaroma_dp(_LIBAROMA_LAYER_PULLDOWN_TOUCH_DP) || (y>=(me->pulldown->h-libaroma_dp(20)) && me->pulldown_showed)){ // start showing pulldown printf("Touching to show/hide! 
y=%d\n", y); libaroma_mutex_lock(me->mutex); me->pulldown_velocity=0; if (y>=(me->pulldown->h-libaroma_dp(20))){ me->pulldown_req_y=me->pulldown->h; } else me->pulldown_req_y=libaroma_dp(15); me->pd_touched=1; me->touch_x=x; me->touch_y=y; libaroma_fling_down(&me->pd_fling, y); libaroma_mutex_unlock(me->mutex); return 1; } } else if (me->pd_touched){ if (msg->state==LIBAROMA_HID_EV_STATE_MOVE){ libaroma_mutex_lock(me->mutex);/* if (y>=(me->pulldown->h-libaroma_dp(20))){ me->pulldown_req_y=me->pulldown->h; libaroma_fling_move(&me->fling, me->pulldown->h); libaroma_mutex_unlock(me->mutex); return 1; }*/ int reqy=y; if (me->pd_touched==2){ reqy=(me->pulldown->h)+(y-me->touch_y); } me->pulldown_req_y=MAX( libaroma_dp(_LIBAROMA_LAYER_PULLDOWN_TOUCH_DP),reqy); libaroma_fling_move(&me->pd_fling, y); libaroma_mutex_unlock(me->mutex); } else if (msg->state==LIBAROMA_HID_EV_STATE_UP){ libaroma_mutex_lock(me->mutex); me->pulldown_velocity= ((int) (libaroma_fling_up(&me->pd_fling, y)*1.3)>>8); if (me->pulldown_velocity>=0||y<=(me->pulldown->h)/2){ printf("Swiped up!\n"); me->pulldown_req_y=0; } else{ printf("Swiped down!\n"); me->pulldown_req_y=me->pulldown->h; me->pulldown_velocity=abs(me->pulldown_velocity); } if (me->pulldown_velocity){ /* fix velocity */ int diff = me->pulldown->h; me->pulldown_velocity = MAX(MIN( me->pulldown_velocity, 0.45*diff),0.05*diff); } if (me->pulldown_req_y!=me->pulldown_ypos){ me->pd_touched=10; } libaroma_mutex_unlock(me->mutex); } return 1; } } break; } } if (me->sidebar){ switch (msg->msg){ case LIBAROMA_MSG_TOUCH: { int x = msg->x; int y = msg->y; libaroma_window_calculate_pos(win,NULL,&x,&y); if (msg->state==LIBAROMA_HID_EV_STATE_DOWN){ if (x<libaroma_dp(_LIBAROMA_LAYER_SIDEBAR_TOUCH_DP)){ libaroma_mutex_lock(me->mutex); me->sidebar_velocity=0; me->sidebar_req_x=libaroma_dp(15); me->sb_touched=1; me->touch_x=x; me->touch_y=y; libaroma_fling_down(&me->sb_fling, x); libaroma_mutex_unlock(me->mutex); return 1; } } else if (me->sb_touched){ if 
(msg->state==LIBAROMA_HID_EV_STATE_MOVE){ libaroma_mutex_lock(me->mutex); int reqx=x; if (me->sb_touched==2){ reqx=(me->sidebar->w)+(x-me->touch_x); } me->sidebar_req_x=MAX( libaroma_dp(_LIBAROMA_LAYER_SIDEBAR_TOUCH_DP),reqx); libaroma_fling_move(&me->sb_fling, x); libaroma_mutex_unlock(me->mutex); } else if (msg->state==LIBAROMA_HID_EV_STATE_UP){ libaroma_mutex_lock(me->mutex); me->sidebar_velocity= ((int) (libaroma_fling_up(&me->sb_fling, x)*1.3)>>8); if (me->sidebar_velocity>=0){ me->sidebar_req_x=0; } else{ me->sidebar_req_x=me->sidebar->w; me->sidebar_velocity=abs(me->sidebar_velocity); } if (me->sidebar_velocity){ /* fix velocity */ int diff = me->sidebar->w; me->sidebar_velocity = MAX(MIN( me->sidebar_velocity, 0.45*diff),0.05*diff); } if (me->sidebar_req_x!=me->sidebar_xpos){ me->sb_touched=10; } libaroma_mutex_unlock(me->mutex); } return 1; } } break; } } } } return 0; } /* * Function : _libaroma_window_layer_ui_thread * Return Value: byte * Descriptions: layered window ui thread */ byte _libaroma_window_layer_ui_thread(LIBAROMA_WINDOWP win) { _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win); if (!me){ return 0; } byte need_sync=0; if (win->active==1){ libaroma_mutex_lock(me->mutex); if ((me->sidebar)&&(me->sidebar_req_x!=-1)){ /* show - hide sidebar */ if (!me->sidebar->active){ libaroma_mutex_unlock(me->mutex); _libaroma_window_layer_set_sidebar_pos(win,1); libaroma_mutex_lock(me->mutex); } else if (me->sidebar->w<=0){ /* invalid sidebar */ me->sidebar_req_x=-1; } else if (me->sidebar_req_x!=me->sidebar_xpos){ int move_sz = (me->sidebar_req_x-me->sidebar_xpos); if (me->sidebar_velocity!=0){ me->sidebar_velocity=(me->sidebar_velocity*246)>>8; int minw=MAX(1,0.05*me->sidebar->w); if (me->sidebar_velocity<=minw){ me->sidebar_velocity=minw; } if (move_sz<0){ move_sz = 0-me->sidebar_velocity; } else{ move_sz = me->sidebar_velocity; } } else{ move_sz = (move_sz<<6)>>8; } if (abs(move_sz)<2){ if (me->sidebar_req_x<me->sidebar_xpos){ move_sz=-1; } 
else{ move_sz=1; } } int target_sz = me->sidebar_xpos+move_sz; if (target_sz>=me->sidebar->w){ if ((me->sb_touched)&&(me->sb_touched!=10)){ target_sz=me->sidebar->w-1; } else{ if (me->sb_touched==10){ me->sb_touched=0; } target_sz=me->sidebar->w; me->sidebar_req_x=-1; me->sidebar_velocity=0; } } else if (target_sz<=0){ if ((me->sb_touched)&&(me->sb_touched!=10)){ target_sz=1; } else{ if (me->sb_touched==10){ me->sb_touched=0; } target_sz=0; me->sidebar_req_x=-1; me->sidebar_velocity=0; } } libaroma_mutex_unlock(me->mutex); _libaroma_window_layer_set_sidebar_pos(win,target_sz); libaroma_mutex_lock(me->mutex); } } if ((me->pulldown)&&(me->pulldown_req_y!=-1)){ /* show - hide pulldown */ if (!me->pulldown->active){ libaroma_mutex_unlock(me->mutex); _libaroma_window_layer_set_pulldown_pos(win,1); libaroma_mutex_lock(me->mutex); } else if (me->pulldown->h<=0){ /* invalid pulldown */ me->pulldown_req_y=-1; } else if (me->pulldown_req_y!=me->pulldown_ypos){ int move_sz = (me->pulldown_req_y-me->pulldown_ypos); if (me->pulldown_velocity!=0){ me->pulldown_velocity=(me->pulldown_velocity*246)>>8; int minh=MAX(1,0.05*me->pulldown->h); if (me->pulldown_velocity<minh){ me->pulldown_velocity=minh; } if (move_sz<0){ move_sz = 0-me->pulldown_velocity; } else{ move_sz = me->pulldown_velocity; } } else{ move_sz = (move_sz<<6)>>8; } if (abs(move_sz)<2){ if (me->pulldown_req_y<me->pulldown_ypos){ move_sz=-1; } else{ move_sz=1; } } int target_sz = me->pulldown_ypos+move_sz; if (target_sz>=me->pulldown->h){ if ((me->pd_touched)&&(me->pd_touched!=10)){ target_sz=me->pulldown->y-1; } else{ if (me->pd_touched==10){ me->pd_touched=0; } target_sz=me->pulldown->h; me->pulldown_req_y=-1; me->pulldown_velocity=0; } } else if (target_sz<=0){ if ((me->pd_touched)&&(me->pd_touched!=10)){ target_sz=1; } else{ if (me->pd_touched==10){ me->pd_touched=0; } target_sz=0; me->pulldown_req_y=-1; me->pulldown_velocity=0; } } libaroma_mutex_unlock(me->mutex); 
_libaroma_window_layer_set_pulldown_pos(win,target_sz); libaroma_mutex_lock(me->mutex); } } libaroma_mutex_unlock(me->mutex); if (!me->pulldown_showed && me->sidebar_showed){ if (me->sidebar){ if (me->sidebar->ui_thread){ if (me->sidebar->ui_thread(me->sidebar)){ need_sync=1; } } } } if (me->pulldown_showed){ if (me->pulldown){ if (me->pulldown->ui_thread){ if (me->pulldown->ui_thread(me->pulldown)){ need_sync=1; } } } } } if (me->ori_ui_thread){ if (me->ori_ui_thread(win)){ need_sync = 1; } } libaroma_mutex_lock(me->mutex); if (me->redraw){ need_sync=1; me->redraw=0; } libaroma_mutex_unlock(me->mutex); if (need_sync){ if (win->active==1){ _libaroma_window_layer_updatedc(win); } } return need_sync; } /* End of _libaroma_window_layer_ui_thread */ /* * Function : libaroma_window_layer_init * Return Value: byte * Descriptions: init window as layered window */ byte libaroma_window_layer_init(LIBAROMA_WINDOWP win){ if (!win){ ALOGW("window_root_register win is not valid pointer"); return 0; } if (win->parent){ ALOGW("window_root_register win is not root window"); return 0; } if (win->client_data){ if (win->handler!=&_libaroma_window_layer_handler){ ALOGW("window_root_register window is not valid root window"); return 0; } return 1; } _LIBAROMA_WINDOW_LAYERP me = (_LIBAROMA_WINDOW_LAYERP) calloc(sizeof(_LIBAROMA_WINDOW_LAYER),1); if (!me){ ALOGW("window_root_register cannot alloc internal data"); return 0; } libaroma_mutex_init(me->mutex); libaroma_mutex_lock(me->mutex); me->win = win; me->on_direct_canvas=1; me->ori_ui_thread=win->ui_thread; me->sidebar_req_x=-1; me->pulldown_req_y=-1; win->handler = &_libaroma_window_layer_handler; win->client_data = me; win->ui_thread=_libaroma_window_layer_ui_thread; libaroma_mutex_unlock(me->mutex); return 1; } /* End of libaroma_window_layer_init */ /* * Function : libaroma_window_layer_release * Return Value: byte * Descriptions: release layer window */ byte libaroma_window_layer_release(LIBAROMA_WINDOWP win){ 
_LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win); if (!me){ return 0; } _libaroma_window_layer_postfree(win); libaroma_window_invalidate(win,1); return 1; } /* End of libaroma_window_layer_release */ /**************************** PULLDOWN ************************************/ /* sidebar window handler */ byte _libaroma_window_pulldown_invalidate(LIBAROMA_WINDOWP win, byte sync); byte _libaroma_window_pulldown_sync(LIBAROMA_WINDOWP win, int x,int y,int w,int h); byte _libaroma_window_pulldown_message_hooker( LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg, dwordp retval ); static LIBAROMA_WINDOW_HANDLER _libaroma_window_pulldown_handler={ prefree:NULL, postfree:NULL, updatebg:NULL, invalidate:_libaroma_window_pulldown_invalidate, sync:_libaroma_window_pulldown_sync, message_hooker:_libaroma_window_pulldown_message_hooker, control_draw_flush:NULL, control_erasebg:NULL, control_isvisible:NULL, control_draw_begin:NULL }; /* * Function : _libaroma_window_pulldown_invalidate * Return Value: byte * Descriptions: invalidate */ byte _libaroma_window_pulldown_invalidate(LIBAROMA_WINDOWP win, byte sync){ if (win->handler!=&_libaroma_window_pulldown_handler){ return 0; } if ((win->dc)&&(win->bg)){ libaroma_draw(win->dc,win->bg,0,0,0); /* draw childs */ int i; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ libaroma_control_draw(win->childs[i], 0); } } if (sync){ return _libaroma_window_pulldown_sync(win,0,0,win->w,win->h); } return 1; } /* End of _libaroma_window_pulldown_invalidate */ /* * Function : _libaroma_window_pulldown_sync * Return Value: byte * Descriptions: sync pulldown */ byte _libaroma_window_pulldown_sync(LIBAROMA_WINDOWP win, int x,int y,int w,int h){ if (win->handler!=&_libaroma_window_pulldown_handler){ return 0; } _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win->parent); if (!me){ return 0; } if (me->pulldown_showed){ me->redraw=1; } return 1; } /* End of _libaroma_window_pulldown_sync */ /* * 
Function : libaroma_window_pulldown_show * Return Value: byte * Descriptions: show/hide pulldown */ byte libaroma_window_pulldown_show(LIBAROMA_WINDOWP win, byte show){ _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win->parent); if (!me){ return 0; } libaroma_mutex_lock(me->mutex); if (show){ if (!me->pulldown_showed){ libaroma_mutex_unlock(me->mutex); _libaroma_window_layer_set_pulldown_pos(win->parent,1); libaroma_mutex_lock(me->mutex); me->pulldown_req_y=win->h; me->pd_touched=10; libaroma_mutex_unlock(me->mutex); return 1; } } else{ if (me->pulldown_showed){ if (me->pulldown_showed==2){ libaroma_mutex_unlock(me->mutex); _libaroma_window_layer_set_pulldown_pos(win->parent,win->h-1); libaroma_mutex_lock(me->mutex); } me->pulldown_req_y=0; me->pd_touched=10; libaroma_mutex_unlock(me->mutex); return 1; } } libaroma_mutex_unlock(me->mutex); return 0; } /* End of libaroma_window_pulldown_show */ /* * Function : _libaroma_window_pulldown_message_hooker * Return Value: byte * Descriptions: pulldown message hooker */ byte _libaroma_window_pulldown_message_hooker( LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg, dwordp retval ){ _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win->parent); if (!me){ return 0; } byte return_state=0; libaroma_mutex_lock(me->mutex); switch (msg->msg){ case LIBAROMA_MSG_KEY_BACK: { libaroma_mutex_unlock(me->mutex); libaroma_window_pulldown_show(win, 0); libaroma_mutex_lock(me->mutex); return_state=1; goto end_message; } break; case LIBAROMA_MSG_TOUCH: { if (win->parent->active!=1){ return_state=0; goto end_message; } /* touch handler */ if (msg->state==LIBAROMA_HID_EV_STATE_DOWN){ memcpy(&me->pd_pretouched_msg,msg,sizeof(LIBAROMA_MSG)); win->touched = NULL; me->pd_pretouched=NULL; int x = msg->x; int y = msg->y; libaroma_window_calculate_pos(win->parent,NULL,&x,&y); if (y<win->h){ int i; for (i=0;i<win->childn;i++){ if (_libaroma_window_is_inside(win->childs[i],x,y)){ me->pd_pretouched = win->childs[i]; break; } } if 
(me->pd_pretouched!=NULL){ if (me->pd_pretouched->handler->message){ me->pd_client_touch_start=libaroma_tick(); } else{ me->pd_pretouched=NULL; } } } /* else{ libaroma_mutex_unlock(me->mutex); libaroma_window_sidebar_show(win, 0); libaroma_mutex_lock(me->mutex); }*/ me->pd_allow_scroll=2; me->touch_x=x; me->touch_y=y; libaroma_fling_down(&me->pd_fling, y); } else if (win->touched!=NULL){ if (msg->state==LIBAROMA_HID_EV_STATE_MOVE){ if (win->touched->handler->message){ *retval=win->touched->handler->message(win->touched, msg); } } else if (msg->state==LIBAROMA_HID_EV_STATE_UP){ if (win->touched->handler->message){ *retval=win->touched->handler->message(win->touched, msg); } win->touched=NULL; } } else if (msg->state==LIBAROMA_HID_EV_STATE_UP){ int x = msg->x; int y = msg->y; libaroma_window_calculate_pos(win->parent,NULL,&x,&y); if (y>=win->h){ libaroma_mutex_unlock(me->mutex); libaroma_window_pulldown_show(win, 0); libaroma_mutex_lock(me->mutex); } else if (me->pd_pretouched){ if (me->pd_pretouched->handler->message){ me->pd_pretouched->handler->message( me->pd_pretouched,&me->pd_pretouched_msg); me->pd_pretouched->handler->message( me->pd_pretouched,msg); } me->pd_pretouched=NULL; me->pd_client_touch_start=0; me->pd_allow_scroll=0; me->touch_x=x; me->touch_y=y; me->redraw=1; } } else if (msg->state==LIBAROMA_HID_EV_STATE_MOVE){ int x = msg->x; int y = msg->y; libaroma_window_calculate_pos(win->parent,NULL,&x,&y); if (me->pd_allow_scroll==2){ int move_sz = me->touch_x - x; int move_sz_y = me->touch_y - y; int scrdp=libaroma_dp(24); if ((abs(move_sz_y)>=scrdp)&&(abs(move_sz_y)>=abs(move_sz))){ /* halt the scroll and send to control */ if (me->pd_pretouched){ if (me->pd_pretouched->handler->message){ me->pd_client_touch_start=0; win->touched=me->pd_pretouched; me->pd_pretouched=NULL; win->touched->handler->message( win->touched,&me->pd_pretouched_msg); win->touched->handler->message( win->touched,msg); } else{ me->pd_pretouched=NULL; } me->pd_client_touch_start=0; 
me->pd_allow_scroll=0; me->touch_x=x; me->touch_y=y; me->redraw=1; } } else if (abs(move_sz)>=scrdp){ me->pd_allow_scroll=1; me->pd_client_touch_start=0; me->pd_pretouched=NULL; win->touched=NULL; me->pulldown_showed=1; me->pd_touched=2; me->touch_x=x; me->touch_y=y; libaroma_mutex_unlock(me->mutex); _libaroma_window_layer_set_pulldown_pos(win->parent,win->h-1); libaroma_mutex_lock(me->mutex); } } } return_state=1; goto end_message; } break; case LIBAROMA_MSG_WIN_ACTIVE: { if (!win->active){ int i; win->active=1; for (i=0;i<win->childn;i++){ if (win->childs[i]->handler->message){ win->childs[i]->handler->message(win->childs[i], msg); } } } } break; case LIBAROMA_MSG_WIN_RESIZE: { int i; for (i=0;i<win->childn;i++){ if (win->childs[i]->handler->message){ win->childs[i]->handler->message(win->childs[i], msg); } } } break; case LIBAROMA_MSG_WIN_MEASURED: { /*int target_h = libaroma_window_usedp(2)?win->rh:libaroma_dp(win->rh); target_h = libaroma_window_measure_calculate( target_h,win->rh,win->parent->h,1,0 );*/ win->x=win->y=win->rx=win->ry=win->left=win->top=0; win->ax=win->x; win->ay=win->y; win->w = win->parent->w; win->h = win->parent->h; if (libaroma_window_usedp(2)){ win->rw=win->width=libaroma_px(win->w); win->rh=win->height=libaroma_px(win->h); } else{ win->rw=win->width= win->w; win->rh=win->height= win->h; } if (win->dc){ if ((win->dc->w!=win->w)||(win->dc->h!=win->h)){ libaroma_canvas_free(win->dc); if (win->bg){ libaroma_canvas_free(win->bg); } win->dc=NULL; win->bg=NULL; } } if (!win->dc){ win->dc = libaroma_canvas(win->w,win->h); win->bg = libaroma_canvas(win->w,win->h); libaroma_canvas_setcolor(win->dc,0xffff,0); libaroma_canvas_setcolor(win->bg,0xffff,0); } /* remeasured all childs */ int i; for (i=0;i<win->childn;i++){ libaroma_window_measure(win,win->childs[i]); } } break; } end_message: libaroma_mutex_unlock(me->mutex); return return_state; } /* End of _libaroma_window_pulldown_message_hooker */ /* * Function : _libaroma_window_pulldown_ui_thread * 
Return Value: byte * Descriptions: window pulldown ui thread */ byte _libaroma_window_pulldown_ui_thread(LIBAROMA_WINDOWP win) { _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win->parent); if (!me){ return 0; } int i; byte need_sync = 0; if (win->active==1){ /* pretouched */ libaroma_mutex_lock(me->mutex); if ((me->pd_client_touch_start!=0)&& (libaroma_tick()-me->pd_client_touch_start>180)){ me->pd_client_touch_start=0; if (me->pd_pretouched!=NULL){ win->touched=me->pd_pretouched; me->pd_pretouched=NULL; if (win->touched->handler->message){ win->touched->handler->message( win->touched,&me->pd_pretouched_msg); } } } libaroma_mutex_unlock(me->mutex); #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ LIBAROMA_CONTROLP c=win->childs[i]; if (c->handler->thread!=NULL){ if (c->handler->thread(c)){ if(libaroma_control_draw(c,0)){ need_sync=1; } } } } } return need_sync; } /* End of _libaroma_window_pulldown_ui_thread */ /* * Function : libaroma_window_pulldown * Return Value: LIBAROMA_WINDOWP * Descriptions: new or get pulldown window */ LIBAROMA_WINDOWP libaroma_window_pulldown(LIBAROMA_WINDOWP win, int height, byte showtype){ if (!libaroma_window_layer_init(win)){ return NULL; } _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win); if (!me){ return NULL; } if (me->pulldown){ return me->pulldown; } libaroma_mutex_lock(me->mutex); LIBAROMA_WINDOWP pulldown = (LIBAROMA_WINDOWP) calloc(sizeof(LIBAROMA_WINDOW),1); if (!pulldown){ libaroma_mutex_unlock(me->mutex); ALOGW("window_pulldown alloc pulldown data failed"); return NULL; } pulldown->rh = height; pulldown->handler=&_libaroma_window_pulldown_handler; pulldown->parent=win; pulldown->ui_thread=_libaroma_window_pulldown_ui_thread; me->pulldown=pulldown; if (!showtype) me->pulldown_drawtype=LIBAROMA_PULLDOWN_SLIDE; else me->pulldown_drawtype=showtype; me->redraw=1; libaroma_mutex_unlock(me->mutex); return pulldown; } /* End of libaroma_window_pulldown */ /* * 
Function : libaroma_window_pulldown_onslide * Return Value: byte * Descriptions: set pulldown slide position callback */ byte libaroma_window_pulldown_onslide( LIBAROMA_WINDOWP win, LIBAROMA_WINDOW_PULLDOWN_SLIDE_CB cb){ _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win->parent); if (!me){ return 0; } libaroma_mutex_lock(me->mutex); me->pulldown_slide_cb = cb; ALOGI("Init pulldown slide callback"); libaroma_mutex_unlock(me->mutex); return 1; } /* End of libaroma_window_pulldown_onslide */ /******************************* SIDEBAR **************************************/ /* sidebar window handler */ byte _libaroma_window_sidebar_invalidate(LIBAROMA_WINDOWP win, byte sync); byte _libaroma_window_sidebar_sync(LIBAROMA_WINDOWP win, int x,int y,int w,int h); byte _libaroma_window_sidebar_message_hooker( LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg, dwordp retval ); static LIBAROMA_WINDOW_HANDLER _libaroma_window_sidebar_handler={ prefree:NULL, postfree:NULL, updatebg:NULL, invalidate:_libaroma_window_sidebar_invalidate, sync:_libaroma_window_sidebar_sync, message_hooker:_libaroma_window_sidebar_message_hooker, control_draw_flush:NULL, control_erasebg:NULL, control_isvisible:NULL, control_draw_begin:NULL }; /* * Function : _libaroma_window_sidebar_invalidate * Return Value: byte * Descriptions: invalidate */ byte _libaroma_window_sidebar_invalidate(LIBAROMA_WINDOWP win, byte sync){ if (win->handler!=&_libaroma_window_sidebar_handler){ return 0; } if ((win->dc)&&(win->bg)){ libaroma_draw(win->dc,win->bg,0,0,0); /* draw childs */ int i; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ libaroma_control_draw(win->childs[i], 0); } } if (sync){ return _libaroma_window_sidebar_sync(win,0,0,win->w,win->h); } return 1; } /* End of _libaroma_window_sidebar_invalidate */ /* * Function : _libaroma_window_sidebar_sync * Return Value: byte * Descriptions: sync sidebar */ byte _libaroma_window_sidebar_sync(LIBAROMA_WINDOWP win, int x,int 
y,int w,int h){ if (win->handler!=&_libaroma_window_sidebar_handler){ return 0; } _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win->parent); if (!me){ return 0; } if (me->sidebar_showed){ me->redraw=1; } return 1; } /* End of _libaroma_window_sidebar_sync */ /* * Function : libaroma_window_sidebar_show * Return Value: byte * Descriptions: show/hide sidebar */ byte libaroma_window_sidebar_show(LIBAROMA_WINDOWP win, byte show){ _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win->parent); if (!me){ return 0; } libaroma_mutex_lock(me->mutex); if (show){ if (!me->sidebar_showed){ libaroma_mutex_unlock(me->mutex); _libaroma_window_layer_set_sidebar_pos(win->parent,1); libaroma_mutex_lock(me->mutex); me->sidebar_req_x=win->w; me->sb_touched=10; libaroma_mutex_unlock(me->mutex); return 1; } } else{ if (me->sidebar_showed){ if (me->sidebar_showed==2){ libaroma_mutex_unlock(me->mutex); _libaroma_window_layer_set_sidebar_pos(win->parent,win->w-1); libaroma_mutex_lock(me->mutex); } me->sidebar_req_x=0; me->sb_touched=10; libaroma_mutex_unlock(me->mutex); return 1; } } libaroma_mutex_unlock(me->mutex); return 0; } /* End of libaroma_window_sidebar_show */ /* * Function : _libaroma_window_sidebar_message_hooker * Return Value: byte * Descriptions: sidebar message hooker */ byte _libaroma_window_sidebar_message_hooker( LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg, dwordp retval ){ _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win->parent); if (!me){ return 0; } byte return_state=0; libaroma_mutex_lock(me->mutex); switch (msg->msg){ case LIBAROMA_MSG_KEY_BACK: { libaroma_mutex_unlock(me->mutex); libaroma_window_sidebar_show(win, 0); libaroma_mutex_lock(me->mutex); return_state=1; goto end_message; } break; case LIBAROMA_MSG_TOUCH: { if (win->parent->active!=1){ return_state=0; goto end_message; } /* touch handler */ if (msg->state==LIBAROMA_HID_EV_STATE_DOWN){ memcpy(&me->sb_pretouched_msg,msg,sizeof(LIBAROMA_MSG)); win->touched = NULL; 
me->sb_pretouched=NULL; int x = msg->x; int y = msg->y; libaroma_window_calculate_pos(win->parent,NULL,&x,&y); if (x<win->w){ int i; for (i=0;i<win->childn;i++){ if (_libaroma_window_is_inside(win->childs[i],x,y)){ me->sb_pretouched = win->childs[i]; break; } } if (me->sb_pretouched!=NULL){ if (me->sb_pretouched->handler->message){ me->sb_client_touch_start=libaroma_tick(); } else{ me->sb_pretouched=NULL; } } } /* else{ libaroma_mutex_unlock(me->mutex); libaroma_window_sidebar_show(win, 0); libaroma_mutex_lock(me->mutex); }*/ me->sb_allow_scroll=2; me->touch_x=x; me->touch_y=y; libaroma_fling_down(&me->sb_fling, x); } else if (win->touched!=NULL){ if (msg->state==LIBAROMA_HID_EV_STATE_MOVE){ if (win->touched->handler->message){ *retval=win->touched->handler->message(win->touched, msg); } } else if (msg->state==LIBAROMA_HID_EV_STATE_UP){ if (win->touched->handler->message){ *retval=win->touched->handler->message(win->touched, msg); } win->touched=NULL; } } else if (msg->state==LIBAROMA_HID_EV_STATE_UP){ int x = msg->x; int y = msg->y; libaroma_window_calculate_pos(win->parent,NULL,&x,&y); if (x>=win->w){ libaroma_mutex_unlock(me->mutex); libaroma_window_sidebar_show(win, 0); libaroma_mutex_lock(me->mutex); } else if (me->sb_pretouched){ if (me->sb_pretouched->handler->message){ me->sb_pretouched->handler->message( me->sb_pretouched,&me->sb_pretouched_msg); me->sb_pretouched->handler->message( me->sb_pretouched,msg); } me->sb_pretouched=NULL; me->sb_client_touch_start=0; me->sb_allow_scroll=0; me->touch_x=x; me->touch_y=y; me->redraw=1; } } else if (msg->state==LIBAROMA_HID_EV_STATE_MOVE){ int x = msg->x; int y = msg->y; libaroma_window_calculate_pos(win->parent,NULL,&x,&y); if (me->sb_allow_scroll==2){ int move_sz = me->touch_x - x; int move_sz_y = me->touch_y - y; int scrdp=libaroma_dp(24); if ((abs(move_sz_y)>=scrdp)&&(abs(move_sz_y)>=abs(move_sz))){ /* halt the scroll and send to control */ if (me->sb_pretouched){ if (me->sb_pretouched->handler->message){ 
      /* Deferred-touch release path: the child was "pretouched" (touch-down
       * withheld while we decided between scroll and click). Deliver the
       * stored down-message first, then the current (release) message, so
       * the child sees a consistent down/up pair. */
      me->sb_client_touch_start=0;
      win->touched=me->sb_pretouched;
      me->sb_pretouched=NULL;
      win->touched->handler->message( win->touched,&me->sb_pretouched_msg);
      win->touched->handler->message( win->touched,msg);
    }
    else{
      me->sb_pretouched=NULL;
    }
    /* reset gesture state after the touch sequence ends */
    me->sb_client_touch_start=0;
    me->sb_allow_scroll=0;
    me->touch_x=x;
    me->touch_y=y;
    me->redraw=1;
  }
}
else if (abs(move_sz)>=scrdp){
  /* Horizontal movement exceeded the threshold: treat the gesture as a
   * sidebar slide. Cancel any pending child pretouch and snap the sidebar
   * fully open (parent width - 1). */
  me->sb_allow_scroll=1;
  me->sb_client_touch_start=0;
  me->sb_pretouched=NULL;
  win->touched=NULL;
  me->sidebar_showed=1;
  me->sb_touched=2;
  me->touch_x=x;
  me->touch_y=y;
  /* unlock while repositioning — _libaroma_window_layer_set_sidebar_pos
   * operates on the parent layer and may take the same mutex */
  libaroma_mutex_unlock(me->mutex);
  _libaroma_window_layer_set_sidebar_pos(win->parent,win->w-1);
  libaroma_mutex_lock(me->mutex);
}
}
}
return_state=1;
goto end_message;
}
break;
    case LIBAROMA_MSG_WIN_ACTIVE:
      {
        if (!win->active){
          int i;
          win->active=1;
          /* forward activation to every child control that handles messages */
          for (i=0;i<win->childn;i++){
            if (win->childs[i]->handler->message){
              win->childs[i]->handler->message(win->childs[i], msg);
            }
          }
        }
      }
      break;
    case LIBAROMA_MSG_WIN_RESIZE:
      {
        /* propagate resize to all children */
        int i;
        for (i=0;i<win->childn;i++){
          if (win->childs[i]->handler->message){
            win->childs[i]->handler->message(win->childs[i], msg);
          }
        }
      }
      break;
    case LIBAROMA_MSG_WIN_MEASURED:
      {
        /* Compute the sidebar's pixel width from its requested width (rw),
         * honoring the dp/px unit mode, then clamp so at least 56dp of the
         * parent window stays visible beside the open sidebar. */
        int target_w = libaroma_window_usedp(2)?win->rw:libaroma_dp(win->rw);
        target_w = libaroma_window_measure_calculate(
          target_w,win->rw,win->parent->w,1,0
        );
        int max_target_w = win->parent->w-libaroma_dp(56);
        if (target_w>max_target_w){
          target_w=max_target_w;
        }
        win->x=win->y=win->rx=win->ry=win->left=win->top=0;
        win->ax=win->x;
        win->ay=win->y;
        win->w = target_w;
        win->h = win->parent->h;
        if (libaroma_window_usedp(2)){
          win->rw=win->width=libaroma_px(win->w);
          win->rh=win->height=libaroma_px(win->h);
        }
        else{
          win->rw=win->width= win->w;
          win->rh=win->height= win->h;
        }
        /* drop and reallocate the draw/background canvases when the
         * measured size changed */
        if (win->dc){
          if ((win->dc->w!=win->w)||(win->dc->h!=win->h)){
            libaroma_canvas_free(win->dc);
            if (win->bg){
              libaroma_canvas_free(win->bg);
            }
            win->dc=NULL;
            win->bg=NULL;
          }
        }
        if (!win->dc){
          win->dc = libaroma_canvas(win->w,win->h);
          win->bg = libaroma_canvas(win->w,win->h);
          libaroma_canvas_setcolor(win->dc,0xffff,0);
          libaroma_canvas_setcolor(win->bg,0xffff,0);
        }
        /* remeasured all childs */
        int i;
        for (i=0;i<win->childn;i++){
          libaroma_window_measure(win,win->childs[i]);
        }
      }
      break;
  }
end_message:
  libaroma_mutex_unlock(me->mutex);
  return return_state;
} /* End of _libaroma_window_sidebar_message_hooker */

/*
 * Function    : _libaroma_window_sidebar_ui_thread
 * Return Value: byte - nonzero when at least one child redrew and the
 *               screen needs a sync
 * Descriptions: window sidebar ui thread. Flushes a pending pretouch to
 *               its child once the hold exceeds 180 ticks (presumably ms —
 *               confirm against libaroma_tick()), then runs every child's
 *               thread handler and redraws the ones that request it.
 */
byte _libaroma_window_sidebar_ui_thread(LIBAROMA_WINDOWP win) {
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win->parent);
  if (!me){
    return 0;
  }
  int i;
  byte need_sync = 0;
  if (win->active==1){
    /* pretouched: deliver the deferred touch-down after the grace period */
    libaroma_mutex_lock(me->mutex);
    if ((me->sb_client_touch_start!=0)&&
        (libaroma_tick()-me->sb_client_touch_start>180)){
      me->sb_client_touch_start=0;
      if (me->sb_pretouched!=NULL){
        win->touched=me->sb_pretouched;
        me->sb_pretouched=NULL;
        if (win->touched->handler->message){
          win->touched->handler->message(
            win->touched,&me->sb_pretouched_msg);
        }
      }
    }
    libaroma_mutex_unlock(me->mutex);
    /* run child control threads; iterations are independent, so this can
     * be parallelized when OpenMP is enabled (need_sync is only ever set
     * to 1, so concurrent writes are benign) */
#ifdef LIBAROMA_CONFIG_OPENMP
  #pragma omp parallel for
#endif
    for (i=0;i<win->childn;i++){
      LIBAROMA_CONTROLP c=win->childs[i];
      if (c->handler->thread!=NULL){
        if (c->handler->thread(c)){
          if(libaroma_control_draw(c,0)){
            need_sync=1;
          }
        }
      }
    }
  }
  return need_sync;
} /* End of _libaroma_window_sidebar_ui_thread */

/*
 * Function    : libaroma_window_sidebar
 * Return Value: LIBAROMA_WINDOWP
 * Descriptions: new or get sidebar window. Idempotent: returns the existing
 *               sidebar if one was already attached to this layer window.
 *               `width` is stored as the requested width (rw) and resolved
 *               during LIBAROMA_MSG_WIN_MEASURED.
 */
LIBAROMA_WINDOWP libaroma_window_sidebar(LIBAROMA_WINDOWP win, int width){
  if (!libaroma_window_layer_init(win)){
    return NULL;
  }
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win);
  if (!me){
    return NULL;
  }
  if (me->sidebar){
    return me->sidebar;
  }
  libaroma_mutex_lock(me->mutex);
  LIBAROMA_WINDOWP sidebar = (LIBAROMA_WINDOWP) calloc(sizeof(LIBAROMA_WINDOW),1);
  if (!sidebar){
    libaroma_mutex_unlock(me->mutex);
    ALOGW("window_sidebar alloc sidebar data failed");
    return NULL;
  }
  sidebar->rw = width;
  sidebar->handler=&_libaroma_window_sidebar_handler;
  sidebar->parent=win;
  sidebar->ui_thread=_libaroma_window_sidebar_ui_thread;
  me->sidebar=sidebar;
  me->redraw=1;
  libaroma_mutex_unlock(me->mutex);
  return sidebar;
} /* End of libaroma_window_sidebar */

/*
 * Function    : libaroma_window_sidebar_onslide
 * Return Value: byte - 1 on success, 0 when `win` has no layer parent
 * Descriptions: set sidebar slide position callback, invoked as the
 *               sidebar position changes.
 */
byte libaroma_window_sidebar_onslide(
    LIBAROMA_WINDOWP win,
    LIBAROMA_WINDOW_SIDEBAR_SLIDE_CB cb){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win->parent);
  if (!me){
    return 0;
  }
  libaroma_mutex_lock(me->mutex);
  me->slide_cb = cb;
  ALOGI("Init sidebar slide callback");
  libaroma_mutex_unlock(me->mutex);
  return 1;
} /* End of libaroma_window_sidebar_onslide */

#ifdef __cplusplus
}
#endif

#endif /* __libaroma_window_layer_c__ */
cofold.c
/* Last changed Time-stamp: <2008-12-03 17:44:38 ivo> */
/*
                  minimum free energy
                  RNA secondary structure prediction
                  of two concatenated (cofolded) sequences

                  c Ivo Hofacker, Chrisoph Flamm
                  original implementation by
                  Walter Fontana

                  Vienna RNA package
*/

#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include <limits.h>
#include "utils.h"
#include "energy_par.h"
#include "fold_vars.h"
#include "pair_mat.h"
#include "params.h"
#include "subopt.h"
#include "fold.h"
#include "loop_energies.h"
#include "gquad.h"
#include "cofold.h"

#ifdef _OPENMP
#include <omp.h>
#endif

#define PAREN

#define STACK_BULGE1      1     /* stacking energies for bulges of size 1 */
#define NEW_NINIO         1     /* new asymetry penalty */
#define MAXSECTORS        500   /* dimension for a backtrack array */
#define LOCALITY          0.    /* locality parameter for base-pairs */

#undef  TURN
#define TURN 0    /* reset minimal base pair span for intermolecular pairings */
#define TURN2 3   /* used by zukersubopt */

/* true iff positions I and J lie on the same strand of the concatenated
   dimer (the second strand starts at global cut_point) */
#define SAME_STRAND(I,J) (((I)>=cut_point)||((J)<cut_point))

/*
#################################
# GLOBAL VARIABLES              #
#################################
*/

/*
#################################
# PRIVATE VARIABLES             #
#################################
*/
PRIVATE float   mfe1, mfe2;     /* minimum free energies of the monomers */
PRIVATE int     *indx  = NULL;  /* index for moving in the triangle matrices c[] and fMl[]*/
PRIVATE int     *c     = NULL;  /* energy array, given that i-j pair */
PRIVATE int     *cc    = NULL;  /* linear array for calculating canonical structures */
PRIVATE int     *cc1   = NULL;  /*   "     "        */
PRIVATE int     *f5    = NULL;  /* energy of 5' end */
PRIVATE int     *fc    = NULL;  /* energy from i to cutpoint (and vice versa if i>cut) */
PRIVATE int     *fML   = NULL;  /* multi-loop auxiliary energy array */
PRIVATE int     *fM1   = NULL;  /* second ML array, only for subopt */
PRIVATE int     *Fmi   = NULL;  /* holds row i of fML (avoids jumps in memory) */
PRIVATE int     *DMLi  = NULL;  /* DMLi[j] holds MIN(fML[i,k]+fML[k+1,j])  */
PRIVATE int     *DMLi1 = NULL;  /*             MIN(fML[i+1,k]+fML[k+1,j])  */
PRIVATE int     *DMLi2 = NULL;  /*             MIN(fML[i+2,k]+fML[k+1,j])  */
PRIVATE char    *ptype = NULL;  /* precomputed array of pair types */
PRIVATE short   *S = NULL, *S1 = NULL;  /* encoded sequence; S1[0] holds length */
PRIVATE paramT  *P = NULL;      /* scaled energy parameter set */
PRIVATE int     init_length = -1;  /* length the DP arrays were allocated for */
PRIVATE int     zuker = 0;      /* Do Zuker style suboptimals? */
PRIVATE sect    sector[MAXSECTORS];   /* stack for backtracking */
PRIVATE int     length;
PRIVATE bondT   *base_pair2 = NULL;   /* pair list; [0].i holds the pair count */
PRIVATE int     *BP;  /* contains the structure constraints: BP[i]
                         -1: | = base must be paired
                         -2: < = base must be paired with j<i
                         -3: > = base must be paired with j>i
                         -4: x = base must not pair
                         positive int: base is paired with int      */
PRIVATE int     struct_constrained = 0;
PRIVATE int     with_gquad  = 0;  /* include G-quadruplex contributions? */
PRIVATE int     *ggg = NULL;      /* minimum free energies of the gquadruplexes */

#ifdef _OPENMP

/* every thread folds with its own copy of the DP state */
#pragma omp threadprivate(mfe1, mfe2, indx, c, cc, cc1, f5, fc, fML, fM1, Fmi, DMLi, DMLi1, DMLi2,\
                          ptype, S, S1, P, zuker, sector, length, base_pair2, BP, struct_constrained,\
                          ggg, with_gquad)

#endif

/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE void  init_cofold(int length, paramT *parameters);
PRIVATE void  get_arrays(unsigned int size);
/* PRIVATE void  scale_parameters(void); */
PRIVATE void  make_ptypes(const short *S, const char *structure);
PRIVATE void  backtrack(const char *sequence);
PRIVATE int   fill_arrays(const char *sequence);
PRIVATE void  free_end(int *array, int i, int start);

/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/

/*--------------------------------------------------------------------------*/

/* (re)allocate the DP arrays for a sequence of `length` nucleotides and
   load the energy parameters; aborts via nrerror() on length < 1 */
PRIVATE void init_cofold(int length, paramT *parameters){

#ifdef _OPENMP
/* Explicitly turn off dynamic threads */
  omp_set_dynamic(0);
#endif

  if (length<1) nrerror("init_cofold: argument must be greater 0");
  free_co_arrays();
  get_arrays((unsigned) length);
  init_length=length;
  indx = get_indx((unsigned) length);
  update_cofold_params_par(parameters);
}

/*--------------------------------------------------------------------------*/

/* allocate all triangular (size*(size+1)/2) and linear DP arrays;
   guards against integer overflow of the triangular index first */
PRIVATE void get_arrays(unsigned int size){
  if(size >= (unsigned int)sqrt((double)INT_MAX))
    nrerror("get_arrays@cofold.c: sequence length exceeds addressable range");

  c     = (int *) space(sizeof(int)*((size*(size+1))/2+2));
  fML   = (int *) space(sizeof(int)*((size*(size+1))/2+2));
  if (uniq_ML)
    fM1 = (int *) space(sizeof(int)*((size*(size+1))/2+2));
  ptype = (char *) space(sizeof(char)*((size*(size+1))/2+2));
  f5    = (int *) space(sizeof(int)*(size+2));
  fc    = (int *) space(sizeof(int)*(size+2));
  cc    = (int *) space(sizeof(int)*(size+2));
  cc1   = (int *) space(sizeof(int)*(size+2));
  Fmi   = (int *) space(sizeof(int)*(size+1));
  DMLi  = (int *) space(sizeof(int)*(size+1));
  DMLi1 = (int *) space(sizeof(int)*(size+1));
  DMLi2 = (int *) space(sizeof(int)*(size+1));
  base_pair2 = (bondT *) space(sizeof(bondT)*(1+size/2));
}

/*--------------------------------------------------------------------------*/

/* release every DP array and the parameter set; safe to call repeatedly */
PUBLIC void free_co_arrays(void){
  if(indx)        free(indx);
  if(c)           free(c);
  if(fML)         free(fML);
  if(f5)          free(f5);
  if(cc)          free(cc);
  if(cc1)         free(cc1);
  if(fc)          free(fc);
  if(ptype)       free(ptype);
  if(fM1)         free(fM1);
  if(base_pair2)  free(base_pair2);
  if(Fmi)         free(Fmi);
  if(DMLi)        free(DMLi);
  if(DMLi1)       free(DMLi1);
  if(DMLi2)       free(DMLi2);
  if(P)           free(P);
  if(ggg)         free(ggg);
  indx = c = fML = f5 = cc = cc1 = fc = fM1 = Fmi = DMLi = DMLi1 = DMLi2 = ggg = NULL;
  ptype       = NULL;
  base_pair2  = NULL;
  P           = NULL;
  init_length = 0;
}

/*--------------------------------------------------------------------------*/

/* expose internal DP arrays (including the gquad matrix) to callers such
   as subopt(); pointers are borrowed — do not free them */
PUBLIC void export_cofold_arrays_gq( int **f5_p,
                                     int **c_p,
                                     int **fML_p,
                                     int **fM1_p,
                                     int **fc_p,
                                     int **ggg_p,
                                     int **indx_p,
                                     char **ptype_p){
  /* make the DP arrays available to routines such as subopt() */
  *f5_p     = f5;
  *c_p      = c;
  *fML_p    = fML;
  *fM1_p    = fM1;
  *ggg_p    = ggg;
  *indx_p   = indx;
  *ptype_p  = ptype;
  *fc_p     =fc;
}

/* same as export_cofold_arrays_gq() but without the gquad matrix */
PUBLIC void export_cofold_arrays( int **f5_p,
                                  int **c_p,
                                  int **fML_p,
                                  int **fM1_p,
                                  int **fc_p,
                                  int **indx_p,
                                  char **ptype_p){
  /* make the DP arrays available to routines such as subopt() */
  *f5_p     = f5;
  *c_p      = c;
  *fML_p    = fML;
  *fM1_p    = fM1;
  *indx_p   = indx;
  *ptype_p  = ptype;
  *fc_p     =fc;
}

/*--------------------------------------------------------------------------*/

/* convenience wrapper: cofold with default parameters and the global
   fold_constrained flag */
PUBLIC float cofold(const char *string, char *structure) {
  return cofold_par(string, structure, NULL, fold_constrained);
}

/* Compute the MFE and MFE structure of the (possibly dimeric) sequence
   `string`; writes the dot-bracket structure into `structure` (which also
   carries constraints on input when is_constrained != 0) and returns the
   energy in kcal/mol. */
PUBLIC float cofold_par(const char *string,
                        char *structure,
                        paramT *parameters,
                        int is_constrained){
  int i, length, energy, bonus=0, bonus_cnt=0;

  zuker               = 0;
  struct_constrained  = is_constrained;
  length              = (int) strlen(string);

#ifdef _OPENMP
  /* always init everything since all global static variables are uninitialized when entering a thread */
  init_cofold(length, parameters);
#else
  if(parameters) init_cofold(length, parameters);
  else if (length>init_length) init_cofold(length, parameters);
  else if (fabs(P->temperature - temperature)>1e-6) update_cofold_params_par(parameters);
#endif

  with_gquad  = P->model_details.gquad;
  S           = encode_sequence(string, 0);
  S1          = encode_sequence(string, 1);
  S1[0] = S[0]; /* store length at pos. 0 */

  BP  = (int *)space(sizeof(int)*(length+2));

  if(with_gquad){
    /* add a guess of how many G's may be involved in a G quadruplex */
    if(base_pair2) free(base_pair2);
    base_pair2 = (bondT *) space(sizeof(bondT)*(4*(1+length/2)));
  }

  make_ptypes(S, structure);
  energy = fill_arrays(string);
  backtrack(string);

#ifdef PAREN
  parenthesis_structure(structure, base_pair2, length);
#else
  letter_structure(structure, base_pair2, length);
#endif

  /*
  *  Backward compatibility:
  *  This block may be removed if deprecated functions
  *  relying on the global variable "base_pair" vanish from within the package!
  */
  base_pair = base_pair2;
  /*
  {
    if(base_pair) free(base_pair);
    base_pair = (bondT *)space(sizeof(bondT) * (1+length/2));
    memcpy(base_pair, base_pair2, sizeof(bondT) * (1+length/2));
  }
  */

  /* check constraints: count how many of the requested constraints were
     actually realized in the predicted structure */
  for(i=1;i<=length;i++) {
    if((BP[i]<0)&&(BP[i]>-4)) {
      bonus_cnt++;
      if((BP[i]==-3)&&(structure[i-1]==')')) bonus++;
      if((BP[i]==-2)&&(structure[i-1]=='(')) bonus++;
      if((BP[i]==-1)&&(structure[i-1]!='.')) bonus++;
    }

    if(BP[i]>i) {
      int l;
      bonus_cnt++;
      for(l=1; l<=base_pair2[0].i; l++)
        if(base_pair2[l].i != base_pair2[l].j)
          if((i==base_pair2[l].i)&&(BP[i]==base_pair2[l].j)) bonus++;
    }
  }

  if (bonus_cnt>bonus) fprintf(stderr,"\ncould not enforce all constraints\n");
  bonus*=BONUS;

  free(S); free(S1); free(BP);

  energy += bonus;      /*remove bonus energies from result */

  /* energies are stored in dekacal/mol internally; divide by 100 */
  if (backtrack_type=='C')
    return (float) c[indx[length]+1]/100.;
  else if (backtrack_type=='M')
    return (float) fML[indx[length]+1]/100.;
  else
    return (float) energy/100.;
}

/* Forward dynamic-programming pass: fills the c (pair), fML (multiloop),
   fc (cut fragment) and f5 (external) arrays and returns the optimal
   energy f5[length]; also sets the monomer MFEs mfe1/mfe2 for dimers. */
PRIVATE int fill_arrays(const char *string) {
  /* fill "c", "fML" and "f5" arrays and return  optimal energy */

  int   i, j, k, length, energy;
  int   decomp, new_fML, max_separation;
  int   no_close, type, type_2, tt, maxj;
  int   bonus=0;

  int   dangle_model  = P->model_details.dangles;
  int   noGUclosure   = P->model_details.noGUclosure;
  int   noLP          = P->model_details.noLP;

  length = (int) strlen(string);

  max_separation = (int) ((1.-LOCALITY)*(double)(length-2)); /* not in use */

  if(with_gquad)
    ggg = get_gquad_matrix(S, P);

  /* initialize all DP cells to "unreachable" (INF) */
  for (j=1; j<=length; j++) {
    Fmi[j]=DMLi[j]=DMLi1[j]=DMLi2[j]=INF;
    fc[j]=0;
  }

  for (j = 1; j<=length; j++)
    for (i=1; i<=j; i++) {
      c[indx[j]+i] = fML[indx[j]+i] = INF;
      if (uniq_ML) fM1[indx[j]+i] = INF;
    }

  for (i = length-TURN-1; i >= 1; i--) { /* i,j in [1..length] */

    /* Zuker suboptimals only need j up to the first monomer's end image */
    maxj=(zuker)? (MIN2(i+cut_point-1,length)):length;
    for (j = i+TURN+1; j <= maxj; j++) {
      int p, q, ij;
      ij = indx[j]+i;
      bonus = 0;
      type = ptype[ij];

      /* enforcing structure constraints */
      if ((BP[i]==j)||(BP[i]==-1)||(BP[i]==-2)) bonus -= BONUS;
      if ((BP[j]==-1)||(BP[j]==-3)) bonus -= BONUS;
      if ((BP[i]==-4)||(BP[j]==-4)) type=0;

      no_close = (((type==3)||(type==4))&&noGUclosure&&(bonus==0));

      if (j-i-1 > max_separation) type = 0;  /* forces locality degree */

      if (type) {   /* we have a pair */
        int new_c=0, stackEnergy=INF;
        short si, sj;
        /* neighboring bases for dangles, or -1 across the cut point */
        si = SAME_STRAND(i, i+1) ? S1[i+1] : -1;
        sj = SAME_STRAND(j-1, j) ? S1[j-1] : -1;

        /* hairpin ----------------------------------------------*/
        if (SAME_STRAND(i,j)) {
          if (no_close) new_c = FORBIDDEN;
          else
            new_c = E_Hairpin(j-i-1, type, si, sj, string+i-1, P);
        }
        else {
          /* pair spans the cut: the "hairpin" is really an exterior loop */
          if (dangle_model)
            new_c += E_ExtLoop(rtype[type], sj, si, P);
          else
            new_c += E_ExtLoop(rtype[type], -1, -1, P);
        }

        /*--------------------------------------------------------
          check for elementary structures involving more than one
          closing pair.
          --------------------------------------------------------*/

        for (p = i+1; p <= MIN2(j-2-TURN,i+MAXLOOP+1) ; p++) {
          int minq = j-i+p-MAXLOOP-2;
          if (minq<p+1+TURN) minq = p+1+TURN;
          for (q = minq; q < j; q++) {
            type_2 = ptype[indx[q]+p];

            if (type_2==0) continue;
            type_2 = rtype[type_2];

            if (noGUclosure)
              if (no_close||(type_2==3)||(type_2==4))
                if ((p>i+1)||(q<j-1)) continue;  /* continue unless stack */

            if (SAME_STRAND(i,p) && SAME_STRAND(q,j))
              energy = E_IntLoop(p-i-1, j-q-1, type, type_2, si, sj, S1[p-1], S1[q+1], P);
            else
              energy = E_IntLoop_Co(rtype[type], rtype[type_2], i, j, p, q, cut_point, si, sj, S1[p-1], S1[q+1], dangle_model, P);

            new_c = MIN2(energy+c[indx[q]+p], new_c);
            if ((p==i+1)&&(j==q+1)) stackEnergy = energy; /* remember stack energy */

          } /* end q-loop */
        } /* end p-loop */

        /* multi-loop decomposition ------------------------*/
        if (!no_close) {
          int MLenergy;
          if((si >= 0) && (sj >= 0)){
            decomp = DMLi1[j-1];
            tt = rtype[type];
            MLenergy = P->MLclosing;
            switch(dangle_model){
              case 0:   MLenergy += decomp + E_MLstem(tt, -1, -1, P);
                        break;
              case 2:   MLenergy += decomp + E_MLstem(tt, sj, si, P);
                        break;
              default:  decomp += E_MLstem(tt, -1, -1, P);
                        decomp = MIN2(decomp, DMLi1[j-2] + E_MLstem(tt, sj, -1, P) + P->MLbase);
                        decomp = MIN2(decomp, DMLi2[j-1] + E_MLstem(tt, -1, si, P) + P->MLbase);
                        decomp = MIN2(decomp, DMLi2[j-2] + E_MLstem(tt, sj, si, P) + 2*P->MLbase);
                        MLenergy += decomp;
                        break;
            }
            new_c = MIN2(new_c, MLenergy);
          }

          if (!SAME_STRAND(i,j)) { /* cut is somewhere in the multiloop*/
            decomp = fc[i+1] + fc[j-1];
            tt = rtype[type];
            switch(dangle_model){
              case 0:   decomp += E_ExtLoop(tt, -1, -1, P);
                        break;
              case 2:   decomp += E_ExtLoop(tt, sj, si, P);
                        break;
              default:  decomp += E_ExtLoop(tt, -1, -1, P);
                        decomp = MIN2(decomp, fc[i+2] + fc[j-2] + E_ExtLoop(tt, sj, si, P));
                        decomp = MIN2(decomp, fc[i+2] + fc[j-1] + E_ExtLoop(tt, -1, si, P));
                        decomp = MIN2(decomp, fc[i+1] + fc[j-2] + E_ExtLoop(tt, sj, -1, P));
                        break;
            }
            new_c = MIN2(new_c, decomp);
          }
        } /* end >> if (!no_close) << */

        /* coaxial stacking of (i.j) with (i+1.k) or (k+1.j-1) */
        if (dangle_model==3) {
          decomp = INF;
          for (k = i+2+TURN; k < j-2-TURN; k++) {
            type_2 = ptype[indx[k]+i+1]; type_2 = rtype[type_2];
            if (type_2)
              decomp = MIN2(decomp, c[indx[k]+i+1]+P->stack[type][type_2]+
                            fML[indx[j-1]+k+1]);
            type_2 = ptype[indx[j-1]+k+1]; type_2 = rtype[type_2];
            if (type_2)
              decomp = MIN2(decomp, c[indx[j-1]+k+1]+P->stack[type][type_2]+
                            fML[indx[k]+i+1]);
          }
          /* no TermAU penalty if coax stack */
          decomp += 2*P->MLintern[1] + P->MLclosing;
          new_c = MIN2(new_c, decomp);
        }

        if(with_gquad){
          /* include all cases where a g-quadruplex may be enclosed by base pair (i,j) */
          if (!no_close && SAME_STRAND(i,j)) {
            tt = rtype[type];
            energy = E_GQuad_IntLoop(i, j, type, S1, ggg, indx, P);
            new_c = MIN2(new_c, energy);
          }
        }

        new_c = MIN2(new_c, cc1[j-1]+stackEnergy);
        cc[j] = new_c + bonus;
        if (noLP){
          if (SAME_STRAND(i,i+1) && SAME_STRAND(j-1,j))
            c[ij] = cc1[j-1]+stackEnergy+bonus;
          else /* currently we don't allow stacking over the cut point */
            c[ij] = FORBIDDEN;
        }
        else
          c[ij] = cc[j];

      } /* end >> if (pair) << */

      else c[ij] = INF;

      /* done with c[i,j], now compute fML[i,j] */
      /* free ends ? -----------------------------------------*/

      new_fML=INF;
      if (SAME_STRAND(i-1,i)) {
        if (SAME_STRAND(i,i+1)) new_fML = fML[ij+1]+P->MLbase;
        if (SAME_STRAND(j-1,j)) new_fML = MIN2(fML[indx[j-1]+i]+P->MLbase, new_fML);
        if (SAME_STRAND(j,j+1)) {
          energy = c[ij];
          if(dangle_model == 2)
            energy += E_MLstem(type,(i>1) ? S1[i-1] : -1, (j<length) ? S1[j+1] : -1, P);
          else
            energy += E_MLstem(type, -1, -1, P);
          new_fML = MIN2(new_fML, energy);

          if(with_gquad){
            int gggg = ggg[ij] + E_MLstem(0, -1, -1, P);
            energy = MIN2(energy, gggg);
            new_fML = MIN2(new_fML, energy);
          }

          if(uniq_ML){
            fM1[ij] = energy;
            if(SAME_STRAND(j-1,j))
              fM1[ij] = MIN2(energy, fM1[indx[j-1]+i] + P->MLbase);
          }
        }
        if (dangle_model%2==1) {  /* normal dangles */
          if (SAME_STRAND(i,i+1)) {
            tt = ptype[ij+1]; /* i+1,j */
            new_fML = MIN2(new_fML, c[ij+1] + P->MLbase + E_MLstem(tt, S1[i], -1, P));
          }
          if (SAME_STRAND(j-1,j)) {
            tt = ptype[indx[j-1]+i]; /* i,j-1 */
            new_fML = MIN2(new_fML, c[indx[j-1]+i] + P->MLbase + E_MLstem(tt, -1, S1[j], P));
          }
          if ((SAME_STRAND(j-1,j))&&(SAME_STRAND(i,i+1))) {
            tt = ptype[indx[j-1]+i+1]; /* i+1,j-1 */
            new_fML = MIN2(new_fML, c[indx[j-1]+i+1] + 2*P->MLbase + E_MLstem(tt, S1[i], S1[j], P));
          }
        }
      }

      if(with_gquad){
        if(SAME_STRAND(i, j))
          new_fML = MIN2(new_fML, ggg[indx[j] + i] + E_MLstem(0, -1, -1, P));
      }

      /* modular decomposition -------------------------------*/
      {
        int stopp;     /*loop 1 up to cut, then loop 2*/
        stopp=(cut_point>0)? (cut_point):(j-2-TURN);
        for (decomp=INF, k = i+1+TURN; k<stopp; k++)
          decomp = MIN2(decomp, Fmi[k]+fML[indx[j]+k+1]);
        k++;
        for (;k <= j-2-TURN;k++)
          decomp = MIN2(decomp, Fmi[k]+fML[indx[j]+k+1]);
      }
      DMLi[j] = decomp;               /* store for use in ML decompositon */
      new_fML = MIN2(new_fML,decomp);

      /* coaxial stacking */
      if (dangle_model==3) {
        int stopp;
        stopp=(cut_point>0)? (cut_point):(j-2-TURN);
        /* additional ML decomposition as two coaxially stacked helices */
        for (decomp = INF, k = i+1+TURN; k<stopp; k++) {
          type = ptype[indx[k]+i]; type = rtype[type];
          type_2 = ptype[indx[j]+k+1]; type_2 = rtype[type_2];
          if (type && type_2)
            decomp = MIN2(decomp,
                          c[indx[k]+i]+c[indx[j]+k+1]+P->stack[type][type_2]);
        }
        k++;
        for (;k <= j-2-TURN; k++) {
          type = ptype[indx[k]+i]; type = rtype[type];
          type_2 = ptype[indx[j]+k+1]; type_2 = rtype[type_2];
          if (type && type_2)
            decomp = MIN2(decomp,
                          c[indx[k]+i]+c[indx[j]+k+1]+P->stack[type][type_2]);
        }
        decomp += 2*P->MLintern[1];
#if 0
        /* This is needed for Y shaped ML loops with coax stacking of
           interior pairs, but backtracking will fail if activated */
        DMLi[j] = MIN2(DMLi[j], decomp);
        if (SAME_STRAND(j-1,j)) DMLi[j] = MIN2(DMLi[j], DMLi[j-1]+P->MLbase);
        if (SAME_STRAND(i,i+1)) DMLi[j] = MIN2(DMLi[j], DMLi1[j]+P->MLbase);
        new_fML = MIN2(new_fML, DMLi[j]);
#endif
        new_fML = MIN2(new_fML, decomp);
      }

      fML[ij] = Fmi[j] = new_fML;     /* substring energy */
    }

    /* fc holds the optimal energy of the fragment between i and the cut */
    if (i==cut_point)
      for (j=i; j<=maxj; j++)
        free_end(fc, j, cut_point);
    if (i<cut_point)
      free_end(fc,i,cut_point-1);


    {
      int *FF; /* rotate the auxilliary arrays */
      FF = DMLi2; DMLi2 = DMLi1; DMLi1 = DMLi; DMLi = FF;
      FF = cc1; cc1=cc; cc=FF;
      for (j=1; j<=maxj; j++) {cc[j]=Fmi[j]=DMLi[j]=INF; }
    }
  }

  /* calculate energies of 5' and 3' fragments */

  for (i=1; i<=length; i++)
    free_end(f5, i, 1);

  if (cut_point>0) {
    mfe1=f5[cut_point-1];
    mfe2=fc[length];
    /* add DuplexInit, check whether duplex*/
    for (i=cut_point; i<=length; i++) {
      f5[i]=MIN2(f5[i]+P->DuplexInit, fc[i]+fc[1]);
    }
  }

  energy = f5[length];
  if (cut_point<1) mfe1=mfe2=energy;
  return energy;
}

/* Recover the base-pair list from the filled DP matrices. Works off an
   explicit sector[] stack instead of recursion; `s` is the initial stack
   depth and `b` the number of pairs already in base_pair2. On exit,
   base_pair2[0].i holds the total pair count. */
PRIVATE void backtrack_co(const char *string, int s, int b /* b=0: start new structure, b \ne 0: add to existing structure */) {

  /*------------------------------------------------------------------
    trace back through the "c", "fc", "f5" and "fML" arrays to get the
    base pairing list. No search for equivalent structures is done.
    This is fast, since only few structure elements are recalculated.
    ------------------------------------------------------------------*/

  int   i, j, k, length, energy, new;
  int   no_close, type, type_2, tt;
  int   bonus;

  int   dangle_model  = P->model_details.dangles;
  int   noGUclosure   = P->model_details.noGUclosure;
  int   noLP          = P->model_details.noLP;

  /* int   b=0;*/

  length = strlen(string);
  if (s==0) {
    sector[++s].i = 1;
    sector[s].j = length;
    sector[s].ml = (backtrack_type=='M') ? 1 : ((backtrack_type=='C')?2:0);
  }
  while (s>0) {
    int ml, fij, fi, cij, traced, i1, j1, mm, p, q, jj=0, gq=0;
    int canonical = 1;     /* (i,j) closes a canonical structure */
    i  = sector[s].i;
    j  = sector[s].j;
    ml = sector[s--].ml;   /* ml is a flag indicating if backtracking is to
                              occur in the fML- (1) or in the f-array (0);
                              2: (i,j) is a pair; 3/4: fc fragments left/right
                              of the cut */
    if (ml==2) {
      base_pair2[++b].i = i;
      base_pair2[b].j   = j;
      goto repeat1;
    }

    if (j < i+TURN+1) continue; /* no more pairs in this interval */

    if (ml==0) {fij = f5[j]; fi = f5[j-1];}
    else if (ml==1) {fij = fML[indx[j]+i]; fi = fML[indx[j-1]+i]+P->MLbase;}
    else /* 3 or 4 */ {
      fij = fc[j];
      fi = (ml==3) ? INF : fc[j-1];
    }

    if (fij == fi) {  /* 3' end is unpaired */
      sector[++s].i = i;
      sector[s].j   = j-1;
      sector[s].ml  = ml;
      continue;
    }

    if (ml==0 || ml==4) { /* backtrack in f5 or fc[i=cut,j>cut] */
      int *ff;
      ff = (ml==4) ? fc : f5;
      switch(dangle_model){
        case 0:   /* j or j-1 is paired. Find pairing partner */
                  for (k=j-TURN-1,traced=0; k>=i; k--) {
                    int cc;
                    if(with_gquad){
                      if(fij == ff[k-1] + ggg[indx[j]+k]){
                        /* found the decomposition */
                        traced = j; jj = k - 1; gq = 1;
                        break;
                      }
                    }
                    type = ptype[indx[j]+k];
                    if(type){
                      cc = c[indx[j]+k];
                      if(!SAME_STRAND(k,j)) cc += P->DuplexInit;
                      if(fij == ff[k-1] + cc + E_ExtLoop(type, -1, -1, P)){
                        traced = j; jj = k-1;
                      }
                    }
                    if(traced) break;
                  }
                  break;
        case 2:   /* j or j-1 is paired. Find pairing partner */
                  for (k=j-TURN-1,traced=0; k>=i; k--) {
                    int cc;
                    if(with_gquad){
                      if(fij == ff[k-1] + ggg[indx[j]+k]){
                        /* found the decomposition */
                        traced = j; jj = k - 1; gq = 1;
                        break;
                      }
                    }
                    type = ptype[indx[j]+k];
                    if(type){
                      cc = c[indx[j]+k];
                      if(!SAME_STRAND(k,j)) cc += P->DuplexInit;
                      if(fij == ff[k-1] + cc + E_ExtLoop(type, (k>1) && SAME_STRAND(k-1,k) ? S1[k-1] : -1, (j<length) && SAME_STRAND(j,j+1) ? S1[j+1] : -1, P)){
                        traced = j; jj = k-1;
                      }
                    }
                    if(traced) break;
                  }
                  break;
        default:  for(k=j-TURN-1,traced=0; k>=i; k--){
                    int cc;
                    type = ptype[indx[j]+k];
                    if(with_gquad){
                      if(fij == ff[k-1] + ggg[indx[j]+k]){
                        /* found the decomposition */
                        traced = j; jj = k - 1; gq = 1;
                        break;
                      }
                    }
                    if(type){
                      cc = c[indx[j]+k];
                      if(!SAME_STRAND(k,j)) cc += P->DuplexInit;
                      if(fij == ff[k-1] + cc + E_ExtLoop(type, -1, -1, P)){
                        traced = j; jj = k-1;
                        break;
                      }
                      if((k>1) && SAME_STRAND(k-1,k))
                        if(fij == ff[k-2] + cc + E_ExtLoop(type, S1[k-1], -1, P)){
                          traced=j; jj=k-2;
                          break;
                        }
                    }
                    type = ptype[indx[j-1]+k];
                    if(type && SAME_STRAND(j-1,j)){
                      cc = c[indx[j-1]+k];
                      if (!SAME_STRAND(k,j-1)) cc += P->DuplexInit; /*???*/
                      if (fij == cc + ff[k-1] + E_ExtLoop(type, -1, S1[j], P)){
                        traced=j-1; jj = k-1;
                        break;
                      }
                      if(k>i){
                        if (fij == ff[k-2] + cc + E_ExtLoop(type, SAME_STRAND(k-1,k) ? S1[k-1] : -1, S1[j], P)){
                          traced=j-1; jj=k-2;
                          break;
                        }
                      }
                    }
                  }
                  break;
      }

      if (!traced) nrerror("backtrack failed in f5 (or fc)");
      sector[++s].i = i;
      sector[s].j   = jj;
      sector[s].ml  = ml;

      i=k; j=traced;

      if(with_gquad && gq){
        /* goto backtrace of gquadruplex */
        goto repeat_gquad;
      }

      base_pair2[++b].i = i;
      base_pair2[b].j   = j;
      goto repeat1;
    }
    else if (ml==3) { /* backtrack in fc[i<cut,j=cut-1] */
      if (fc[i] == fc[i+1]) { /* 5' end is unpaired */
        sector[++s].i = i+1;
        sector[s].j   = j;
        sector[s].ml  = ml;
        continue;
      }
      /* i or i+1 is paired. Find pairing partner */
      switch(dangle_model){
        case 0:   for (k=i+TURN+1, traced=0; k<=j; k++){
                    jj=k+1;
                    type = ptype[indx[k]+i];
                    if (type) {
                      if(fc[i] == fc[k+1] + c[indx[k]+i] + E_ExtLoop(type, -1, -1, P)){
                        traced = i;
                      }
                    } else if (with_gquad){
                      if(fc[i] == fc[k+1] + ggg[indx[k]+i]){
                        traced = i;
                        gq = 1;
                        break;
                      }
                    }
                    if (traced) break;
                  }
                  break;
        case 2:   for (k=i+TURN+1, traced=0; k<=j; k++){
                    jj=k+1;
                    type = ptype[indx[k]+i];
                    if(type){
                      if(fc[i] == fc[k+1] + c[indx[k]+i] + E_ExtLoop(type,(i>1 && SAME_STRAND(i-1,i)) ? S1[i-1] : -1, SAME_STRAND(k,k+1) ? S1[k+1] : -1, P)){
                        traced = i;
                      }
                    } else if (with_gquad){
                      if(fc[i] == fc[k+1] + ggg[indx[k]+i]){
                        traced = i;
                        gq = 1;
                        break;
                      }
                    }
                    if (traced) break;
                  }
                  break;
        default:  for(k=i+TURN+1, traced=0; k<=j; k++){
                    jj=k+1;
                    type = ptype[indx[k]+i];
                    if(type){
                      if(fc[i] == fc[k+1] + c[indx[k]+i] + E_ExtLoop(type, -1, -1, P)){
                        traced = i;
                        break;
                      }
                      else if(fc[i] == fc[k+2] + c[indx[k]+i] + E_ExtLoop(type, -1, SAME_STRAND(k,k+1) ? S1[k+1] : -1, P)){
                        traced = i; jj=k+2;
                        break;
                      }
                    } else if (with_gquad){
                      if(fc[i] == fc[k+1] + ggg[indx[k]+i]){
                        traced = i;
                        gq = 1;
                        break;
                      }
                    }
                    type = ptype[indx[k]+i+1];
                    if(type){
                      if(fc[i] == fc[k+1] + c[indx[k]+i+1] + E_ExtLoop(type, SAME_STRAND(i, i+1) ? S1[i] : -1, -1, P)){
                        traced = i+1;
                        break;
                      }
                      if(k<j){
                        if(fc[i] == fc[k+2] + c[indx[k]+i+1] + E_ExtLoop(type, SAME_STRAND(i, i+1) ? S1[i] : -1, SAME_STRAND(k, k+1) ? S1[k+1] : -1, P)){
                          traced = i+1; jj=k+2;
                          break;
                        }
                      }
                    }
                  }
                  break;
      }

      if (!traced) nrerror("backtrack failed in fc[] 5' of cut");
      sector[++s].i = jj;
      sector[s].j   = j;
      sector[s].ml  = ml;

      j=k; i=traced;

      if(with_gquad && gq){
        /* goto backtrace of gquadruplex */
        goto repeat_gquad;
      }

      base_pair2[++b].i = i;
      base_pair2[b].j   = j;
      goto repeat1;
    }
    else { /* true multi-loop backtrack in fML */
      if (fML[indx[j]+i+1]+P->MLbase == fij) { /* 5' end is unpaired */
        sector[++s].i = i+1;
        sector[s].j   = j;
        sector[s].ml  = ml;
        continue;
      }

      if(with_gquad){
        if(fij == ggg[indx[j]+i] + E_MLstem(0, -1, -1, P)){
          /* go to backtracing of quadruplex */
          goto repeat_gquad;
        }
      }

      tt  = ptype[indx[j]+i];
      cij = c[indx[j]+i];
      switch(dangle_model){
        case 0:   if(fij == cij + E_MLstem(tt, -1, -1, P)){
                    base_pair2[++b].i = i;
                    base_pair2[b].j   = j;
                    goto repeat1;
                  }
                  break;
        case 2:   if(fij == cij + E_MLstem(tt, (i>1) ? S1[i-1] : -1, (j<length) ? S1[j+1] : -1, P)){
                    base_pair2[++b].i = i;
                    base_pair2[b].j   = j;
                    goto repeat1;
                  }
                  break;
        default:  if(fij == cij + E_MLstem(tt, -1, -1, P)){
                    base_pair2[++b].i = i;
                    base_pair2[b].j   = j;
                    goto repeat1;
                  }
                  tt = ptype[indx[j]+i+1];
                  if(fij == c[indx[j]+i+1] + P->MLbase + E_MLstem(tt, S1[i], -1, P)){
                    i++;
                    base_pair2[++b].i = i;
                    base_pair2[b].j   = j;
                    goto repeat1;
                  }
                  tt = ptype[indx[j-1]+i];
                  if(fij == c[indx[j-1]+i] + P->MLbase + E_MLstem(tt, -1, S1[j], P)){
                    j--;
                    base_pair2[++b].i = i;
                    base_pair2[b].j   = j;
                    goto repeat1;
                  }
                  tt = ptype[indx[j-1]+i+1];
                  if(fij == c[indx[j-1]+i+1] + 2*P->MLbase + E_MLstem(tt, S1[i], S1[j], P)){
                    i++; j--;
                    base_pair2[++b].i = i;
                    base_pair2[b].j   = j;
                    goto repeat1;
                  }
                  break;
      }

      /* find next component of multiloop */
      for (k = i+1+TURN; k <= j-2-TURN; k++)
        if (fij == (fML[indx[k]+i]+fML[indx[j]+k+1]))
          break;

      if ((dangle_model==3)&&(k>j-2-TURN)) { /* must be coax stack */
        ml = 2;
        for (k = i+1+TURN; k <= j-2-TURN; k++) {
          type = ptype[indx[k]+i];  type= rtype[type];
          type_2 = ptype[indx[j]+k+1]; type_2= rtype[type_2];
          if (type && type_2)
            if (fij == c[indx[k]+i]+c[indx[j]+k+1]+P->stack[type][type_2]+
                       2*P->MLintern[1])
              break;
        }
      }

      sector[++s].i = i;
      sector[s].j   = k;
      sector[s].ml  = ml;
      sector[++s].i = k+1;
      sector[s].j   = j;
      sector[s].ml  = ml;

      if (k>j-2-TURN) nrerror("backtrack failed in fML");
      continue;
    }

  repeat1:

    /*----- begin of "repeat:" -----*/
    /* at this point (i,j) is known to pair; figure out which loop it closes */
    if (canonical) cij = c[indx[j]+i];

    type = ptype[indx[j]+i];

    bonus = 0;
    if ((BP[i]==j)||(BP[i]==-1)||(BP[i]==-2)) bonus -= BONUS;
    if ((BP[j]==-1)||(BP[j]==-3)) bonus -= BONUS;

    if (noLP)
      if (cij == c[indx[j]+i]) {
        /* (i.j) closes canonical structures, thus
           (i+1.j-1) must be a pair                */
        type_2 = ptype[indx[j-1]+i+1]; type_2 = rtype[type_2];
        cij -= P->stack[type][type_2] + bonus;
        base_pair2[++b].i = i+1;
        base_pair2[b].j   = j-1;
        i++; j--;
        canonical=0;
        goto repeat1;
      }
    canonical = 1;

    no_close = (((type==3)||(type==4))&&noGUclosure&&(bonus==0));
    if (SAME_STRAND(i,j)) {
      if (no_close) {
        if (cij == FORBIDDEN) continue;
      } else
        if (cij == E_Hairpin(j-i-1, type, S1[i+1], S1[j-1],string+i-1, P)+bonus)
          continue;
    }
    else {
      /* pair spans the cut: test the exterior-loop interpretation */
      if(dangle_model){
        if(cij == E_ExtLoop(rtype[type], SAME_STRAND(j-1,j) ? S1[j-1] : -1, SAME_STRAND(i,i+1) ? S1[i+1] : -1, P))
          continue;
      }
      else if(cij == E_ExtLoop(rtype[type], -1, -1, P))
        continue;
    }

    /* interior loop: search for the enclosed pair (p,q) */
    for (p = i+1; p <= MIN2(j-2-TURN,i+MAXLOOP+1); p++) {
      int minq;
      minq = j-i+p-MAXLOOP-2;
      if (minq<p+1+TURN) minq = p+1+TURN;
      for (q = j-1; q >= minq; q--) {

        type_2 = ptype[indx[q]+p];
        if (type_2==0) continue;
        type_2 = rtype[type_2];
        if (noGUclosure)
          if (no_close||(type_2==3)||(type_2==4))
            if ((p>i+1)||(q<j-1)) continue;  /* continue unless stack */

        /* energy = oldLoopEnergy(i, j, p, q, type, type_2); */
        if (SAME_STRAND(i,p) && SAME_STRAND(q,j))
          energy = E_IntLoop(p-i-1, j-q-1, type, type_2, S1[i+1], S1[j-1], S1[p-1], S1[q+1], P);
        else {
          energy = E_IntLoop_Co(rtype[type], rtype[type_2], i, j, p, q, cut_point, S1[i+1], S1[j-1], S1[p-1], S1[q+1], dangle_model, P);
        }

        new = energy+c[indx[q]+p]+bonus;
        traced = (cij == new);
        if (traced) {
          base_pair2[++b].i = p;
          base_pair2[b].j   = q;
          i = p, j = q;
          goto repeat1;
        }
      }
    }

    /* end of repeat: --------------------------------------------------*/

    /* (i.j) must close a fake or true multi-loop */
    tt = rtype[type];
    i1 = i+1;
    j1 = j-1;

    if(with_gquad){
      /*
        The case that is handled here actually resembles something like
        an interior loop where the enclosing base pair is of regular
        kind and the enclosed pair is not a canonical one but a g-quadruplex
        that should then be decomposed further...
      */
      if(SAME_STRAND(i,j)){
        if(backtrack_GQuad_IntLoop(cij - bonus, i, j, type, S, ggg, indx, &p, &q, P)){
          i = p; j = q;
          goto repeat_gquad;
        }
      }
    }

    /* fake multi-loop: the cut point lies inside the loop closed by (i,j) */
    if(!SAME_STRAND(i,j)){
      int ii, jj, decomp;
      ii = jj = 0;
      decomp = fc[i1] + fc[j1];
      switch(dangle_model){
        case 0:   if(cij == decomp + E_ExtLoop(tt, -1, -1, P)){
                    ii=i1, jj=j1;
                  }
                  break;
        case 2:   if(cij == decomp + E_ExtLoop(tt, SAME_STRAND(j-1,j) ? S1[j-1] : -1, SAME_STRAND(i,i+1) ? S1[i+1] : -1, P)){
                    ii=i1, jj=j1;
                  }
                  break;
        default:  if(cij == decomp + E_ExtLoop(tt, -1, -1, P)){
                    ii=i1, jj=j1;
                  }
                  else if(cij == fc[i+2] + fc[j-1] + E_ExtLoop(tt, -1, SAME_STRAND(i,i+1) ? S1[i+1] : -1, P)){
                    ii = i+2; jj = j1;
                  }
                  else if(cij == fc[i+1] + fc[j-2] + E_ExtLoop(tt, SAME_STRAND(j-1,j) ? S1[j-1] : -1, -1, P)){
                    ii = i1; jj = j-2;
                  }
                  else if(cij == fc[i+2] + fc[j-2] + E_ExtLoop(tt, SAME_STRAND(j-1,j) ? S1[j-1] : -1, SAME_STRAND(i,i+1) ? S1[i+1] : -1, P)){
                    ii = i+2; jj = j-2;
                  }
                  break;
      }
      if(ii){
        /* push the two fc fragments flanking the cut */
        sector[++s].i = ii;
        sector[s].j   = cut_point-1;
        sector[s].ml  = 3;
        sector[++s].i = cut_point;
        sector[s].j   = jj;
        sector[s].ml  = 4;
        continue;
      }
    }

    /* true multi-loop */
    mm = bonus + P->MLclosing;
    sector[s+1].ml = sector[s+2].ml = 1;

    /* precompute the four possible stem terms for the closing pair */
    int ml0   = E_MLstem(tt, -1, -1, P);
    int ml5   = E_MLstem(tt, SAME_STRAND(j-1,j) ? S1[j-1] : -1, -1, P);
    int ml3   = E_MLstem(tt, -1, SAME_STRAND(i,i+1) ? S1[i+1] : -1, P);
    int ml53  = E_MLstem(tt, SAME_STRAND(j-1,j) ? S1[j-1] : -1, SAME_STRAND(i,i+1) ? S1[i+1] : -1, P);

    for (traced = 0, k = i+2+TURN; k < j-2-TURN; k++) {
      switch(dangle_model){
        case 0:   /* no dangles */
                  if(cij == mm + fML[indx[k]+i+1] + fML[indx[j-1]+k+1] + ml0)
                    traced = i+1;
                  break;
        case 2:   /*double dangles */
                  if(cij == mm + fML[indx[k]+i+1] + fML[indx[j-1]+k+1] + ml53)
                    traced = i+1;
                  break;
        default:  /* normal dangles */
                  if(cij == mm + fML[indx[k]+i+1] + fML[indx[j-1]+k+1] + ml0){
                    traced = i+1;
                    break;
                  }
                  else if (cij == fML[indx[k]+i+2] + fML[indx[j-1]+k+1] + ml3 + mm + P->MLbase){
                    traced = i1 = i+2;
                    break;
                  }
                  else if (cij == fML[indx[k]+i+1] + fML[indx[j-2]+k+1] + ml5 + mm + P->MLbase){
                    traced = i1 = i+1;
                    j1 = j-2;
                    break;
                  }
                  else if (cij == fML[indx[k]+i+2] + fML[indx[j-2]+k+1] + ml53 + mm + 2*P->MLbase){
                    traced = i1 = i+2;
                    j1 = j-2;
                    break;
                  }
                  break;
      }
      if(traced) break;
      /* coaxial stacking of (i.j) with (i+1.k) or (k.j-1) */
      /* use MLintern[1] since coax stacked pairs don't get TerminalAU */
      if (dangle_model==3) {
        int en;
        type_2 = ptype[indx[k]+i+1]; type_2 = rtype[type_2];
        if (type_2) {
          en = c[indx[k]+i+1]+P->stack[type][type_2]+fML[indx[j-1]+k+1];
          if (cij == en+2*P->MLintern[1]+P->MLclosing) {
            ml = 2;
            sector[s+1].ml = 2;
            break;
          }
        }
        type_2 = ptype[indx[j-1]+k+1]; type_2 = rtype[type_2];
        if (type_2) {
          en = c[indx[j-1]+k+1]+P->stack[type][type_2]+fML[indx[k]+i+1];
          if (cij == en+2*P->MLintern[1]+P->MLclosing) {
            sector[s+2].ml = 2;
            break;
          }
        }
      }
    }

    if (k<=j-3-TURN) { /* found the decomposition */
      sector[++s].i = i1;
      sector[s].j   = k;
      sector[++s].i = k+1;
      sector[s].j   = j1;
    } else {
#if 0
      /* Y shaped ML loops don't work yet */
      if (dangle_model==3) {
        /* (i,j) must close a Y shaped ML loop with coax stacking */
        if (cij == fML[indx[j-2]+i+2] + mm + d3 + d5 + P->MLbase + P->MLbase) {
          i1 = i+2;
          j1 = j-2;
        } else if (cij == fML[indx[j-2]+i+1] + mm + d5 + P->MLbase)
          j1 = j-2;
        else if (cij == fML[indx[j-1]+i+2] + mm + d3 + P->MLbase)
          i1 = i+2;
        else /* last chance */
          if (cij != fML[indx[j-1]+i+1] + mm + P->MLbase)
            fprintf(stderr, "backtracking failed in repeat");
        /* if we arrive here we can express cij via fML[i1,j1]+dangles */
        sector[++s].i = i1;
        sector[s].j   = j1;
      }
      else
#endif
        nrerror("backtracking failed in repeat");
    }

    continue; /* this is a workarround to not accidentally proceed in the following block */

  repeat_gquad:
    /*
      now we do some fancy stuff to backtrace the stacksize and linker lengths
      of the g-quadruplex that should reside within position i,j
    */
    {
      int l[3], L, a;
      L = -1;
      get_gquad_pattern_mfe(S, i, j, P, &L, l);
      if(L != -1){
        /* fill the G's of the quadruplex into base_pair2 (as self-pairs) */
        for(a=0;a<L;a++){
          base_pair2[++b].i = i+a;
          base_pair2[b].j   = i+a;
          base_pair2[++b].i = i+L+l[0]+a;
          base_pair2[b].j   = i+L+l[0]+a;
          base_pair2[++b].i = i+L+l[0]+L+l[1]+a;
          base_pair2[b].j   = i+L+l[0]+L+l[1]+a;
          base_pair2[++b].i = i+L+l[0]+L+l[1]+L+l[2]+a;
          base_pair2[b].j   = i+L+l[0]+L+l[1]+L+l[2]+a;
        }
        goto repeat_gquad_exit;
      }
      nrerror("backtracking failed in repeat_gquad");
    }
  repeat_gquad_exit:
    asm("nop"); /* a label must precede a statement */

  } /* end >> while (s>0) << */

  base_pair2[0].i = b;    /* save the total number of base pairs */
}

/* Fill `array` (f5 or fc) at position i with the optimal energy of the
   free (unpaired-end) fragment between `start` and i; scans in both
   directions depending on whether i lies before or after `start`. */
PRIVATE void free_end(int *array, int i, int start) {
  int   inc, type, energy, length, j, left, right;
  int   dangle_model = P->model_details.dangles;

  inc     = (i>start)? 1:-1;
  length  = S[0];

  if (i==start) array[i]=0;
  else array[i] = array[i-inc];
  if (inc>0) {
    left = start; right=i;
  } else {
    left = i; right = start;
  }

  for (j=start; inc*(i-j)>TURN; j+=inc) {
    int   ii, jj;
    short si, sj;
    if (i>j) { ii = j; jj = i;}   /* inc>0 */
    else     { ii = i; jj = j;}   /* inc<0 */
    type = ptype[indx[jj]+ii];
    if (type) {  /* i is paired with j */
      si = (ii>1) && SAME_STRAND(ii-1,ii) ? S1[ii-1] : -1;
      sj = (jj<length) && SAME_STRAND(jj,jj+1) ? S1[jj+1] : -1;
      energy = c[indx[jj]+ii];
      switch(dangle_model){
        case 0:   array[i] = MIN2(array[i], array[j-inc] + energy + E_ExtLoop(type, -1, -1, P));
                  break;
        case 2:   array[i] = MIN2(array[i], array[j-inc] + energy + E_ExtLoop(type, si, sj, P));
                  break;
        default:  array[i] = MIN2(array[i], array[j-inc] + energy + E_ExtLoop(type, -1, -1, P));
                  if(inc > 0){
                    if(j > left)
                      array[i] = MIN2(array[i], array[j-2] + energy + E_ExtLoop(type, si, -1, P));
                  } else if(j < right)
                    array[i] = MIN2(array[i], array[j+2] + energy + E_ExtLoop(type, -1, sj, P));
                  break;
      }
    }
    if(with_gquad){
      /* a gquad may replace the helix at the fragment end */
      if(SAME_STRAND(ii, jj))
        array[i] = MIN2(array[i], array[j-inc] + ggg[indx[jj]+ii]);
    }
    if (dangle_model%2==1) {
      /* interval ends in a dangle (i.e. i-inc is paired) */
      if (i>j) { ii = j; jj = i-1;}   /* inc>0 */
      else     { ii = i+1; jj = j;}   /* inc<0 */
      type = ptype[indx[jj]+ii];
      if (!type) continue;
      si = (ii > left) && SAME_STRAND(ii-1,ii) ? S1[ii-1] : -1;
      sj = (jj < right) && SAME_STRAND(jj,jj+1) ? S1[jj+1] : -1;
      energy = c[indx[jj]+ii];
      if(inc>0)
        array[i] = MIN2(array[i], array[j - inc] + energy + E_ExtLoop(type, -1, sj, P));
      else
        array[i] = MIN2(array[i], array[j - inc] + energy + E_ExtLoop(type, si, -1, P));
      if(j!= start){ /* dangle_model on both sides */
        array[i] = MIN2(array[i], array[j-2*inc] + energy + E_ExtLoop(type, si, sj, P));
      }
    }
  }
}

/* rescale energy parameters with default model details */
PUBLIC void update_cofold_params(void){
  update_cofold_params_par(NULL);
}

/* replace the current parameter set P, either with a copy of `parameters`
   or with freshly scaled defaults at the global temperature */
PUBLIC void update_cofold_params_par(paramT *parameters){
  if(P) free(P);
  if(parameters){
    P = get_parameter_copy(parameters);
  } else {
    model_detailsT md;
    set_model_details(&md);
    P = get_scaled_parameters(temperature, md);
  }
  make_pair_matrix();
  if (init_length < 0) init_length=0;
}

/*---------------------------------------------------------------------------*/

/* Precompute the pair-type matrix ptype[indx[j]+i] for all i<j, marking
   isolated pairs as unpairable when noLP is set, and apply user structure
   constraints (removing non-canonical constrained pairs if requested). */
PRIVATE void make_ptypes(const short *S, const char *structure) {
  int n,i,j,k,l;
  int noLP = P->model_details.noLP;

  n=S[0];
  /* walk each diagonal outward so a pair knows its potential neighbors */
  for (k=1; k<n-TURN; k++)
    for (l=1; l<=2; l++) {
      int type,ntype=0,otype=0;
      i=k; j = i+TURN+l; if (j>n) continue;
      type = pair[S[i]][S[j]];
      while ((i>=1)&&(j<=n)) {
        if ((i>1)&&(j<n)) ntype = pair[S[i-1]][S[j+1]];
        if (noLP && (!otype) && (!ntype))
          type = 0; /* i.j can only form isolated pairs */
        ptype[indx[j]+i] = (char) type;
        otype =  type;
        type  = ntype;
        i--; j++;
      }
    }

  if (struct_constrained && (structure != NULL)){
    constrain_ptypes(structure, (unsigned int)n, ptype, BP, TURN, 0);
    if(P->model_details.canonicalBPonly)
      for(i=1;i<n;i++)
        for(j=i+1;j<=n;j++)
          if(ptype[indx[j]+i] == 7){
            warn_user("removing non-canonical base pair from constraint");
            ptype[indx[j]+i] = 0;
          }
  }
}

PUBLIC void get_monomere_mfes(float *e1, float *e2) {
  /*exports monomere free energies*/
  *e1 = mfe1;
  *e2 = mfe2;
}

PRIVATE void backtrack(const char *sequence) {
  /*routine to call backtrack_co from 1 to n, backtrack type??*/
  backtrack_co(sequence, 0,0);
}

/* qsort comparator for bondT pairs (definition continues beyond this
   chunk — truncated here) */
PRIVATE int comp_pair(const void *A, const void *B) {
  bondT *x,*y;
  int ex, ey;
  x = (bondT *) A;
  y = (bondT *) B;
  ex =
c[indx[x->j]+x->i]+c[indx[x->i+length]+x->j]; ey = c[indx[y->j]+y->i]+c[indx[y->i+length]+y->j]; if (ex>ey) return 1; if (ex<ey) return -1; return (indx[x->j]+x->i - indx[y->j]+y->i); } PUBLIC SOLUTION *zukersubopt(const char *string) { return zukersubopt_par(string, NULL); } PUBLIC SOLUTION *zukersubopt_par(const char *string, paramT *parameters){ /* Compute zuker suboptimal. Here, we're abusing the cofold() code "double" sequence, compute dimerarray entries, track back every base pair. This is slightly wasteful compared to the normal solution */ char *doubleseq, *structure, *mfestructure, **todo; int i, j, counter, num_pairs, psize, p; float energy; SOLUTION *zukresults; bondT *pairlist; num_pairs = counter = 0; zuker = 1; length = (int)strlen(string); doubleseq = (char *)space((2*length+1)*sizeof(char)); mfestructure = (char *) space((unsigned) 2*length+1); structure = (char *) space((unsigned) 2*length+1); zukresults = (SOLUTION *)space(((length*(length-1))/2)*sizeof(SOLUTION)); mfestructure[0] = '\0'; BP = (int *)space(sizeof(int)*(2*length+2)); /* double the sequence */ strcpy(doubleseq,string); strcat(doubleseq,string); cut_point = length + 1; /* get mfe and do forward recursion */ #ifdef _OPENMP /* always init everything since all global static variables are uninitialized when entering a thread */ init_cofold(2 * length, parameters); #else if(parameters) init_cofold(2 * length, parameters); else if ((2 * length) > init_length) init_cofold(2 * length, parameters); else if (fabs(P->temperature - temperature)>1e-6) update_cofold_params_par(parameters); #endif S = encode_sequence(doubleseq, 0); S1 = encode_sequence(doubleseq, 1); S1[0] = S[0]; /* store length at pos. 0 */ make_ptypes(S, NULL); /* no constraint folding possible (yet?) 
with zukersubopt */ (void)fill_arrays(doubleseq); psize = length; pairlist = (bondT *) space(sizeof(bondT)*(psize+1)); todo = (char **) space(sizeof(char *)*(length+1)); for (i=1; i<length; i++) { todo[i] = (char *) space(sizeof(char)*(length+1)); } /* Make a list of all base pairs */ for (i=1; i<length; i++) { for (j=i+TURN2+1/*??*/; j<=length; j++) { if (ptype[indx[j]+i]==0) continue; if (num_pairs>=psize) { psize = 1.2*psize + 32; pairlist = xrealloc(pairlist, sizeof(bondT)*(psize+1)); } pairlist[num_pairs].i = i; pairlist[num_pairs++].j = j; todo[i][j]=1; } } qsort(pairlist, num_pairs, sizeof(bondT), comp_pair); for (p=0; p<num_pairs; p++) { i=pairlist[p].i; j=pairlist[p].j; if (todo[i][j]) { int k; sector[1].i = i; sector[1].j = j; sector[1].ml = 2; backtrack_co(doubleseq, 1,0); sector[1].i = j; sector[1].j = i + length; sector[1].ml = 2; backtrack_co(doubleseq, 1,base_pair2[0].i); energy = c[indx[j]+i]+c[indx[i+length]+j]; parenthesis_zuker(structure, base_pair2, length); zukresults[counter].energy = energy; zukresults[counter++].structure = strdup(structure); for (k = 1; k <= base_pair2[0].i; k++) { /* mark all pairs in structure as done */ int x,y; x=base_pair2[k].i; y=base_pair2[k].j; if (x>length) x-=length; if (y>length) y-=length; if (x>y) { int temp; temp=x; x=y; y=temp; } todo[x][y] = 0; } } } /*free zeugs*/ free(pairlist); for (i=1; i<length; i++) free(todo[i]); free(todo); free(structure); free(mfestructure); free(doubleseq); zuker=0; free(S); free(S1); free(BP); return zukresults; } /*###########################################*/ /*# deprecated functions below #*/ /*###########################################*/ PUBLIC void initialize_cofold(int length){ /* DO NOTHING */ }
critical.c
// OpenMP Critical Example
// Inclusions
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

// Main
//
// Fills an array with the values 1 to 25, has each thread accumulate a
// private partial sum over its share of the iterations, then folds the
// partial sums into the shared total inside a named critical section so
// only one thread at a time updates (and reports) the running total.
int main( int argc, char** argv )
{
    int *a = malloc( 25 * sizeof( int ) ); // Array of Values
    int i = 0;                             // Loop Iterator
    int n = 25;                            // Number of Iteratins
    int localSum = 0;                      // Private Local Sum for Each Core
    int totalSum = 0;                      // Shared Total Sum for All Cores
    int thread = 0;                        // Thread Number

    // Robustness fix: the original dereferenced the malloc() result without
    // checking it; fail cleanly if the allocation did not succeed.
    if( a == NULL )
    {
        fprintf( stderr, "Allocation failed.\n" );
        return 1;
    }

    // Fill Array with Values 1 to 25
    for( i = 0; i < n; i++ )
    {
        a[i] = i + 1;
    }

    // Parallel Region
    // Share Number of Iterations, Array, and the Total Sum
    // Keep the Thread Number and Local Sum Private
    #pragma omp parallel shared( n, a, totalSum ) private( thread, localSum )
    {
        thread = omp_get_thread_num( ); // Get the Thread Number
        localSum = 0;                   // Preset Local Sum to Zero

        // Parallelize the Next For (the loop variable is implicitly private)
        #pragma omp for
        for( i = 0; i < n; i++ )
        {
            localSum += a[i];           // Accumulate Array Values into Local Sum
        }

        // Critical Region - one core at a time.  (The name "totalSum" lives
        // in the critical-name namespace, distinct from the variable.)
        #pragma omp critical( totalSum )
        {
            totalSum += localSum;       // Accumulate Local Sum Values into Total Sum
            printf( "Thread %d has local sum %d and adds to total sum %d.\n", thread, localSum, totalSum );
        }
    }

    printf( "Total sum at end is %d.\n", totalSum );
    free( a );
    return 0;
}
// End critical.c - EWG SDG
chan_demo4.c
#include <stdio.h> #include <math.h> #define data_t int #define prefix int #include <chan.h> #undef prefix #undef data_t #define data_t float #define prefix float #include <chan.h> #undef prefix #undef data_t void produce(chan_int_t *ch) { for (int i = 0; i < 100; i++) { chan_int_send(ch, i); } chan_int_close(ch); } void consume(chan_int_t *in, chan_float_t *out) { float total = 0.0; int x; while (chan_int_recv(in, &x) == CHAN_SUCCESS) { total += sqrt((float) x); } chan_float_send(out, total); } int main(void) { chan_int_t *ch1 = chan_int_init(10); chan_float_t *ch2 = chan_float_init(0); int nth = 6; #pragma omp parallel sections { #pragma omp section produce(ch1); #pragma omp section consume(ch1, ch2); #pragma omp section consume(ch1, ch2); #pragma omp section consume(ch1, ch2); #pragma omp section consume(ch1, ch2); #pragma omp section consume(ch1, ch2); #pragma omp section consume(ch1, ch2); #pragma omp section { float total = 0.0; for (int th = 0; th < nth; th++) { float v; chan_float_recv(ch2, &v); total += v; } printf("total = %20.8f\n", total); fflush(stdout); } } chan_int_destroy(&ch1); chan_float_destroy(&ch2); return 0; }
GB_unaryop__minv_uint64_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__minv_uint64_int16
// op(A') function: GB_tran__minv_uint64_int16

// C type: uint64_t
// A type: int16_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 64)

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 64) ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    /* aij = Ax [pA] */                     \
    GB_GETA (aij, Ax, pA) ;                 \
    /* Cx [pC] = op (cast (aij)) */         \
    GB_CASTING (x, aij) ;                   \
    GB_OP (GB_CX (pC), x) ;                 \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_uint64_int16
(
    uint64_t *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel: each Cx [p] depends only on Ax [p], so a
    // static schedule over the entries is sufficient
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_uint64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the actual loop body lives in the shared transpose template
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
conv_dw_dilation_kernel_arm.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: haoluo@openailab.com */ #ifndef __CONV_DW_DILATION_KERNEL_ARM_H_ #define __CONV_DW_DILATION_KERNEL_ARM_H_ #include "graph/tensor.h" #include "graph/node.h" #include "graph/graph.h" #include "convolution_param.h" #include "conv_dw_k5_k7_kernel_arm.h" int conv_dw_dilation_run(float* input_buf, float* weight_buf, float* bias, float* output_buf, int input_h, int input_w, int channel, int pad, int activation, int num_thread) { int channel_size = input_h * input_w; int mid_w = input_w - pad * 2; int mid_block_end = (mid_w & -4) + pad; int mid_end = mid_w + pad; int w = 0; #pragma omp parallel for num_threads(num_thread) for (int c = 0; c < channel; c++) { float* input_buf_c = input_buf + c * channel_size; float* output_buf_c = output_buf + c * channel_size; float* weight_buf_c = weight_buf + c * 9; float bias_c = bias ? 
bias[c] : 0; for (int h = 0; h < pad; h++) { for (w = 0; w < pad; w++) { float tmp = bias_c; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad]; tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w]; tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); } for (; w < mid_block_end; w += 4) { float32x4_t tmp_4 = vdupq_n_f32(bias_c); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[3]), vld1q_f32(input_buf_c + h * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[4]), vld1q_f32(input_buf_c + h * input_w + w)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[5]), vld1q_f32(input_buf_c + h * input_w + w + pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[6]), vld1q_f32(input_buf_c + (h + pad) * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[7]), vld1q_f32(input_buf_c + (h + pad) * input_w + w)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[8]), vld1q_f32(input_buf_c + (h + pad) * input_w + w + pad)); tmp_4 = vector_activation(tmp_4, activation); vst1q_f32(output_buf_c + h * input_w + w, tmp_4); } for (; w < mid_end; w++) { float tmp = bias_c; tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad]; tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad]; tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w]; tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } for (; w < input_w; w++) { float tmp = bias_c; tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad]; tmp += weight_buf_c[7] * input_buf_c[(h + pad) * 
input_w + w]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } } for (int h = pad; h < input_h - pad; h++) { for (w = 0; w < pad; w++) { float tmp = bias_c; tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w]; tmp += weight_buf_c[2] * input_buf_c[(h - pad) * input_w + w + pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad]; tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w]; tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } for (; w < mid_block_end; w += 4) { float32x4_t tmp_4 = vdupq_n_f32(bias_c); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[0]), vld1q_f32(input_buf_c + (h - pad) * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[1]), vld1q_f32(input_buf_c + (h - pad) * input_w + w)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[2]), vld1q_f32(input_buf_c + (h - pad) * input_w + w + pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[3]), vld1q_f32(input_buf_c + h * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[4]), vld1q_f32(input_buf_c + h * input_w + w)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[5]), vld1q_f32(input_buf_c + h * input_w + w + pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[6]), vld1q_f32(input_buf_c + (h + pad) * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[7]), vld1q_f32(input_buf_c + (h + pad) * input_w + w)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[8]), vld1q_f32(input_buf_c + (h + pad) * input_w + w + pad)); tmp_4 = vector_activation(tmp_4, activation); vst1q_f32(output_buf_c + h * input_w + w, tmp_4); } for (; w < mid_end; w++) { float tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad]; tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w]; tmp += weight_buf_c[2] * input_buf_c[(h - pad) * 
input_w + w + pad]; tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad]; tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad]; tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w]; tmp += weight_buf_c[8] * input_buf_c[(h + pad) * input_w + w + pad]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } for (; w < input_w; w++) { float tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad]; tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w]; tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[6] * input_buf_c[(h + pad) * input_w + w - pad]; tmp += weight_buf_c[7] * input_buf_c[(h + pad) * input_w + w]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } } for (int h = input_h - pad; h < input_h; h++) { for (w = 0; w < pad; w++) { float tmp = bias_c; tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w]; tmp += weight_buf_c[2] * input_buf_c[(h - pad) * input_w + w + pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } for (; w < mid_block_end; w += 4) { float32x4_t tmp_4 = vdupq_n_f32(bias_c); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[0]), vld1q_f32(input_buf_c + (h - pad) * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[1]), vld1q_f32(input_buf_c + (h - pad) * input_w + w)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[2]), vld1q_f32(input_buf_c + (h - pad) * input_w + w + pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[3]), vld1q_f32(input_buf_c + h * input_w + w - pad)); tmp_4 = vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[4]), vld1q_f32(input_buf_c + h * input_w + w)); tmp_4 = 
vmlaq_f32(tmp_4, vdupq_n_f32(weight_buf_c[5]), vld1q_f32(input_buf_c + h * input_w + w + pad)); tmp_4 = vector_activation(tmp_4, activation); vst1q_f32(output_buf_c + h * input_w + w, tmp_4); } for (; w < mid_end; w++) { float tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad]; tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w]; tmp += weight_buf_c[2] * input_buf_c[(h - pad) * input_w + w + pad]; tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; tmp += weight_buf_c[5] * input_buf_c[h * input_w + w + pad]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } for (; w < input_w; w++) { float tmp = bias_c; tmp += weight_buf_c[0] * input_buf_c[(h - pad) * input_w + w - pad]; tmp += weight_buf_c[1] * input_buf_c[(h - pad) * input_w + w]; tmp += weight_buf_c[3] * input_buf_c[h * input_w + w - pad]; tmp += weight_buf_c[4] * input_buf_c[h * input_w + w]; output_buf_c[h * input_w + w] = elem_activation(tmp, activation); ; } } } return 0; } #endif
mandatory_but_no_devices.c
// Check that mandatory offloading causes various offloading directives to fail
// when omp_get_num_devices() == 0 even if the requested device is the initial
// device.  This behavior is proposed for OpenMP 5.2 in OpenMP spec github
// issue 2669.

// RUN: %libomptarget-compile-nvptx64-nvidia-cuda -DDIR=target
// RUN: env OMP_TARGET_OFFLOAD=mandatory CUDA_VISIBLE_DEVICES= \
// RUN:   %libomptarget-run-fail-nvptx64-nvidia-cuda 2>&1 | \
// RUN:   %fcheck-nvptx64-nvidia-cuda

// RUN: %libomptarget-compile-nvptx64-nvidia-cuda -DDIR='target teams'
// RUN: env OMP_TARGET_OFFLOAD=mandatory CUDA_VISIBLE_DEVICES= \
// RUN:   %libomptarget-run-fail-nvptx64-nvidia-cuda 2>&1 | \
// RUN:   %fcheck-nvptx64-nvidia-cuda

// RUN: %libomptarget-compile-nvptx64-nvidia-cuda -DDIR='target data map(X)'
// RUN: env OMP_TARGET_OFFLOAD=mandatory CUDA_VISIBLE_DEVICES= \
// RUN:   %libomptarget-run-fail-nvptx64-nvidia-cuda 2>&1 | \
// RUN:   %fcheck-nvptx64-nvidia-cuda

// RUN: %libomptarget-compile-nvptx64-nvidia-cuda \
// RUN:   -DDIR='target enter data map(to:X)'
// RUN: env OMP_TARGET_OFFLOAD=mandatory CUDA_VISIBLE_DEVICES= \
// RUN:   %libomptarget-run-fail-nvptx64-nvidia-cuda 2>&1 | \
// RUN:   %fcheck-nvptx64-nvidia-cuda

// RUN: %libomptarget-compile-nvptx64-nvidia-cuda \
// RUN:   -DDIR='target exit data map(from:X)'
// RUN: env OMP_TARGET_OFFLOAD=mandatory CUDA_VISIBLE_DEVICES= \
// RUN:   %libomptarget-run-fail-nvptx64-nvidia-cuda 2>&1 | \
// RUN:   %fcheck-nvptx64-nvidia-cuda

// RUN: %libomptarget-compile-nvptx64-nvidia-cuda \
// RUN:   -DDIR='target update to(X)'
// RUN: env OMP_TARGET_OFFLOAD=mandatory CUDA_VISIBLE_DEVICES= \
// RUN:   %libomptarget-run-fail-nvptx64-nvidia-cuda 2>&1 | \
// RUN:   %fcheck-nvptx64-nvidia-cuda

// RUN: %libomptarget-compile-nvptx64-nvidia-cuda \
// RUN:   -DDIR='target update from(X)'
// RUN: env OMP_TARGET_OFFLOAD=mandatory CUDA_VISIBLE_DEVICES= \
// RUN:   %libomptarget-run-fail-nvptx64-nvidia-cuda 2>&1 | \
// RUN:   %fcheck-nvptx64-nvidia-cuda

// REQUIRES: nvptx64-nvidia-cuda

#include <omp.h>
#include <stdio.h>

// CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory

// DIR is injected on the compile line (-DDIR=...), so this single body
// exercises each offloading directive in turn; the associated statement
// is the empty statement ";".  Requesting the initial device must still
// abort under mandatory offloading when no devices are visible.
int main(void) {
  int X;
#pragma omp DIR device(omp_get_initial_device())
  ;
  return 0;
}
perturbations.c
/** @file perturbations.c Documented perturbation module * * Julien Lesgourgues, 23.09.2010 * * Deals with the perturbation evolution. * This mdule has two purposes: * * - at the beginning, to initialize the perturbations, i.e. to * integrate the perturbation equations, and store temporarily the terms * contributing to the source functions as a function of conformal * time. Then, to perform a few manipulations of these terms in order to * infer the actual source functions \f$ S^{X} (k, \tau) \f$, and to * store them as a function of conformal time inside an interpolation * table. * * - at any time in the code, to evaluate the source functions at a * given conformal time (by interpolating within the interpolation * table). * * Hence the following functions can be called from other modules: * * -# perturb_init() at the beginning (but after background_init() and thermodynamics_init()) * -# perturb_sources_at_tau() at any later time * -# perturb_free() at the end, hen no more calls to perturb_sources_at_tau() are needed */ #include "perturbations.h" /** * Source function \f$ S^{X} (k, \tau) \f$ at a given conformal time tau. * * Evaluate source functions at given conformal time tau by reading * the pre-computed table and interpolating. 
 *
 * @param ppt        Input : pointer to perturbation structure containing interpolation tables
 * @param index_md   Input : index of requested mode
 * @param index_ic   Input : index of requested initial condition
 * @param index_type Input : index of requested source function type
 * @param tau        Input : any value of conformal time
 * @param psource    Output: vector (already allocated) of source function as a function of k
 * @return the error status
 */

int perturb_sources_at_tau(
                           struct perturbs * ppt,
                           int index_md,
                           int index_ic,
                           int index_type,
                           double tau,
                           double * psource
                           ) {

  /** Summary: */

  /** - interpolate in pre-computed table contained in ppt */
  /* thin wrapper: all the work is the spline interpolation over the
     tau_sampling axis of the requested (mode, ic, type) source table */
  class_call(array_interpolate_two_bis(ppt->tau_sampling,
                                       1,
                                       0,
                                       ppt->sources[index_md][index_ic*ppt->tp_size[index_md]+index_type],
                                       ppt->k_size[index_md],
                                       ppt->tau_size,
                                       tau,
                                       psource,
                                       ppt->k_size[index_md],
                                       ppt->error_message),
             ppt->error_message,
             ppt->error_message);

  return _SUCCESS_;

}

/**
 * Initialize the perturbs structure, and in particular the table of source functions.
 *
 * Main steps:
 *
 * - given the values of the flags describing which kind of
 *   perturbations should be considered (modes: scalar/vector/tensor,
 *   initial conditions, type of source functions needed...),
 *   initialize indices and wavenumber list
 *
 * - define the time sampling for the output source functions
 *
 * - for each mode (scalar/vector/tensor): initialize the indices of
 *   relevant perturbations, integrate the differential system,
 *   compute and store the source functions.
 *
 * @param ppr Input : pointer to precision structure
 * @param pba Input : pointer to background structure
 * @param pth Input : pointer to thermodynamics structure
 * @param ppt Output: Initialized perturbation structure
 * @return the error status
 */

int perturb_init(
                 struct precision * ppr,
                 struct background * pba,
                 struct thermo * pth,
                 struct perturbs * ppt
                 ) {

  /** Summary: */

  /** - define local variables */

  /* running index for modes */
  int index_md;
  /* running index for initial conditions */
  int index_ic;
  /* running index for wavenumbers */
  int index_k;
  /* pointer to one struct perturb_workspace per thread (one if no openmp) */
  struct perturb_workspace ** pppw;
  /* number of threads (always one if no openmp) */
  int number_of_threads=1;
  /* index of the thread (always 0 if no openmp) */
  int thread=0;

  /* This code can be optionally compiled with the openmp option for
     parallel computation. Inside parallel regions, the use of the
     command "return" is forbidden. For error management, instead of
     "return _FAILURE_", we will set the variable below to "abort =
     _TRUE_". This will lead to a "return _FAILURE_" jus after leaving
     the parallel region. */
  int abort;

  /* unsigned integer that will be set to the size of the workspace */
  size_t sz;

#ifdef _OPENMP
  /* instrumentation times */
  double tstart, tstop, tspent;
#endif

  /** - preliminary checks */

  if (ppt->has_perturbations == _FALSE_) {
    if (ppt->perturbations_verbose > 0)
      printf("No sources requested. Perturbation module skipped.\n");
    return _SUCCESS_;
  }
  else {
    if (ppt->perturbations_verbose > 0)
      printf("Computing sources\n");
  }

  /* sanity checks on the requested gauge/approximation combinations;
     class_test() returns _FAILURE_ with the given message when the
     condition is true */
  class_test((ppt->gauge == synchronous) && (pba->has_cdm == _FALSE_),
             ppt->error_message,
             "In the synchronous gauge, it is not self-consistent to assume no CDM: the later is used to define the initial timelike hypersurface. You can either add a negligible amount of CDM or switch to newtonian gauge");

  class_test ((ppr->tight_coupling_approximation < first_order_MB) ||
              (ppr->tight_coupling_approximation > compromise_CLASS),
              ppt->error_message,
              "your tight_coupling_approximation is set to %d, out of range defined in perturbations.h",ppr->tight_coupling_approximation);

  class_test ((ppr->radiation_streaming_approximation < rsa_null) ||
              (ppr->radiation_streaming_approximation > rsa_none),
              ppt->error_message,
              "your radiation_streaming_approximation is set to %d, out of range defined in perturbations.h",ppr->radiation_streaming_approximation);

  if (pba->has_ur == _TRUE_) {
    class_test ((ppr->ur_fluid_approximation < ufa_mb) ||
                (ppr->ur_fluid_approximation > ufa_none),
                ppt->error_message,
                "your ur_fluid_approximation is set to %d, out of range defined in perturbations.h",ppr->ur_fluid_approximation);
  }

  if (pba->has_ncdm == _TRUE_) {
    class_test ((ppr->ncdm_fluid_approximation < ncdmfa_mb) ||
                (ppr->ncdm_fluid_approximation > ncdmfa_none),
                ppt->error_message,
                "your ncdm_fluid_approximation is set to %d, out of range defined in perturbations.h",ppr->ncdm_fluid_approximation);
  }

  if (pba->has_fld == _TRUE_) {

    class_test(pba->w0_fld+pba->wa_fld >= 0.,
               ppt->error_message,
               "So far, the fluid is meant to be negligible at early time, and not to be important for defining the initial conditions of other species. You are using parameters for which this assumption may break down, so maybe it's the case to fully implement the fluid in the initial condition routine");

    class_test((pba->w0_fld==-1.) && (pba->wa_fld==0.),
               ppt->error_message,
               "Your choice of a fluid with (w0,wa)=(-1,0) is not valid due to instabilities in the unphysical perturbations of such a fluid. Try instead with a plain cosmological constant");

    class_test(((pba->w0_fld + pba->wa_fld +1.0)*(pba->w0_fld+1.0)) < 0.0,
               ppt->error_message,
               "w crosses -1 between the infinite past and today, and this would lead to divergent perturbation equations for the fluid.");

  }

  if (pba->has_dcdm == _TRUE_) {

    class_test((ppt->has_cdi == _TRUE_) || (ppt->has_bi == _TRUE_) || (ppt->has_nid == _TRUE_) || (ppt->has_niv == _TRUE_),
               ppt->error_message,
               "Non-adiabatic initial conditions not coded in presence of decaying dark matter");

  }

  if (pba->has_smg == _TRUE_) {
    class_test(ppt->gauge == newtonian,
               ppt->error_message,
               "Asked for scalar modified gravity AND Newtonian gauge. Not yet implemented");
    // TODO think of some suitable tests for the scalar field
  }

  class_test(ppt->has_vectors == _TRUE_,
             ppt->error_message,
             "Vectors not coded yet");

  if ((ppt->has_niv == _TRUE_) && (ppt->perturbations_verbose > 0)) {
    printf("Warning: the niv initial conditions in CLASS (and also in CAMB) should still be double-checked: if you want to do it and send feedback, you are welcome!\n");
  }

  /* decide which species need their tensor perturbations evolved,
     depending on the requested tensor method */
  if (ppt->has_tensors == _TRUE_) {

    ppt->evolve_tensor_ur = _FALSE_;
    ppt->evolve_tensor_ncdm = _FALSE_;

    switch (ppt->tensor_method) {

    case (tm_photons_only):
      break;

    case (tm_massless_approximation):
      if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_))
        ppt->evolve_tensor_ur = _TRUE_;
      break;

    case (tm_exact):
      if (pba->has_ur == _TRUE_)
        ppt->evolve_tensor_ur = _TRUE_;
      if (pba->has_ncdm == _TRUE_)
        ppt->evolve_tensor_ncdm = _TRUE_;
      break;
    }
  }

  /** - initialize all indices and lists in perturbs structure using perturb_indices_of_perturbs() */

  class_call(perturb_indices_of_perturbs(ppr,
                                         pba,
                                         pth,
                                         ppt),
             ppt->error_message,
             ppt->error_message);

  /** - define the common time sampling for all sources using perturb_timesampling_for_sources() */

  class_call(perturb_timesampling_for_sources(ppr,
                                              pba,
                                              pth,
                                              ppt),
             ppt->error_message,
             ppt->error_message);

  /** - if we want to store perturbations, write titles and allocate storage */
  class_call(perturb_prepare_output(pba,ppt),
             ppt->error_message,
             ppt->error_message);

  /** - create an array of workspaces in multi-thread case */

#ifdef _OPENMP

#pragma omp parallel
  {
    number_of_threads = omp_get_num_threads();
  }
#endif

  class_alloc(pppw,number_of_threads * sizeof(struct perturb_workspace *),ppt->error_message);

  /** - loop over modes (scalar, tensors, etc). For each mode: */

  for (index_md = 0; index_md < ppt->md_size; index_md++) {

    if (ppt->perturbations_verbose > 1)
      printf("Evolving mode %d/%d\n",index_md+1,ppt->md_size);

    abort = _FALSE_;

    sz = sizeof(struct perturb_workspace);

#pragma omp parallel                                             \
  shared(pppw,ppr,pba,pth,ppt,index_md,abort,number_of_threads)  \
  private(thread)                                                \
  num_threads(number_of_threads)

    {

#ifdef _OPENMP
      thread=omp_get_thread_num();
#endif

      /** create a workspace (one per thread in multi-thread case) */

      class_alloc_parallel(pppw[thread],sz,ppt->error_message);

      /** (a) initialize indices of vectors of perturbations with perturb_indices_of_current_vectors() */

      class_call_parallel(perturb_workspace_init(ppr,
                                                 pba,
                                                 pth,
                                                 ppt,
                                                 index_md,
                                                 pppw[thread]),
                          ppt->error_message,
                          ppt->error_message);

    } /* end of parallel region */

    if (abort == _TRUE_) return _FAILURE_;

    /** (c) loop over initial conditions and wavenumbers; for each of them, evolve perturbations and compute source functions with perturb_solve() */

    for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) {

      if (ppt->perturbations_verbose > 1)
        printf("Evolving ic %d/%d\n",index_ic+1,ppt->ic_size[index_md]);

      if (ppt->perturbations_verbose > 1)
        printf("evolving %d wavenumbers\n",ppt->k_size[index_md]);

      abort = _FALSE_;

#pragma omp parallel                                                    \
  shared(pppw,ppr,pba,pth,ppt,index_md,index_ic,abort,number_of_threads) \
  private(index_k,thread,tstart,tstop,tspent)                           \
  num_threads(number_of_threads)

      {

#ifdef _OPENMP
        thread=omp_get_thread_num();
        tspent=0.;
#endif

#pragma omp for schedule (dynamic)

        /* integrating backwards is slightly more optimal for parallel runs */
        //for (index_k = 0; index_k < ppt->k_size; index_k++) {
        for (index_k = ppt->k_size[index_md]-1; index_k >=0; index_k--) {

          if ((ppt->perturbations_verbose > 2) && (abort == _FALSE_)) {
            printf("evolving mode k=%e /Mpc (%d/%d)",ppt->k[index_md][index_k],index_k+1,ppt->k_size[index_md]);
            if (pba->sgnK != 0)
              printf(" (for scalar modes, corresponds to nu=%e)",sqrt(ppt->k[index_md][index_k]*ppt->k[index_md][index_k]+pba->K)/sqrt(pba->sgnK*pba->K));
            printf("\n");
          }

#ifdef _OPENMP
          tstart = omp_get_wtime();
#endif

          class_call_parallel(perturb_solve(ppr,
                                            pba,
                                            pth,
                                            ppt,
                                            index_md,
                                            index_ic,
                                            index_k,
                                            pppw[thread]),
                              ppt->error_message,
                              ppt->error_message);

#ifdef _OPENMP
          tstop = omp_get_wtime();

          tspent += tstop-tstart;
#endif

          /* propagate a failure flagged by another thread as soon as possible */
#pragma omp flush(abort)

        } /* end of loop over wavenumbers */

#ifdef _OPENMP
        if (ppt->perturbations_verbose>1)
          printf("In %s: time spent in parallel region (loop over k's) = %e s for thread %d\n",
                 __func__,tspent,omp_get_thread_num());
#endif

      } /* end of parallel region */

      if (abort == _TRUE_) return _FAILURE_;

    } /* end of loop over initial conditions */

    abort = _FALSE_;

#pragma omp parallel                                    \
  shared(pppw,ppt,index_md,abort,number_of_threads)     \
  private(thread)                                       \
  num_threads(number_of_threads)

    {

#ifdef _OPENMP
      thread=omp_get_thread_num();
#endif

      class_call_parallel(perturb_workspace_free(ppt,index_md,pppw[thread]),
                          ppt->error_message,
                          ppt->error_message);

    } /* end of parallel region */

    if (abort == _TRUE_) return _FAILURE_;

  } /* end loop over modes */

  free(pppw);

  return _SUCCESS_;
}

/**
 * Free all memory space allocated by perturb_init().
 *
 * To be called at the end of each run, only when no further calls to
 * perturb_sources_at_tau() are needed.
* * @param ppt Input: perturbation structure to be freed * @return the error status */ int perturb_free( struct perturbs * ppt ) { int index_md,index_ic,index_type; int filenum; if (ppt->has_perturbations == _TRUE_) { for (index_md = 0; index_md < ppt->md_size; index_md++) { for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) { for (index_type = 0; index_type < ppt->tp_size[index_md]; index_type++) { free(ppt->sources[index_md][index_ic*ppt->tp_size[index_md]+index_type]); } } free(ppt->sources[index_md]); free(ppt->k[index_md]); } free(ppt->tau_sampling); free(ppt->tp_size); free(ppt->ic_size); free(ppt->k); free(ppt->k_size_cmb); free(ppt->k_size_cl); free(ppt->k_size); free(ppt->sources); /** Stuff related to perturbations output: */ /** Free non-NULL pointers: */ if (ppt->index_k_output_values != NULL) free(ppt->index_k_output_values); for (filenum = 0; filenum<_MAX_NUMBER_OF_K_FILES_; filenum++){ if (ppt->scalar_perturbations_data[filenum] != NULL) free(ppt->scalar_perturbations_data[filenum]); if (ppt->vector_perturbations_data[filenum] != NULL) free(ppt->vector_perturbations_data[filenum]); if (ppt->tensor_perturbations_data[filenum] != NULL) free(ppt->tensor_perturbations_data[filenum]); } } return _SUCCESS_; } /** * Initialize all indices and allocate most arrays in perturbs structure. 
*
 * @param ppr Input : pointer to precision structure
 * @param pba Input : pointer to background structure
 * @param pth Input : pointer to thermodynamics structure
 * @param ppt Input/Output: Initialized perturbation structure
 * @return the error status
 */

int perturb_indices_of_perturbs(
                                struct precision * ppr,
                                struct background * pba,
                                struct thermo * pth,
                                struct perturbs * ppt
                                ) {

  /** Summary: */

  /** - define local variables */

  int index_type;        /* running index over source types */
  int index_md;          /* running index over modes */
  int index_ic;          /* running index over initial conditions */
  int index_type_common; /* number of source types shared by all modes */

  /** - count modes (scalar, vector, tensor) and assign corresponding
      indices (class_define_index() assigns the running index to the
      field and advances the counter when the flag is true) */

  index_md = 0;
  class_define_index(ppt->index_md_scalars,ppt->has_scalars,index_md,1);
  class_define_index(ppt->index_md_vectors,ppt->has_vectors,index_md,1);
  class_define_index(ppt->index_md_tensors,ppt->has_tensors,index_md,1);
  ppt->md_size = index_md;

  class_test(index_md == 0,
             ppt->error_message,
             "you should have at least one out of {scalars, vectors, tensors} !!!");

  /** - allocate array of number of types for each mode, ppt->tp_size[index_md] */

  class_alloc(ppt->tp_size,ppt->md_size*sizeof(int),ppt->error_message);

  /** - allocate array of number of initial conditions for each mode, ppt->ic_size[index_md] */

  class_alloc(ppt->ic_size,ppt->md_size*sizeof(int),ppt->error_message);

  /** - allocate array of arrays of source functions for each mode, ppt->source[index_md] */

  class_alloc(ppt->sources,ppt->md_size * sizeof(double *),ppt->error_message);

  /** - initialization of all flags to false (will eventually be set to true later) */

  ppt->has_cmb = _FALSE_;
  ppt->has_lss = _FALSE_;

  ppt->has_source_t = _FALSE_;
  ppt->has_source_p = _FALSE_;
  ppt->has_source_delta_m = _FALSE_;
  ppt->has_source_delta_g = _FALSE_;
  ppt->has_source_delta_b = _FALSE_;
  ppt->has_source_delta_cdm = _FALSE_;
  ppt->has_source_delta_dcdm = _FALSE_;
  ppt->has_source_delta_fld = _FALSE_;
  ppt->has_source_delta_scf = _FALSE_;
  ppt->has_source_phi_smg = _FALSE_; //scalar field
  ppt->has_source_delta_dr = _FALSE_;
  ppt->has_source_delta_ur = _FALSE_;
  ppt->has_source_delta_ncdm = _FALSE_;
  ppt->has_source_theta_m = _FALSE_;
  ppt->has_source_theta_g = _FALSE_;
  ppt->has_source_theta_b = _FALSE_;
  ppt->has_source_theta_cdm = _FALSE_;
  ppt->has_source_theta_dcdm = _FALSE_;
  ppt->has_source_theta_fld = _FALSE_;
  ppt->has_source_theta_scf = _FALSE_;
  ppt->has_source_phi_prime_smg = _FALSE_; //scalar field
  ppt->has_source_theta_dr = _FALSE_;
  ppt->has_source_theta_ur = _FALSE_;
  ppt->has_source_theta_ncdm = _FALSE_;
  ppt->has_source_phi = _FALSE_;
  ppt->has_source_phi_prime = _FALSE_;
  ppt->has_source_phi_plus_psi = _FALSE_;
  ppt->has_source_psi = _FALSE_;

  /** - source flags and indices, for sources that all modes have in
      common (temperature, polarization, ...). For temperature, the term
      t2 is always non-zero, while other terms are non-zero only for
      scalars and vectors. For polarization, the term e is always
      non-zero, while the term b is only for vectors and tensors. */

  if (ppt->has_cl_cmb_temperature == _TRUE_) {
    ppt->has_source_t = _TRUE_;
    ppt->has_cmb = _TRUE_;
  }

  if (ppt->has_cl_cmb_polarization == _TRUE_) {
    ppt->has_source_p = _TRUE_;
    ppt->has_cmb = _TRUE_;
  }

  index_type = 0;
  class_define_index(ppt->index_tp_t2,ppt->has_source_t,index_type,1);
  class_define_index(ppt->index_tp_p,ppt->has_source_p,index_type,1);
  index_type_common = index_type;

  /* indices for perturbed recombination */
  /* NOTE(review): these two indices are assigned AFTER index_type_common
     is recorded, so they fall in the range that each mode re-uses below
     (index_type is reset to index_type_common per mode) — confirm this
     overlap is intended for the perturbed-recombination bookkeeping */

  class_define_index(ppt->index_tp_perturbed_recombination_delta_temp,ppt->has_perturbed_recombination,index_type,1);
  class_define_index(ppt->index_tp_perturbed_recombination_delta_chi,ppt->has_perturbed_recombination,index_type,1);

  /** - define k values with perturb_get_k_list() */

  class_call(perturb_get_k_list(ppr,
                                pba,
                                pth,
                                ppt),
             ppt->error_message,
             ppt->error_message);

  /** - loop over modes. Initialize flags and indices which are specific to each mode. */

  for (index_md = 0; index_md < ppt->md_size; index_md++) {

    /** (a) scalars */

    if (_scalars_) {

      /** - source flags and indices, for sources that are specific to scalars */

      if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) || (ppt->has_cl_lensing_potential)) {
        ppt->has_lss = _TRUE_;
        ppt->has_source_phi_plus_psi = _TRUE_;
      }

      if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_nl_corrections_based_on_delta_m)) {
        ppt->has_lss = _TRUE_;
        ppt->has_source_delta_m = _TRUE_;
      }

      /* density transfer functions: one delta per species present in the background */
      if (ppt->has_density_transfers == _TRUE_) {
        ppt->has_lss = _TRUE_;
        ppt->has_source_delta_g = _TRUE_;
        ppt->has_source_delta_b = _TRUE_;
        if (pba->has_cdm == _TRUE_)
          ppt->has_source_delta_cdm = _TRUE_;
        if (pba->has_dcdm == _TRUE_)
          ppt->has_source_delta_dcdm = _TRUE_;
        if (pba->has_fld == _TRUE_)
          ppt->has_source_delta_fld = _TRUE_;
        if (pba->has_scf == _TRUE_)
          ppt->has_source_delta_scf = _TRUE_;
        if (pba->has_ur == _TRUE_)
          ppt->has_source_delta_ur = _TRUE_;
        if (pba->has_smg == _TRUE_)
          ppt->has_source_phi_smg = _TRUE_;
        if (pba->has_dr == _TRUE_)
          ppt->has_source_delta_dr = _TRUE_;
        if (pba->has_ncdm == _TRUE_)
          ppt->has_source_delta_ncdm = _TRUE_;
      }

      /* velocity transfer functions: one theta per species (cdm velocity
         vanishes by construction in synchronous gauge, hence the extra test) */
      if (ppt->has_velocity_transfers == _TRUE_) {
        ppt->has_lss = _TRUE_;
        ppt->has_source_theta_g = _TRUE_;
        ppt->has_source_theta_b = _TRUE_;
        if ((pba->has_cdm == _TRUE_) && (ppt->gauge != synchronous))
          ppt->has_source_theta_cdm = _TRUE_;
        if (pba->has_dcdm == _TRUE_)
          ppt->has_source_theta_dcdm = _TRUE_;
        if (pba->has_fld == _TRUE_)
          ppt->has_source_theta_fld = _TRUE_;
        if (pba->has_scf == _TRUE_)
          ppt->has_source_theta_scf = _TRUE_;
        if (pba->has_smg == _TRUE_)
          ppt->has_source_phi_prime_smg = _TRUE_;
        if (pba->has_ur == _TRUE_)
          ppt->has_source_theta_ur = _TRUE_;
        if (pba->has_dr == _TRUE_)
          ppt->has_source_theta_dr = _TRUE_;
        if (pba->has_ncdm == _TRUE_)
          ppt->has_source_theta_ncdm = _TRUE_;
      }

      /* number-count Cl's: each observational effect switches on its own sources */
      if (ppt->has_cl_number_count == _TRUE_) {
        ppt->has_lss = _TRUE_;
        if (ppt->has_nc_density == _TRUE_) {
          ppt->has_source_delta_m = _TRUE_;
        }
        if (ppt->has_nc_rsd == _TRUE_) {
          ppt->has_source_theta_m = _TRUE_;
        }
        if (ppt->has_nc_lens == _TRUE_) {
          ppt->has_source_phi_plus_psi = _TRUE_;
        }
        if (ppt->has_nc_gr == _TRUE_) {
          ppt->has_source_phi = _TRUE_;
          ppt->has_source_psi = _TRUE_;
          ppt->has_source_phi_prime = _TRUE_;
          ppt->has_source_phi_plus_psi = _TRUE_;
        }
      }

      /* assign scalar source-type indices, starting right after the
         common (all-mode) types */
      index_type = index_type_common;
      class_define_index(ppt->index_tp_t0,         ppt->has_source_t,         index_type,1);
      class_define_index(ppt->index_tp_t1,         ppt->has_source_t,         index_type,1);
      class_define_index(ppt->index_tp_delta_m,    ppt->has_source_delta_m,   index_type,1);
      class_define_index(ppt->index_tp_delta_g,    ppt->has_source_delta_g,   index_type,1);
      class_define_index(ppt->index_tp_delta_b,    ppt->has_source_delta_b,   index_type,1);
      class_define_index(ppt->index_tp_delta_cdm,  ppt->has_source_delta_cdm, index_type,1);
      class_define_index(ppt->index_tp_delta_dcdm, ppt->has_source_delta_dcdm,index_type,1);
      class_define_index(ppt->index_tp_delta_fld,  ppt->has_source_delta_fld, index_type,1);
      class_define_index(ppt->index_tp_delta_scf,  ppt->has_source_delta_scf, index_type,1);
      class_define_index(ppt->index_tp_phi_smg,    ppt->has_source_phi_smg,   index_type,1);
      class_define_index(ppt->index_tp_delta_dr,   ppt->has_source_delta_dr,  index_type,1);
      class_define_index(ppt->index_tp_delta_ur,   ppt->has_source_delta_ur,  index_type,1);
      class_define_index(ppt->index_tp_delta_ncdm1,ppt->has_source_delta_ncdm,index_type,pba->N_ncdm);
      class_define_index(ppt->index_tp_theta_m,    ppt->has_source_theta_m,   index_type,1);
      class_define_index(ppt->index_tp_theta_g,    ppt->has_source_theta_g,   index_type,1);
      class_define_index(ppt->index_tp_theta_b,    ppt->has_source_theta_b,   index_type,1);
      class_define_index(ppt->index_tp_theta_cdm,  ppt->has_source_theta_cdm, index_type,1);
      class_define_index(ppt->index_tp_theta_dcdm, ppt->has_source_theta_dcdm,index_type,1);
      class_define_index(ppt->index_tp_theta_fld,  ppt->has_source_theta_fld, index_type,1);
      class_define_index(ppt->index_tp_theta_scf,  ppt->has_source_theta_scf, index_type,1);
      class_define_index(ppt->index_tp_phi_prime_smg, ppt->has_source_phi_prime_smg, index_type,1);
      class_define_index(ppt->index_tp_theta_dr,   ppt->has_source_theta_dr,  index_type,1);
      class_define_index(ppt->index_tp_theta_ur,   ppt->has_source_theta_ur,  index_type,1);
      class_define_index(ppt->index_tp_theta_ncdm1,ppt->has_source_theta_ncdm,index_type,pba->N_ncdm);
      class_define_index(ppt->index_tp_phi,        ppt->has_source_phi,       index_type,1);
      class_define_index(ppt->index_tp_phi_prime,  ppt->has_source_phi_prime, index_type,1);
      class_define_index(ppt->index_tp_phi_plus_psi,ppt->has_source_phi_plus_psi,index_type,1);
      class_define_index(ppt->index_tp_psi,        ppt->has_source_psi,       index_type,1);

      ppt->tp_size[index_md] = index_type;

      class_test(index_type == 0,
                 ppt->error_message,
                 "inconsistent input: you asked for scalars, so you should have at least one non-zero scalar source type (temperature, polarisation, lensing/gravitational potential, ...). Please adjust your input.");

      /** -- count scalar initial conditions (for scalars: ad, cdi, nid,
          niv; for tensors: only one) and assign corresponding indices */

      index_ic = 0;
      class_define_index(ppt->index_ic_ad, ppt->has_ad, index_ic,1);
      class_define_index(ppt->index_ic_bi, ppt->has_bi, index_ic,1);
      class_define_index(ppt->index_ic_cdi,ppt->has_cdi,index_ic,1);
      class_define_index(ppt->index_ic_nid,ppt->has_nid,index_ic,1);
      class_define_index(ppt->index_ic_niv,ppt->has_niv,index_ic,1);
      ppt->ic_size[index_md] = index_ic;

      class_test(index_ic == 0,
                 ppt->error_message,
                 "you should have at least one adiabatic or isocurvature initial condition...} !!!");
    }

    if (_vectors_) {

      /** - source flags and indices, for sources that are specific to vectors */

      index_type = index_type_common;
      class_define_index(ppt->index_tp_t1,ppt->has_source_t,index_type,1);
      ppt->tp_size[index_md] = index_type;

      /**
         class_test(index_type == 0,
         ppt->error_message,
         "inconsistent input: you asked for vectors, so you should have at least one non-zero vector source type (temperature or polarisation). Please adjust your input.");
      */

      /** -- initial conditions for vectors*/

      index_ic = 0;
      /* not coded yet */
      ppt->ic_size[index_md] = index_ic;
    }

    /** (b) tensors */

    if (_tensors_) {

      /** - source flags and indices, for sources that are specific to tensors */

      index_type = index_type_common;

      /* nothing specific, unlike for vectors and scalars! */

      ppt->tp_size[index_md] = index_type;

      /**
         class_test(index_type == 0,
         ppt->error_message,
         "inconsistent input: you asked for tensors, so you should have at least one non-zero tensor source type (temperature or polarisation). Please adjust your input.");
      */

      /** -- only one initial condition for tensors*/

      index_ic = 0;
      class_define_index(ppt->index_ic_ten,_TRUE_,index_ic,1);
      ppt->ic_size[index_md] = index_ic;
    }

    /** (c) for each mode, allocate array of arrays of source functions
        for each initial conditions and wavenumber,
        (ppt->source[index_md])[index_ic][index_type] */

    class_alloc(ppt->sources[index_md],
                ppt->ic_size[index_md] * ppt->tp_size[index_md] * sizeof(double *),
                ppt->error_message);

  }

  return _SUCCESS_;
}

/**
 * Define time sampling for source functions.
 *
 * For each type, compute the list of values of tau at which sources
 * will be sampled. Knowing the number of tau values, allocate all
 * arrays of source functions.
*
 * @param ppr Input : pointer to precision structure
 * @param pba Input : pointer to background structure
 * @param pth Input : pointer to thermodynamics structure
 * @param ppt Input/Output: Initialized perturbation structure
 * @return the error status
 */

int perturb_timesampling_for_sources(
                                     struct precision * ppr,
                                     struct background * pba,
                                     struct thermo * pth,
                                     struct perturbs * ppt
                                     ) {

  /** Summary: */

  /** - define local variables */

  int counter;            /* number of sampling points found so far */
  int index_md;
  int index_type;
  int index_ic;
  int last_index_back;    /* interpolation hints for the background/thermo tables */
  int last_index_thermo;
  int first_index_back;
  int first_index_thermo;

  double tau;
  double tau_ini;         /* first sampling point (found below) */
  double tau_lower;       /* bisection bracket */
  double tau_upper;
  double tau_mid;

  double timescale_source;
  double rate_thermo;
  double rate_isw_squared;
  double a_prime_over_a;
  double a_primeprime_over_a;
  double * pvecback;      /* scratch vector for background quantities */
  double * pvecthermo;    /* scratch vector for thermodynamics quantities */

  /** - allocate background/thermodynamics vectors */

  class_alloc(pvecback,pba->bg_size_short*sizeof(double),ppt->error_message);
  class_alloc(pvecthermo,pth->th_size*sizeof(double),ppt->error_message);

  /** - first, just count the number of sampling points in order to
      allocate the array containing all values: */

  /** (a) if CMB requested, first sampling point = when the universe
      stops being opaque; otherwise, start sampling gravitational
      potential at recombination [however, if perturbed recombination
      is requested, we also need to start the system before
      recombination. Otherwise, the initial conditions for gas
      temperature and ionization fraction perturbations (delta_T =
      1/3 delta_b, delta_x_e) are not valid]. */

  if ((ppt->has_cmb == _TRUE_)||(ppt->has_perturbed_recombination == _TRUE_)) {

    /* using bisection, search time tau such that the ratio of thermo
       to Hubble time scales tau_c/tau_h=aH/kappa' is equal to
       start_sources_at_tau_c_over_tau_h */

    tau_lower = pth->tau_ini;

    class_call(background_at_tau(pba,
                                 tau_lower,
                                 pba->short_info,
                                 pba->inter_normal,
                                 &first_index_back,
                                 pvecback),
               pba->error_message,
               ppt->error_message);

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   pth->inter_normal,
                                   &first_index_thermo,
                                   pvecback,
                                   pvecthermo),
               pth->error_message,
               ppt->error_message);

    /* sanity check: the lower bracket must lie before the requested starting point */
    class_test(pvecback[pba->index_bg_a]*
               pvecback[pba->index_bg_H]/
               pvecthermo[pth->index_th_dkappa] >
               ppr->start_sources_at_tau_c_over_tau_h,
               ppt->error_message,
               "your choice of initial time for computing sources is inappropriate: it corresponds to an earlier time than the one at which the integration of thermodynamical variables started (tau=%g). You should increase either 'start_sources_at_tau_c_over_tau_h' or 'recfast_z_initial'\n",
               tau_lower);

    tau_upper = pth->tau_rec;

    class_call(background_at_tau(pba,
                                 tau_upper,
                                 pba->short_info,
                                 pba->inter_normal,
                                 &first_index_back,
                                 pvecback),
               pba->error_message,
               ppt->error_message);

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   pth->inter_normal,
                                   &first_index_thermo,
                                   pvecback,
                                   pvecthermo),
               pth->error_message,
               ppt->error_message);

    /* sanity check: the upper bracket (recombination) must lie after it */
    class_test(pvecback[pba->index_bg_a]*
               pvecback[pba->index_bg_H]/
               pvecthermo[pth->index_th_dkappa] <
               ppr->start_sources_at_tau_c_over_tau_h,
               ppt->error_message,
               "your choice of initial time for computing sources is inappropriate: it corresponds to a time after recombination. You should decrease 'start_sources_at_tau_c_over_tau_h'\n");

    tau_mid = 0.5*(tau_lower + tau_upper);

    /* bisect until the bracket is narrower than tol_tau_approx */
    while (tau_upper - tau_lower > ppr->tol_tau_approx) {

      class_call(background_at_tau(pba,
                                   tau_mid,
                                   pba->short_info,
                                   pba->inter_normal,
                                   &first_index_back,
                                   pvecback),
                 pba->error_message,
                 ppt->error_message);

      class_call(thermodynamics_at_z(pba,
                                     pth,
                                     1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                     pth->inter_normal,
                                     &first_index_thermo,
                                     pvecback,
                                     pvecthermo),
                 pth->error_message,
                 ppt->error_message);

      if (pvecback[pba->index_bg_a]*
          pvecback[pba->index_bg_H]/
          pvecthermo[pth->index_th_dkappa] >
          ppr->start_sources_at_tau_c_over_tau_h)

        tau_upper = tau_mid;
      else
        tau_lower = tau_mid;

      tau_mid = 0.5*(tau_lower + tau_upper);

    }

    tau_ini = tau_mid;

  }
  else {

    /* case when CMB not requested: start at recombination time */
    tau_ini = pth->tau_rec;

    /* set values of first_index_back/thermo */
    class_call(background_at_tau(pba,
                                 tau_ini,
                                 pba->short_info,
                                 pba->inter_normal,
                                 &first_index_back,
                                 pvecback),
               pba->error_message,
               ppt->error_message);

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   pth->inter_normal,
                                   &first_index_thermo,
                                   pvecback,
                                   pvecthermo),
               pth->error_message,
               ppt->error_message);
  }

  /* the first point (tau_ini) is counted here */
  counter = 1;

  /** (b) next sampling point = previous + ppr->perturb_sampling_stepsize * timescale_source, where:
      - if CMB requested:
        timescale_source1 = \f$ |g/\dot{g}| = |\dot{\kappa}-\ddot{\kappa}/\dot{\kappa}|^{-1} \f$;
        timescale_source2 = \f$ |2\ddot{a}/a-(\dot{a}/a)^2|^{-1/2} \f$
        (to sample correctly the late ISW effect; and
        timescale_source=1/(1/timescale_source1+1/timescale_source2);
        repeat till today.
      - if CMB not requested:
        timescale_source = 1/aH; repeat till today. */

  last_index_back = first_index_back;
  last_index_thermo = first_index_thermo;
  tau = tau_ini;

  /* counting pass: this loop MUST perform exactly the same arithmetic
     as the filling pass below, so that both find the same number of
     points */
  while (tau < pba->conformal_age) {

    class_call(background_at_tau(pba,
                                 tau,
                                 pba->short_info,
                                 pba->inter_closeby,
                                 &last_index_back,
                                 pvecback),
               pba->error_message,
               ppt->error_message);

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   pth->inter_closeby,
                                   &last_index_thermo,
                                   pvecback,
                                   pvecthermo),
               pth->error_message,
               ppt->error_message);

    if (ppt->has_cmb == _TRUE_) {

      /* variation rate of thermodynamics variables */
      rate_thermo = pvecthermo[pth->index_th_rate];

      /* variation rate of metric due to late ISW effect (important at late times) */
      a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
      a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a]
        + 2. * a_prime_over_a * a_prime_over_a;
      rate_isw_squared = fabs(2.*a_primeprime_over_a-a_prime_over_a*a_prime_over_a);

      /* compute rate */
      timescale_source = sqrt(rate_thermo*rate_thermo+rate_isw_squared);
    }
    else {
      /* variation rate given by Hubble time */
      a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];

      timescale_source = a_prime_over_a;
    }

    /* check it is non-zero */
    class_test(timescale_source == 0.,
               ppt->error_message,
               "null evolution rate, integration is diverging");

    /* compute inverse rate */
    timescale_source = 1./timescale_source;

    class_test(fabs(ppr->perturb_sampling_stepsize*timescale_source/tau) < ppr->smallest_allowed_variation,
               ppt->error_message,
               "integration step =%e < machine precision : leads either to numerical error or infinite loop",ppr->perturb_sampling_stepsize*timescale_source);

    tau = tau + ppr->perturb_sampling_stepsize*timescale_source;
    counter++;

  }

  /** - infer total number of time steps, ppt->tau_size */
  ppt->tau_size = counter;

  /** - allocate array of time steps, ppt->tau_sampling[index_tau] */
  class_alloc(ppt->tau_sampling,ppt->tau_size * sizeof(double),ppt->error_message);

  /** - repeat the same steps, now filling the array with each tau value: */

  /** (a) first sampling point = when the universe stops being opaque */

  counter = 0;
  ppt->tau_sampling[counter]=tau_ini;

  /** (b) next sampling point = previous + ppr->perturb_sampling_stepsize * timescale_source, where
      timescale_source1 = \f$ |g/\dot{g}| = |\dot{\kappa}-\ddot{\kappa}/\dot{\kappa}|^{-1} \f$;
      timescale_source2 = \f$ |2\ddot{a}/a-(\dot{a}/a)^2|^{-1/2} \f$
      (to sample correctly the late ISW effect; and
      timescale_source=1/(1/timescale_source1+1/timescale_source2);
      repeat till today
      - if CMB not requested:
        timescale_source = 1/aH; repeat till today. */

  last_index_back = first_index_back;
  last_index_thermo = first_index_thermo;
  tau = tau_ini;

  while (tau < pba->conformal_age) {

    class_call(background_at_tau(pba,
                                 tau,
                                 pba->short_info,
                                 pba->inter_closeby,
                                 &last_index_back,
                                 pvecback),
               pba->error_message,
               ppt->error_message);

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   pth->inter_closeby,
                                   &last_index_thermo,
                                   pvecback,
                                   pvecthermo),
               pth->error_message,
               ppt->error_message);

    if (ppt->has_cmb == _TRUE_) {

      /* variation rate of thermodynamics variables */
      rate_thermo = pvecthermo[pth->index_th_rate];

      /* variation rate of metric due to late ISW effect (important at late times) */
      a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
      a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a]
        + 2. * a_prime_over_a * a_prime_over_a;
      rate_isw_squared = fabs(2.*a_primeprime_over_a-a_prime_over_a*a_prime_over_a);

      /* compute rate */
      timescale_source = sqrt(rate_thermo*rate_thermo+rate_isw_squared);
    }
    else {
      a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
      timescale_source = a_prime_over_a;
    }

    /* check it is non-zero */
    class_test(timescale_source == 0.,
               ppt->error_message,
               "null evolution rate, integration is diverging");

    /* compute inverse rate */
    timescale_source = 1./timescale_source;

    class_test(fabs(ppr->perturb_sampling_stepsize*timescale_source/tau) < ppr->smallest_allowed_variation,
               ppt->error_message,
               "integration step =%e < machine precision : leads either to numerical error or infinite loop",ppr->perturb_sampling_stepsize*timescale_source);

    tau = tau + ppr->perturb_sampling_stepsize*timescale_source;
    counter++;
    ppt->tau_sampling[counter]=tau;

  }

  /** - last sampling point = exactly today (overwrites the first point
      that overshot pba->conformal_age in the loop above) */
  ppt->tau_sampling[counter] = pba->conformal_age;

  free(pvecback);
  free(pvecthermo);

  /** - loop over modes, initial conditions and types. For each of
      them, allocate array of source functions. */

  for (index_md = 0; index_md < ppt->md_size; index_md++) {
    for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) {
      for (index_type = 0; index_type < ppt->tp_size[index_md]; index_type++) {

        class_alloc(ppt->sources[index_md][index_ic*ppt->tp_size[index_md]+index_type],
                    ppt->k_size[index_md] * ppt->tau_size * sizeof(double),
                    ppt->error_message);

      }
    }
  }

  return _SUCCESS_;

}

/**
 * Define the number of comoving wavenumbers using the information
 * passed in the precision structure.
 *
 * @param ppr Input : pointer to precision structure
 * @param pba Input : pointer to background structure
 * @param pth Input : pointer to thermodynamics structure
 * @param ppt Input : pointer to perturbation structure
* @return the error status */ int perturb_get_k_list( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt ) { int index_k, index_k_output, index_mode; double k,k_min=0.,k_rec,step,tau1; double * k_max_cmb; double * k_max_cl; double k_max=0.; double scale2; double *tmp_k_list; int newk_size, index_newk, add_k_output_value; class_test(ppr->k_step_transition == 0., ppt->error_message, "stop to avoid division by zero"); class_test(pth->rs_rec == 0., ppt->error_message, "stop to avoid division by zero"); /* allocate arrays related to k list for each mode */ class_alloc(ppt->k_size_cmb, ppt->md_size*sizeof(int), ppt->error_message); class_alloc(ppt->k_size_cl, ppt->md_size*sizeof(int), ppt->error_message); class_alloc(ppt->k_size, ppt->md_size*sizeof(int), ppt->error_message); class_alloc(ppt->k, ppt->md_size*sizeof(double*), ppt->error_message); class_calloc(k_max_cmb, ppt->md_size, sizeof(double), ppt->error_message); class_calloc(k_max_cl, ppt->md_size, sizeof(double), ppt->error_message); /** - scalar modes */ if (ppt->has_scalars == _TRUE_) { /* first value */ if (pba->sgnK == 0) { /* K<0 (flat) : start close to zero */ k_min=ppr->k_min_tau0/pba->conformal_age; } else if (pba->sgnK == -1) { /* K<0 (open) : start close to sqrt(-K) (in transfer modules, for scalars, this will correspond to q close to zero; for vectors and tensors, this value is even smaller than the minimum necessary value) */ k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2)); } else if (pba->sgnK == 1) { /* K>0 (closed): start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */ k_min = sqrt((8.-1.e-4)*pba->K); } /** - find k_max (as well as k_max_cmb[ppt->index_md_scalars], k_max_cl[ppt->index_md_scalars]) */ k_rec = 2. 
* _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */ k_max_cmb[ppt->index_md_scalars] = k_min; k_max_cl[ppt->index_md_scalars] = k_min; k_max = k_min; if (ppt->has_cls == _TRUE_) { /* find k_max_cmb[ppt->index_md_scalars] : */ /* choose a k_max_cmb[ppt->index_md_scalars] corresponding to a wavelength on the last scattering surface seen today under an angle smaller than pi/lmax: this is equivalent to k_max_cl[ppt->index_md_scalars]*[comvoving.ang.diameter.distance] > l_max */ k_max_cmb[ppt->index_md_scalars] = ppr->k_max_tau0_over_l_max*ppt->l_scalar_max /pba->conformal_age/pth->angular_rescaling; k_max_cl[ppt->index_md_scalars] = k_max_cmb[ppt->index_md_scalars]; k_max = k_max_cmb[ppt->index_md_scalars]; /* find k_max_cl[ppt->index_md_scalars] : */ /* if we need density/lensing Cl's, we must impose a stronger condition, such that the minimum wavelength on the shell corresponding to the center of smallest redshift bin is seen under an angle smaller than pi/lmax. So we must mutiply our previous k_max_cl[ppt->index_md_scalars] by the ratio tau0/(tau0-tau[center of smallest redhsift bin]). Note that we could do the same with the lensing potential if we needed a very precise C_l^phi-phi at large l. We don't do it by default, because the lensed ClT, ClE would be marginally affected. 
*/ if ((ppt->has_cl_number_count == _TRUE_) || (ppt->has_cl_lensing_potential == _TRUE_)) { class_call(background_tau_of_z(pba, ppt->selection_mean[0], &tau1), pba->error_message, ppt->error_message); k_max_cl[ppt->index_md_scalars] = MAX(k_max_cl[ppt->index_md_scalars],ppr->k_max_tau0_over_l_max*ppt->l_lss_max/(pba->conformal_age-tau1)); // to be very accurate we should use angular diameter distance to given redhsift instead of comoving radius: would implement corrections dependning on curvature k_max = k_max_cl[ppt->index_md_scalars]; } } /* find k_max: */ if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_)) k_max = MAX(k_max,ppt->k_max_for_pk); if (ppt->has_nl_corrections_based_on_delta_m == _TRUE_) k_max = MAX(k_max,ppr->halofit_min_k_max); /** - test that result for k_min, k_max make sense */ class_test(k_min<0., ppt->error_message, "buggy definition of k_min"); class_test(k_max<0., ppt->error_message, "buggy definition of k_max"); class_test(k_max<k_min, ppt->error_message, "buggy definition of k_min and/or k_max"); /* if K>0, the transfer function will be calculated for discrete integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and m=0,1,2 for scalars/vectors/tensors. However we are free to define in the perturbation module some arbitrary values of k: later on, the transfer module will interpolate at values of k corresponding exactly to integer values of nu. 
Hence, apart from the value of k_min and the step size in thevicinity of k_min, we define exactly the same sampling in the three cases K=0, K<0, K>0 */ /* allocate array with, for the moment, the largest possible size */ class_alloc(ppt->k[ppt->index_md_scalars], ((int)((k_max_cmb[ppt->index_md_scalars]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+ (int)(MAX(ppr->k_per_decade_for_pk,ppr->k_per_decade_for_bao)*log(k_max/k_min)/log(10.))+3) *sizeof(double),ppt->error_message); /* first value */ index_k=0; k = k_min; ppt->k[ppt->index_md_scalars][index_k] = k; index_k++; /* values until k_max_cmb[ppt->index_md_scalars] */ while (k < k_max_cmb[ppt->index_md_scalars]) { /* the linear step is not constant, it has a step-like shape, centered around the characteristic scale set by the sound horizon at recombination (associated to the comoving wavenumber k_rec) */ step = (ppr->k_step_super + 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.) * (ppr->k_step_sub-ppr->k_step_super)) * k_rec; /* there is one other thing to take into account in the step size. There are two other characteristic scales that matter for the sampling: the Hubble scale today, k0=a0H0, and eventually curvature scale sqrt(|K|). We define "scale2" as the sum of the squared Hubble radius and squared curvature radius. We need to increase the sampling for k<sqrt(scale2), in order to get the first mutipoles accurate enough. The formula below reduces it gradually in the k-->0 limit, by up to a factor 10. The actual stepsize is still fixed by k_step_super, this is just a reduction factor. 
*/ scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K); step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction); class_test(step / k < ppr->smallest_allowed_variation, ppt->error_message, "k step =%e < machine precision : leads either to numerical error or infinite loop", step * k_rec); k += step; class_test(k <= ppt->k[ppt->index_md_scalars][index_k-1], ppt->error_message, "consecutive values of k should differ and should be in growing order"); ppt->k[ppt->index_md_scalars][index_k] = k; index_k++; } ppt->k_size_cmb[ppt->index_md_scalars] = index_k; /* values until k_max_cl[ppt->index_md_scalars] */ while (k < k_max_cl[ppt->index_md_scalars]) { k *= pow(10.,1./(ppr->k_per_decade_for_pk +(ppr->k_per_decade_for_bao-ppr->k_per_decade_for_pk) *(1.-tanh(pow((log(k)-log(ppr->k_bao_center*k_rec))/log(ppr->k_bao_width),4))))); ppt->k[ppt->index_md_scalars][index_k] = k; index_k++; } ppt->k_size_cl[ppt->index_md_scalars] = index_k; /* values until k_max */ while (k < k_max) { k *= pow(10.,1./(ppr->k_per_decade_for_pk +(ppr->k_per_decade_for_bao-ppr->k_per_decade_for_pk) *(1.-tanh(pow((log(k)-log(ppr->k_bao_center*k_rec))/log(ppr->k_bao_width),4))))); ppt->k[ppt->index_md_scalars][index_k] = k; index_k++; } ppt->k_size[ppt->index_md_scalars] = index_k; class_realloc(ppt->k[ppt->index_md_scalars], ppt->k[ppt->index_md_scalars], ppt->k_size[ppt->index_md_scalars]*sizeof(double), ppt->error_message); } /** - vector modes */ if (ppt->has_vectors == _TRUE_) { /* first value */ if (pba->sgnK == 0) { /* K<0 (flat) : start close to zero */ k_min=ppr->k_min_tau0/pba->conformal_age; } else if (pba->sgnK == -1) { /* K<0 (open) : start close to sqrt(-K) (in transfer modules, for scalars, this will correspond to q close to zero; for vectors and tensors, this value is even smaller than the minimum necessary value) */ k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2)); } else if (pba->sgnK == 1) { /* K>0 (closed): start from q=sqrt(k2+(1+m)K) 
equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */ k_min = sqrt((7.-1.e-4)*pba->K); } /** - find k_max (as well as k_max_cmb[ppt->index_md_vectors], k_max_cl[ppt->index_md_vectors]) */ k_rec = 2. * _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */ k_max_cmb[ppt->index_md_vectors] = k_min; k_max_cl[ppt->index_md_vectors] = k_min; k_max = k_min; if (ppt->has_cls == _TRUE_) { /* find k_max_cmb: */ /* choose a k_max_cmb corresponding to a wavelength on the last scattering surface seen today under an angle smaller than pi/lmax: this is equivalent to k_max_cl*[comvoving.ang.diameter.distance] > l_max */ k_max_cmb[ppt->index_md_vectors] = ppr->k_max_tau0_over_l_max*ppt->l_vector_max /pba->conformal_age/pth->angular_rescaling; k_max_cl[ppt->index_md_vectors] = k_max_cmb[ppt->index_md_vectors]; k_max = k_max_cmb[ppt->index_md_vectors]; } /** - test that result for k_min, k_max make sense */ class_test(k_min<0., ppt->error_message, "buggy definition of k_min"); class_test(k_max<0., ppt->error_message, "buggy definition of k_max"); class_test(k_max<k_min, ppt->error_message, "buggy definition of k_min and/or k_max"); /* if K>0, the transfer function will be calculated for discrete integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and m=0,1,2 for scalars/vectors/tensors. However we are free to define in the perturbation module some arbitrary values of k: later on, the transfer module will interpolate at values of k corresponding exactly to integer values of nu. 
Hence, apart from the value of k_min and the step size in thevicinity of k_min, we define exactly the same sampling in the three cases K=0, K<0, K>0 */ /* allocate array with, for the moment, the largest possible size */ class_alloc(ppt->k[ppt->index_md_vectors], ((int)((k_max_cmb[ppt->index_md_vectors]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+1) *sizeof(double),ppt->error_message); /* first value */ index_k=0; k = k_min; ppt->k[ppt->index_md_vectors][index_k] = k; index_k++; /* values until k_max_cmb[ppt->index_md_vectors] */ while (k < k_max_cmb[ppt->index_md_vectors]) { /* the linear step is not constant, it has a step-like shape, centered around the characteristic scale set by the sound horizon at recombination (associated to the comoving wavenumber k_rec) */ step = (ppr->k_step_super + 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.) * (ppr->k_step_sub-ppr->k_step_super)) * k_rec; /* there is one other thing to take into account in the step size. There are two other characteristic scales that matter for the sampling: the Hubble scale today, k0=a0H0, and eventually curvature scale sqrt(|K|). We define "scale2" as the sum of the squared Hubble radius and squared curvature radius. We need to increase the sampling for k<sqrt(scale2), in order to get the first mutipoles accurate enough. The formula below reduces it gradually in the k-->0 limit, by up to a factor 10. The actual stepsize is still fixed by k_step_super, this is just a reduction factor. 
*/ scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K); step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction); class_test(step / k < ppr->smallest_allowed_variation, ppt->error_message, "k step =%e < machine precision : leads either to numerical error or infinite loop", step * k_rec); k += step; class_test(k <= ppt->k[ppt->index_md_scalars][index_k-1], ppt->error_message, "consecutive values of k should differ and should be in growing order"); ppt->k[ppt->index_md_vectors][index_k] = k; index_k++; } ppt->k_size_cmb[ppt->index_md_vectors] = index_k; ppt->k_size_cl[ppt->index_md_vectors] = index_k; ppt->k_size[ppt->index_md_vectors] = index_k; class_realloc(ppt->k[ppt->index_md_vectors], ppt->k[ppt->index_md_vectors], ppt->k_size[ppt->index_md_vectors]*sizeof(double), ppt->error_message); } /** - tensor modes */ if (ppt->has_tensors == _TRUE_) { /* first value */ if (pba->sgnK == 0) { /* K<0 (flat) : start close to zero */ k_min=ppr->k_min_tau0/pba->conformal_age; } else if (pba->sgnK == -1) { /* K<0 (open) : start close to sqrt(-K) (in transfer modules, for scalars, this will correspond to q close to zero; for vectors and tensors, this value is even smaller than the minimum necessary value) */ k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2)); } else if (pba->sgnK == 1) { /* K>0 (closed): start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */ k_min = sqrt((6.-1.e-4)*pba->K); } /** - find k_max (as well as k_max_cmb[ppt->index_md_tensors], k_max_cl[ppt->index_md_tensors]) */ k_rec = 2. 
* _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */ k_max_cmb[ppt->index_md_tensors] = k_min; k_max_cl[ppt->index_md_tensors] = k_min; k_max = k_min; if (ppt->has_cls == _TRUE_) { /* find k_max_cmb[ppt->index_md_tensors]: */ /* choose a k_max_cmb[ppt->index_md_tensors] corresponding to a wavelength on the last scattering surface seen today under an angle smaller than pi/lmax: this is equivalent to k_max_cl[ppt->index_md_tensors]*[comvoving.ang.diameter.distance] > l_max */ k_max_cmb[ppt->index_md_tensors] = ppr->k_max_tau0_over_l_max*ppt->l_tensor_max /pba->conformal_age/pth->angular_rescaling; k_max_cl[ppt->index_md_tensors] = k_max_cmb[ppt->index_md_tensors]; k_max = k_max_cmb[ppt->index_md_tensors]; } /** - test that result for k_min, k_max make sense */ class_test(k_min<0., ppt->error_message, "buggy definition of k_min"); class_test(k_max<0., ppt->error_message, "buggy definition of k_max"); class_test(k_max<k_min, ppt->error_message, "buggy definition of k_min and/or k_max"); /* if K>0, the transfer function will be calculated for discrete integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and m=0,1,2 for scalars/vectors/tensors. However we are free to define in the perturbation module some arbitrary values of k: later on, the transfer module will interpolate at values of k corresponding exactly to integer values of nu. 
Hence, apart from the value of k_min and the step size in thevicinity of k_min, we define exactly the same sampling in the three cases K=0, K<0, K>0 */ /* allocate array with, for the moment, the largest possible size */ class_alloc(ppt->k[ppt->index_md_tensors], ((int)((k_max_cmb[ppt->index_md_tensors]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+1) *sizeof(double),ppt->error_message); /* first value */ index_k=0; k = k_min; ppt->k[ppt->index_md_tensors][index_k] = k; index_k++; /* values until k_max_cmb[ppt->index_md_tensors] */ while (k < k_max_cmb[ppt->index_md_tensors]) { /* the linear step is not constant, it has a step-like shape, centered around the characteristic scale set by the sound horizon at recombination (associated to the comoving wavenumber k_rec) */ step = (ppr->k_step_super + 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.) * (ppr->k_step_sub-ppr->k_step_super)) * k_rec; /* there is one other thing to take into account in the step size. There are two other characteristic scales that matter for the sampling: the Hubble scale today, k0=a0H0, and eventually curvature scale sqrt(|K|). We define "scale2" as the sum of the squared Hubble radius and squared curvature radius. We need to increase the sampling for k<sqrt(scale2), in order to get the first mutipoles accurate enough. The formula below reduces it gradually in the k-->0 limit, by up to a factor 10. The actual stepsize is still fixed by k_step_super, this is just a reduction factor. 
*/ scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K); step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction); class_test(step / k < ppr->smallest_allowed_variation, ppt->error_message, "k step =%e < machine precision : leads either to numerical error or infinite loop", step * k_rec); k += step; class_test(k <= ppt->k[ppt->index_md_tensors][index_k-1], ppt->error_message, "consecutive values of k should differ and should be in growing order"); ppt->k[ppt->index_md_tensors][index_k] = k; index_k++; } ppt->k_size_cmb[ppt->index_md_tensors] = index_k; ppt->k_size_cl[ppt->index_md_tensors] = index_k; ppt->k_size[ppt->index_md_tensors] = index_k; class_realloc(ppt->k[ppt->index_md_tensors], ppt->k[ppt->index_md_tensors], ppt->k_size[ppt->index_md_tensors]*sizeof(double), ppt->error_message); } /* If user asked for k_output_values, add those to all k lists: */ if (ppt->k_output_values_num>0){ /* Allocate storage */ class_alloc(ppt->index_k_output_values,sizeof(double)*ppt->md_size*ppt->k_output_values_num,ppt->error_message); /** Find indices in ppt->k[index_md] corresponding to 'k_output_values'. We are assuming that ppt->k is sorted and growing, and we have made sure that ppt->k_output_values is also sorted and growing.*/ for (index_mode=0; index_mode<ppt->md_size; index_mode++){ newk_size = ppt->k_size[index_mode]+ppt->k_output_values_num; class_alloc(tmp_k_list,sizeof(double)*newk_size,ppt->error_message); index_k=0; index_k_output=0; for (index_newk=0; index_newk<newk_size; index_newk++){ /** Decide if we should add k_output_value now. 
This has to be this complicated, since we can only compare the k-values when both indices are in range.*/ if (index_k >= ppt->k_size[index_mode]) add_k_output_value = _TRUE_; else if (index_k_output >= ppt->k_output_values_num) add_k_output_value = _FALSE_; else if (ppt->k_output_values[index_k_output] < ppt->k[index_mode][index_k]) add_k_output_value = _TRUE_; else add_k_output_value = _FALSE_; if (add_k_output_value == _TRUE_){ tmp_k_list[index_newk] = ppt->k_output_values[index_k_output]; ppt->index_k_output_values[index_mode*ppt->k_output_values_num+index_k_output]=index_newk; index_k_output++; } else{ tmp_k_list[index_newk] = ppt->k[index_mode][index_k]; index_k++; } } free(ppt->k[index_mode]); ppt->k[index_mode] = tmp_k_list; ppt->k_size[index_mode] = newk_size; index_k = newk_size-1; while (ppt->k[index_mode][index_k] > k_max_cl[index_mode]) index_k--; ppt->k_size_cl[index_mode] = MIN(index_k+2,ppt->k_size[index_mode]); index_k = newk_size-1; while (ppt->k[index_mode][index_k] > k_max_cmb[index_mode]) index_k--; ppt->k_size_cmb[index_mode] = MIN(index_k+2,ppt->k_size[index_mode]); /** The two MIN statements is here because in a normal run, the cl and cmb arrays contain a single k value larger than their respective k_max. We are mimicking this behaviour. 
*/ } } /* For testing, can be useful to print the k list in a file: FILE * out=fopen("output/k","w"); for (index_k=0; index_k < ppt->k_size[0]; index_k++) { fprintf(out,"%e\n",ppt->k[0][index_k],pba->K); } fclose(out); */ /* finally, find the global k_min and k_max for the ensemble of all modes 9scalars, vectors, tensors) */ ppt->k_min = _HUGE_; ppt->k_max = 0.; if (ppt->has_scalars == _TRUE_) { ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_scalars][0]); /* first value, inferred from perturbations structure */ ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_scalars][ppt->k_size[ppt->index_md_scalars]-1]); /* last value, inferred from perturbations structure */ } if (ppt->has_vectors == _TRUE_) { ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_vectors][0]); /* first value, inferred from perturbations structure */ ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_vectors][ppt->k_size[ppt->index_md_vectors]-1]); /* last value, inferred from perturbations structure */ } if (ppt->has_tensors == _TRUE_) { ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_tensors][0]); /* first value, inferred from perturbations structure */ ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_tensors][ppt->k_size[ppt->index_md_tensors]-1]); /* last value, inferred from perturbations structure */ } free(k_max_cmb); free(k_max_cl); return _SUCCESS_; } /** * Initialize a perturb_workspace structure. All fields are allocated * here, with the exception of the perturb_vector '->pv' field, which * is allocated separately in perturb_vector_init. We allocate one * such perturb_workspace structure per thread and per mode * (scalar/../tensor). Then, for each thread, all initial conditions * and wavenumbers will use the same workspace. 
* * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to the thermodynamics structure * @param ppt Input: pointer to the perturbation structure * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param ppw Input/Output: pointer to perturb_workspace structure which fields are allocated or filled here * @return the error status */ int perturb_workspace_init( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt, int index_md, struct perturb_workspace * ppw ) { /** Summary: */ /** - define local variables */ int index_mt=0; int index_ap; int l; /** Compute maximum l_max for any multipole */; if (_scalars_) { ppw->max_l_max = MAX(ppr->l_max_g, ppr->l_max_pol_g); if (pba->has_ur == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ur); if (pba->has_ncdm == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ncdm); if (pba->has_dr == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_dr); } if (_tensors_) { ppw->max_l_max = MAX(ppr->l_max_g_ten, ppr->l_max_pol_g_ten); if (pba->has_ur == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ur); if (pba->has_ncdm == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ncdm); } /** Allocate s_l[] array for freestreaming of multipoles (see arXiv:1305.3261) and initialise to 1.0 which is the K=0 value. 
*/ class_alloc(ppw->s_l, sizeof(double)*(ppw->max_l_max+1),ppt->error_message); for (l=0; l<=ppw->max_l_max; l++){ ppw->s_l[l] = 1.0; } /** - define indices of metric perturbations obeying to constraint equations (this can be done once and for all, because the vector of metric perturbations is the same whatever the approximation scheme, unlike the vector of quantities to be integrated, which is allocated separately in perturb_vector_init) */ if (_scalars_) { /* newtonian gauge */ if (ppt->gauge == newtonian) { class_define_index(ppw->index_mt_psi,_TRUE_,index_mt,1); /* psi */ class_define_index(ppw->index_mt_phi_prime,_TRUE_,index_mt,1); /* phi' */ } /* synchronous gauge (note that eta is counted in the vector of quantities to be integrated, while here we only consider quantities obeying to constraint equations) */ if (ppt->gauge == synchronous) { class_define_index(ppw->index_mt_h_prime,_TRUE_,index_mt,1); /* h' */ class_define_index(ppw->index_mt_h_prime_prime,_TRUE_,index_mt,1); /* h'' */ class_define_index(ppw->index_mt_eta_prime,_TRUE_,index_mt,1); /* eta' */ class_define_index(ppw->index_mt_alpha,_TRUE_,index_mt,1); /* alpha = (h' + 6 tau') / (2 k**2) */ class_define_index(ppw->index_mt_alpha_prime,_TRUE_,index_mt,1); /* alpha' */ class_define_index(ppw->index_mt_vx_smg,pba->has_smg,index_mt,1); /* vx_smg (can be dynamical or not) */ class_define_index(ppw->index_mt_vx_prime_smg,pba->has_smg,index_mt,1); /* vx_smg' (can be dynamical or not) */ class_define_index(ppw->index_mt_vx_prime_prime_smg,pba->has_smg,index_mt,1); /* vx_smg'' (passed to integrator) */ } } if (_vectors_) { /* newtonian gauge */ if (ppt->gauge == newtonian) { class_define_index(ppw->index_mt_V_prime,_TRUE_,index_mt,1); } if (ppt->gauge == synchronous) { class_define_index(ppw->index_mt_hv_prime_prime,_TRUE_,index_mt,1); } } if (_tensors_) { class_define_index(ppw->index_mt_gw_prime_prime,_TRUE_,index_mt,1); } ppw->mt_size = index_mt; /** - allocate some workspace in which we will store 
temporarily the values of background, thermodynamics, metric and source quantities at a given time */ class_alloc(ppw->pvecback,pba->bg_size_normal*sizeof(double),ppt->error_message); class_alloc(ppw->pvecthermo,pth->th_size*sizeof(double),ppt->error_message); class_alloc(ppw->pvecmetric,ppw->mt_size*sizeof(double),ppt->error_message); /** - count number of approximation, initialize their indices, and allocate their flags */ index_ap=0; class_define_index(ppw->index_ap_tca,_TRUE_,index_ap,1); class_define_index(ppw->index_ap_rsa,_TRUE_,index_ap,1); if (_scalars_) { class_define_index(ppw->index_ap_ufa,pba->has_ur,index_ap,1); class_define_index(ppw->index_ap_ncdmfa,pba->has_ncdm,index_ap,1); } ppw->ap_size=index_ap; if (ppw->ap_size > 0) class_alloc(ppw->approx,ppw->ap_size*sizeof(int),ppt->error_message); /** - For definitness, initialize approximation flags to arbitrary values (correct values are overwritten in pertub_find_approximation_switches) */ if (_scalars_) { ppw->approx[ppw->index_ap_tca]=(int)tca_on; ppw->approx[ppw->index_ap_rsa]=(int)rsa_off; if (pba->has_ur == _TRUE_) { ppw->approx[ppw->index_ap_ufa]=(int)ufa_off; } if (pba->has_ncdm == _TRUE_) { ppw->approx[ppw->index_ap_ncdmfa]=(int)ncdmfa_off; } } if (_tensors_) { ppw->approx[ppw->index_ap_tca]=(int)tca_on; ppw->approx[ppw->index_ap_rsa]=(int)rsa_off; } /** - allocate fields where some of the perturbations are stored */ if (_scalars_) { if ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) { class_alloc(ppw->delta_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message); class_alloc(ppw->theta_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message); class_alloc(ppw->shear_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message); } } return _SUCCESS_; } /** * Free the perturb_workspace structure (with the exception of the * perturb_vector '->pv' field, which is freed separately in * perturb_vector_free). 
* * @param ppt Input: pointer to the perturbation structure * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param ppw Input: pointer to perturb_workspace structure to be freed * @return the error status */ int perturb_workspace_free ( struct perturbs * ppt, int index_md, struct perturb_workspace * ppw ) { free(ppw->s_l); free(ppw->pvecback); free(ppw->pvecthermo); free(ppw->pvecmetric); if (ppw->ap_size > 0) free(ppw->approx); if (_scalars_) { if ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) { free(ppw->delta_ncdm); free(ppw->theta_ncdm); free(ppw->shear_ncdm); } } free(ppw); return _SUCCESS_; } /** * Solve the perturbation evolution for a given mode, initial * condition and wavenumber, and compute the corresponding source * functions. * * For a given mode, initial condition and wavenumber, this function * finds the time ranges over witch the perturbations can be described * within a given approximation. For each such range, it initializes * (or redistribute) perturbations using perturb_vector_init(), and * integrates over time. Whenever a "source sampling time" is passed, * the source terms are computed and stored in the source table using * perturb_sources(). * * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to the thermodynamics structure * @param ppt Input/Output: pointer to the perturbation structure (output source functions S(k,tau) written here) * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param index_ic Input: index of initial condition under consideration (ad, iso...) 
 * @param index_k Input: index of wavenumber
 * @param ppw Input: pointer to perturb_workspace structure containing index values and workspaces
 * @return the error status
 */

int perturb_solve(
                  struct precision * ppr,
                  struct background * pba,
                  struct thermo * pth,
                  struct perturbs * ppt,
                  int index_md,
                  int index_ic,
                  int index_k,
                  struct perturb_workspace * ppw
                  ) {

  /** Summary: */

  /** - define local variables */

  /* contains all fixed parameters, indices and workspaces used by the perturb_derivs function */
  struct perturb_parameters_and_workspace ppaw;

  /* conformal time */
  double tau,tau_lower,tau_upper,tau_mid;

  /* multipole */
  int l;

  /* index running over time */
  int index_tau;

  /* number of values in the tau_sampling array that should be considered for a given mode */
  int tau_actual_size;

  /* running index over types (temperature, etc) */
  int index_type;

  /* fourier mode */
  double k;

  /* number of time intervals where the approximation scheme is uniform */
  int interval_number;

  /* index running over such time intervals */
  int index_interval;

  /* number of time intervals where each particular approximation is uniform */
  int * interval_number_of;

  /* edge of intervals where approximation scheme is uniform: tau_ini, tau_switch_1, ..., tau_end */
  double * interval_limit;

  /* array of approximation scheme within each interval: interval_approx[index_interval][index_ap] */
  int ** interval_approx;

  /* index running over approximations */
  int index_ap;

  /* approximation scheme within previous interval: previous_approx[index_ap] */
  int * previous_approx;

  int n_ncdm,is_early_enough;

  /* function pointer to ODE evolver and names of possible evolvers
     (declared extern here so the concrete evolver is resolved at link
     time and selected at run time via ppr->evolver) */
  extern int evolver_rk();
  extern int evolver_ndf15();
  int (*generic_evolver)();

  /* Related to the perturbation output */
  int (*perhaps_print_variables)();
  int index_ikout;

  /** - initialize indices relevant for back/thermo tables search */
  ppw->last_index_back=0;
  ppw->last_index_thermo=0;
  ppw->inter_mode = pba->inter_normal;

  /** - get wavenumber value */
  k = ppt->k[index_md][index_k];

  class_test(k == 0.,
             ppt->error_message,
             "stop to avoid division by zero");

  /** If non-zero curvature, update array of free-streaming coefficients ppw->s_l */
  if (pba->has_curvature == _TRUE_){
    for (l = 0; l<=ppw->max_l_max; l++){
      /* clamped at 0 so that the sqrt argument never goes negative for large l */
      ppw->s_l[l] = sqrt(MAX(1.0-pba->K*(l*l-1.0)/k/k,0.));
    }
  }

  /** - maximum value of tau for which sources are calculated for this wavenumber */

  /* by default, today */
  tau_actual_size = ppt->tau_size;

  /** - using bisection, compute minimum value of tau for which this
      wavenumber is integrated */

  /* will be at least the first time in the background table */
  tau_lower = pba->tau_table[0];

  class_call(background_at_tau(pba,
                               tau_lower,
                               pba->normal_info,
                               pba->inter_normal,
                               &(ppw->last_index_back),
                               ppw->pvecback),
             pba->error_message,
             ppt->error_message);

  class_call(thermodynamics_at_z(pba,
                                 pth,
                                 1./ppw->pvecback[pba->index_bg_a]-1.,
                                 pth->inter_normal,
                                 &(ppw->last_index_thermo),
                                 ppw->pvecback,
                                 ppw->pvecthermo),
             pth->error_message,
             ppt->error_message);

  /* check that this initial time is indeed OK given imposed
     conditions on kappa' and on k/aH */

  class_test(ppw->pvecback[pba->index_bg_a]*
             ppw->pvecback[pba->index_bg_H]/
             ppw->pvecthermo[pth->index_th_dkappa] >
             ppr->start_small_k_at_tau_c_over_tau_h,
             ppt->error_message,
             "your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time before that at which the background has been integrated. You should increase 'start_small_k_at_tau_c_over_tau_h' up to at least %g, or decrease 'a_ini_over_a_today_default'\n",
             ppw->pvecback[pba->index_bg_a]*
             ppw->pvecback[pba->index_bg_H]/
             ppw->pvecthermo[pth->index_th_dkappa]);

  class_test(k/ppw->pvecback[pba->index_bg_a]/ppw->pvecback[pba->index_bg_H] >
             ppr->start_large_k_at_tau_h_over_tau_k,
             ppt->error_message,
             "your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time before that at which the background has been integrated. You should increase 'start_large_k_at_tau_h_over_tau_k' up to at least %g, or decrease 'a_ini_over_a_today_default'\n",
             ppt->k[index_md][ppt->k_size[index_md]-1]/ppw->pvecback[pba->index_bg_a]/ ppw->pvecback[pba->index_bg_H]);

  /* non-cold relics must still be ultra-relativistic (w close to 1/3)
     at the chosen starting time */
  if (pba->has_ncdm == _TRUE_) {

    for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {

      class_test(fabs(ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]-1./3.)>ppr->tol_ncdm_initial_w,
                 ppt->error_message,
                 "your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time at which the ncdm species number %d is not ultra-relativistic anymore, with w=%g, p=%g and rho=%g\n",
                 n_ncdm,
                 ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm],
                 ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm],
                 ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]);
    }
  }

  /* is at most the time at which sources must be sampled */
  tau_upper = ppt->tau_sampling[0];

  /* start bisection: shrink [tau_lower, tau_upper] until the earliest
     acceptable integration start time is bracketed to relative
     tolerance tol_tau_approx */
  tau_mid = 0.5*(tau_lower + tau_upper);

  while ((tau_upper - tau_lower)/tau_lower > ppr->tol_tau_approx) {

    is_early_enough = _TRUE_;

    class_call(background_at_tau(pba,
                                 tau_mid,
                                 pba->normal_info,
                                 pba->inter_normal,
                                 &(ppw->last_index_back),
                                 ppw->pvecback),
               pba->error_message,
               ppt->error_message);

    /* if there are non-cold relics, check that they are relativistic enough */
    if (pba->has_ncdm == _TRUE_) {
      for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {
        if (fabs(ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]-1./3.) > ppr->tol_ncdm_initial_w)
          is_early_enough = _FALSE_;
      }
    }

    /* also check that the two conditions on (aH/kappa') and (aH/k) are fulfilled */
    if (is_early_enough == _TRUE_) {

      class_call(thermodynamics_at_z(pba,
                                     pth,
                                     1./ppw->pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                     pth->inter_normal,
                                     &(ppw->last_index_thermo),
                                     ppw->pvecback,
                                     ppw->pvecthermo),
                 pth->error_message,
                 ppt->error_message);

      if ((ppw->pvecback[pba->index_bg_a]*
           ppw->pvecback[pba->index_bg_H]/
           ppw->pvecthermo[pth->index_th_dkappa] >
           ppr->start_small_k_at_tau_c_over_tau_h) ||
          (k/ppw->pvecback[pba->index_bg_a]/ppw->pvecback[pba->index_bg_H] >
           ppr->start_large_k_at_tau_h_over_tau_k))

        is_early_enough = _FALSE_;
    }

    if (is_early_enough == _TRUE_)
      tau_lower = tau_mid;
    else
      tau_upper = tau_mid;

    tau_mid = 0.5*(tau_lower + tau_upper);

  }

  tau = tau_mid;

  /** - find the number of intervals over which approximation scheme is constant */

  class_alloc(interval_number_of,ppw->ap_size*sizeof(int),ppt->error_message);

  ppw->inter_mode = pba->inter_normal;

  class_call(perturb_find_approximation_number(ppr,
                                               pba,
                                               pth,
                                               ppt,
                                               index_md,
                                               k,
                                               ppw,
                                               tau,
                                               ppt->tau_sampling[tau_actual_size-1],
                                               &interval_number,
                                               interval_number_of),
             ppt->error_message,
             ppt->error_message);

  class_alloc(interval_limit,(interval_number+1)*sizeof(double),ppt->error_message);

  class_alloc(interval_approx,interval_number*sizeof(int*),ppt->error_message);

  for (index_interval=0; index_interval<interval_number; index_interval++)
    class_alloc(interval_approx[index_interval],ppw->ap_size*sizeof(int),ppt->error_message);

  class_call(perturb_find_approximation_switches(ppr,
                                                 pba,
                                                 pth,
                                                 ppt,
                                                 index_md,
                                                 k,
                                                 ppw,
                                                 tau,
                                                 ppt->tau_sampling[tau_actual_size-1],
                                                 ppr->tol_tau_approx,
                                                 interval_number,
                                                 interval_number_of,
                                                 interval_limit,
                                                 interval_approx),
             ppt->error_message,
             ppt->error_message);

  free(interval_number_of);

  /** - fill the structure containing all fixed parameters, indices
      and workspaces needed by perturb_derivs */

  ppaw.ppr = ppr;
  ppaw.pba = pba;
  ppaw.pth = pth;
  ppaw.ppt = ppt;
  ppaw.index_md = index_md;
  ppaw.index_ic = index_ic;
  ppaw.index_k = index_k;
  ppaw.k = k;
  ppaw.ppw = ppw;
  /* closeby interpolation mode: presumably faster for the sequential,
     nearby tau values visited during integration -- confirm against
     background_at_tau */
  ppaw.ppw->inter_mode = pba->inter_closeby;
  ppaw.ppw->last_index_back = 0;
  ppaw.ppw->last_index_thermo = 0;

  /** - check whether we need to print perturbations to a file for this wavenumber */

  perhaps_print_variables = NULL;
  ppw->index_ikout = -1;
  for (index_ikout=0; index_ikout<ppt->k_output_values_num; index_ikout++){
    if (ppt->index_k_output_values[index_md*ppt->k_output_values_num+index_ikout] == index_k){
      ppw->index_ikout = index_ikout;
      perhaps_print_variables = perturb_print_variables;
      /**class_call(perturb_prepare_output_file( pba,ppt,ppw,index_ikout,index_md), ppt->error_message, ppt->error_message); */
    }
  }

  /** - loop over intervals over which approximatiomn scheme is
      uniform. For each interval: */

  for (index_interval=0; index_interval<interval_number; index_interval++) {

    /** (a) fix the approximation scheme */

    for (index_ap=0; index_ap<ppw->ap_size; index_ap++)
      ppw->approx[index_ap]=interval_approx[index_interval][index_ap];

    /** (b) get the previous approximation scheme. If the current
        interval starts from the initial time tau_ini, the previous
        approximation is set to be a NULL pointer, so that the
        function perturb_vector_init() knows that perturbations must
        be initialized */

    if (index_interval==0) {
      previous_approx=NULL;
    }
    else {
      previous_approx=interval_approx[index_interval-1];
    }

    /** (c) define the vector of perturbations to be integrated
        over. If the current interval starts from the initial time
        tau_ini, fill the vector with initial conditions for each
        mode. If it starts from an approximation switching point,
        redistribute correctly the perturbations from the previous to
        the new vector of perturbations. */

    class_call(perturb_vector_init(ppr,
                                   pba,
                                   pth,
                                   ppt,
                                   index_md,
                                   index_ic,
                                   k,
                                   interval_limit[index_interval],
                                   ppw,
                                   previous_approx),
               ppt->error_message,
               ppt->error_message);

    /** (d) integrate the perturbations over the current interval. */

    if(ppr->evolver == rk){
      generic_evolver = evolver_rk;
    }
    else{
      generic_evolver = evolver_ndf15;
    }

    class_call(generic_evolver(perturb_derivs,
                               interval_limit[index_interval],
                               interval_limit[index_interval+1],
                               ppw->pv->y,
                               ppw->pv->used_in_sources,
                               ppw->pv->pt_size,
                               &ppaw,
                               ppr->tol_perturb_integration,
                               ppr->smallest_allowed_variation,
                               perturb_timescale,
                               ppr->perturb_integration_stepsize,
                               ppt->tau_sampling,
                               tau_actual_size,
                               perturb_sources,
                               perhaps_print_variables,
                               ppt->error_message),
               ppt->error_message,
               ppt->error_message);

  }

  /** - if perturbations were printed in a file, close the file */

  //if (perhaps_print_variables != NULL)
  //  fclose(ppw->perturb_output_file);

  /** fill the source terms array with zeros for all times between
      then last integrated time tau_max and tau_today. */

  for (index_tau = tau_actual_size; index_tau < ppt->tau_size; index_tau++) {
    for (index_type = 0; index_type < ppt->tp_size[index_md]; index_type++) {
      ppt->sources[index_md]
        [index_ic * ppt->tp_size[index_md] + index_type]
        [index_tau * ppt->k_size[index_md] + index_k] = 0.;
    }
  }

  /** - free quantitites allocated at the beginning of the routine */

  class_call(perturb_vector_free(ppw->pv),
             ppt->error_message,
             ppt->error_message);

  for (index_interval=0; index_interval<interval_number; index_interval++)
    free(interval_approx[index_interval]);

  free(interval_approx);

  free(interval_limit);

  return _SUCCESS_;
}

int perturb_prepare_output(struct background * pba, struct perturbs * ppt){

  int n_ncdm;
  char tmp[40];

  ppt->scalar_titles[0]='\0';
  ppt->vector_titles[0]='\0';
  ppt->tensor_titles[0]='\0';

  if (ppt->k_output_values_num > 0) {

    /** Write titles for all perturbations that we would like to print/store.
*/ if (ppt->has_scalars == _TRUE_){ class_store_columntitle(ppt->scalar_titles,"tau [Mpc]",_TRUE_); class_store_columntitle(ppt->scalar_titles,"a",_TRUE_); class_store_columntitle(ppt->scalar_titles,"delta_g",_TRUE_); class_store_columntitle(ppt->scalar_titles,"theta_g",_TRUE_); class_store_columntitle(ppt->scalar_titles,"shear_g",_TRUE_); class_store_columntitle(ppt->scalar_titles,"pol0_g",_TRUE_); class_store_columntitle(ppt->scalar_titles,"pol1_g",_TRUE_); class_store_columntitle(ppt->scalar_titles,"pol2_g",_TRUE_); class_store_columntitle(ppt->scalar_titles,"delta_b",_TRUE_); class_store_columntitle(ppt->scalar_titles,"theta_b",_TRUE_); class_store_columntitle(ppt->scalar_titles,"psi",_TRUE_); class_store_columntitle(ppt->scalar_titles,"phi",_TRUE_); /* Perturbed recombination */ class_store_columntitle(ppt->scalar_titles,"delta_Tb",ppt->has_perturbed_recombination); class_store_columntitle(ppt->scalar_titles,"delta_chi",ppt->has_perturbed_recombination); /* Ultrarelativistic species */ class_store_columntitle(ppt->scalar_titles,"delta_ur",pba->has_ur); class_store_columntitle(ppt->scalar_titles,"theta_ur",pba->has_ur); class_store_columntitle(ppt->scalar_titles,"shear_ur",pba->has_ur); /* Cold dark matter */ class_store_columntitle(ppt->scalar_titles,"delta_cdm",pba->has_cdm); class_store_columntitle(ppt->scalar_titles,"theta_cdm",pba->has_cdm); /* Non-cold dark matter */ if ((pba->has_ncdm == _TRUE_) && ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_))) { for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ sprintf(tmp,"delta_ncdm[%d]",n_ncdm); class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_); sprintf(tmp,"theta_ncdm[%d]",n_ncdm); class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_); sprintf(tmp,"shear_ncdm[%d]",n_ncdm); class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_); } } /* Decaying cold dark matter */ class_store_columntitle(ppt->scalar_titles, "delta_dcdm", pba->has_dcdm); 
class_store_columntitle(ppt->scalar_titles, "theta_dcdm", pba->has_dcdm); /* Decay radiation */ class_store_columntitle(ppt->scalar_titles, "delta_dr", pba->has_dr); class_store_columntitle(ppt->scalar_titles, "theta_dr", pba->has_dr); class_store_columntitle(ppt->scalar_titles, "shear_dr", pba->has_dr); /* Scalar field scf */ class_store_columntitle(ppt->scalar_titles, "delta_scf", pba->has_scf); class_store_columntitle(ppt->scalar_titles, "theta_scf", pba->has_scf); /* Scalar field smg */ class_store_columntitle(ppt->scalar_titles, "V_x_smg", pba->has_smg); class_store_columntitle(ppt->scalar_titles, "V_x_prime_smg", pba->has_smg); ppt->number_of_scalar_titles = get_number_of_titles(ppt->scalar_titles); } if (ppt->has_tensors == _TRUE_){ class_store_columntitle(ppt->tensor_titles,"tau [Mpc]",_TRUE_); class_store_columntitle(ppt->tensor_titles,"a",_TRUE_); class_store_columntitle(ppt->tensor_titles,"delta_g",_TRUE_); class_store_columntitle(ppt->tensor_titles,"shear_g",_TRUE_); class_store_columntitle(ppt->tensor_titles,"l4_g",_TRUE_); class_store_columntitle(ppt->tensor_titles,"pol0_g",_TRUE_); class_store_columntitle(ppt->tensor_titles,"pol2_g",_TRUE_); class_store_columntitle(ppt->tensor_titles,"pol4_g",_TRUE_); class_store_columntitle(ppt->tensor_titles,"H (gw)",_TRUE_); class_store_columntitle(ppt->tensor_titles,"Hdot (gwdot)",_TRUE_); class_store_columntitle(ppt->tensor_titles,"delta_ur",ppt->evolve_tensor_ur); class_store_columntitle(ppt->tensor_titles,"shear_ur",ppt->evolve_tensor_ur); class_store_columntitle(ppt->tensor_titles,"l4_ur",ppt->evolve_tensor_ur); if (ppt->evolve_tensor_ncdm == _TRUE_) { for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ sprintf(tmp,"delta_ncdm[%d]",n_ncdm); class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_); sprintf(tmp,"theta_ncdm[%d]",n_ncdm); class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_); sprintf(tmp,"shear_ncdm[%d]",n_ncdm); class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_); } } ppt->number_of_tensor_titles = 
get_number_of_titles(ppt->tensor_titles); } } return _SUCCESS_; } /** * For a given mode and wavenumber, find the number of interval of * times bewteen tau_ini and tau_end such that the approximation * scheme (and the number of perturbation equations) is uniform. * * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to the thermodynamics structure * @param ppt Input: pointer to the perturbation structure * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param index_k Input: index of wavenumber * @param ppw Input: pointer to perturb_workspace structure containing index values and workspaces * @param tau_ini Input: initial time of the perturbation integration * @param tau_end Input: final time of the perturbation integration * @param interval_number Output: total number of intervals * @param interval_number_of Output: number of intervals with respect to each particular approximation * @return the error status */ int perturb_find_approximation_number( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt, int index_md, double k, struct perturb_workspace * ppw, double tau_ini, double tau_end, int * interval_number, int * interval_number_of /* interval_number_of[index_ap] (already allocated) */ ){ /* index running over approximations */ int index_ap; /* value of a given approximation at tau_ini and tau_end */ int flag_ini,flag_end; /** - fix default number of intervals to one (if no approximation switch) */ *interval_number=1; /** - loop over each approximation and add the number of approximation switching times */ for (index_ap=0; index_ap<ppw->ap_size; index_ap++) { class_call(perturb_approximations(ppr, pba, pth, ppt, index_md, k, tau_ini, ppw), ppt->error_message, ppt->error_message); flag_ini = ppw->approx[index_ap]; class_call(perturb_approximations(ppr, pba, pth, ppt, index_md, k, tau_end, ppw), ppt->error_message, 
ppt->error_message);

    flag_end = ppw->approx[index_ap];

    /* flags are assumed to only increase with time, so the enum labels
       of each approximation scheme must be declared chronologically */
    class_test(flag_end<flag_ini,
               ppt->error_message,
               "For each approximation scheme, the declaration of approximation labels in the enumeration must follow chronological order, e.g: enum approx_flags {flag1, flag2, flag3} with flag1 being the initial one and flag3 the final one");

    /* each unit step of the flag corresponds to one switching time */
    *interval_number += flag_end-flag_ini;

    interval_number_of[index_ap] = flag_end-flag_ini+1;
  }

  return _SUCCESS_;
}

/**
 * For a given mode and wavenumber, find the values of time at which
 * the approximation changes.
 *
 * @param ppr                Input: pointer to precision structure
 * @param pba                Input: pointer to background structure
 * @param pth                Input: pointer to the thermodynamics structure
 * @param ppt                Input: pointer to the perturbation structure
 * @param index_md           Input: index of mode under consideration (scalar/.../tensor)
 * @param k                  Input: wavenumber
 * @param ppw                Input: pointer to perturb_workspace structure containing index values and workspaces
 * @param tau_ini            Input: initial time of the perturbation integration
 * @param tau_end            Input: final time of the perturbation integration
 * @param precision          Input: tolerance on the bisection search for each switching time
 * @param interval_number    Input: total number of intervals
 * @param interval_number_of Input: number of intervals with respect to each particular approximation
 * @param interval_limit     Output: value of time at the boundary of the intervals: tau_ini, tau_switch1, ..., tau_end
 * @param interval_approx    Output: value of approximations in each interval
 * @return the error status
 */
int perturb_find_approximation_switches(
                                        struct precision * ppr,
                                        struct background * pba,
                                        struct thermo * pth,
                                        struct perturbs * ppt,
                                        int index_md,
                                        double k,
                                        struct perturb_workspace * ppw,
                                        double tau_ini,
                                        double tau_end,
                                        double precision,
                                        int interval_number,
                                        int * interval_number_of,
                                        double * interval_limit, /* interval_limit[index_interval] (already allocated) */
                                        int ** interval_approx   /* interval_approx[index_interval][index_ap] (already allocated) */
                                        ){

  int index_ap;
  int index_switch;
  int index_switch_tot;
  int num_switch;
  double tau_min,lower_bound,upper_bound;
  double mid=0;
  double * unsorted_tau_switch;
  double next_tau_switch;
  int flag_ini;
  int num_switching_at_given_time;

  /** - write in output arrays the initial time and approximation */

  interval_limit[0]=tau_ini;

  class_call(perturb_approximations(ppr,
                                    pba,
                                    pth,
                                    ppt,
                                    index_md,
                                    k,
                                    tau_ini,
                                    ppw),
             ppt->error_message,
             ppt->error_message);

  for (index_ap=0; index_ap<ppw->ap_size; index_ap++)
    interval_approx[0][index_ap]=ppw->approx[index_ap];

  /** - if there are no approximation switches, just write final time and return */

  if (interval_number == 1) {

    interval_limit[1]=tau_end;

  }

  /** - if there are switches, consider approximations one after each
      other. Find each switching time by bisection. Store all switches in
      arbitrary order in array unsorted_tau_switch[] */

  else {

    class_alloc(unsorted_tau_switch,(interval_number-1)*sizeof(double),ppt->error_message);

    index_switch_tot=0;

    for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {

      if (interval_number_of[index_ap] > 1) {

        num_switch = interval_number_of[index_ap]-1;

        tau_min = tau_ini;

        flag_ini = interval_approx[0][index_ap];

        for (index_switch=0; index_switch<num_switch; index_switch++) {

          /* bisect in [tau_min, tau_end] for the time at which the flag
             steps from (flag_ini+index_switch) to the next value */
          lower_bound=tau_min;
          upper_bound=tau_end;
          mid = 0.5*(lower_bound+upper_bound);

          while (upper_bound - lower_bound > precision) {

            class_call(perturb_approximations(ppr,
                                              pba,
                                              pth,
                                              ppt,
                                              index_md,
                                              k,
                                              mid,
                                              ppw),
                       ppt->error_message,
                       ppt->error_message);

            if (ppw->approx[index_ap] > flag_ini+index_switch) {
              upper_bound=mid;
            }
            else {
              lower_bound=mid;
            }

            mid = 0.5*(lower_bound+upper_bound);

          }

          unsorted_tau_switch[index_switch_tot]=mid;
          index_switch_tot++;

          /* subsequent switches of the same scheme are later than this one */
          tau_min=mid;

        }
      }
    }

    class_test(index_switch_tot != (interval_number-1),
               ppt->error_message,
               "bug in approximation switch search routine: should have %d = %d",
               index_switch_tot,interval_number-1);

    /** - now sort interval limits in correct order */

    index_switch_tot=1;

    while (index_switch_tot < interval_number) {

      /* selection sort: pick the smallest unsorted switch larger than
         the last stored limit */
      next_tau_switch=tau_end;
      for (index_switch=0; index_switch<interval_number-1; index_switch++) {
        if ((unsorted_tau_switch[index_switch] > interval_limit[index_switch_tot-1]) &&
            (unsorted_tau_switch[index_switch] < next_tau_switch)) {
          next_tau_switch=unsorted_tau_switch[index_switch];
        }
      }
      interval_limit[index_switch_tot]=next_tau_switch;
      index_switch_tot++;
    }

    interval_limit[index_switch_tot]=tau_end;

    class_test(index_switch_tot != interval_number,
               ppt->error_message,
               "most probably two approximation switching time were found to be equal, which cannot be handled\n");

    /** - store each approximation in chronological order */

    for (index_switch=1; index_switch<interval_number; index_switch++) {

      /* evaluate the approximation flags at the midpoint of each interval */
      class_call(perturb_approximations(ppr,
                                        pba,
                                        pth,
                                        ppt,
                                        index_md,
                                        k,
                                        0.5*(interval_limit[index_switch]+interval_limit[index_switch+1]),
                                        ppw),
                 ppt->error_message,
                 ppt->error_message);

      for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {
        interval_approx[index_switch][index_ap]=ppw->approx[index_ap];

        /* check here that approximation does not go backward (remember
           that by definition the value of an approximation can only
           increase) */
        class_test(interval_approx[index_switch][index_ap] < interval_approx[index_switch-1][index_ap],
                   ppt->error_message,
                   "The approximation with label %d is not defined correctly: it goes backward (from %d to %d) for k=%e and between tau=%e and %e; this cannot be handled\n",
                   index_ap,
                   interval_approx[index_switch-1][index_ap],
                   interval_approx[index_switch][index_ap],
                   k,
                   0.5*(interval_limit[index_switch-1]+interval_limit[index_switch]),
                   0.5*(interval_limit[index_switch]+interval_limit[index_switch+1])
                   );
      }

      /* check here that more than one approximation is not switched on at a given time */
      num_switching_at_given_time=0;
      for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {
        if (interval_approx[index_switch][index_ap] != interval_approx[index_switch-1][index_ap])
          num_switching_at_given_time++;
      }
      class_test(num_switching_at_given_time != 1,
                 ppt->error_message,
                 "for k=%e, at tau=%g, you switch %d approximations at the same time, this cannot be handled. Usually happens in two cases: triggers for different approximations coincide, or one approx is reversible\n",
                 k,
                 interval_limit[index_switch],
                 num_switching_at_given_time);

      /* optional verbose reporting of each individual switch */
      if (ppt->perturbations_verbose>2) {

        if (_scalars_) {

          if ((interval_approx[index_switch-1][ppw->index_ap_tca]==(int)tca_on) &&
              (interval_approx[index_switch][ppw->index_ap_tca]==(int)tca_off))
            fprintf(stdout,"Mode k=%e: will switch off tight-coupling approximation at tau=%e\n",k,interval_limit[index_switch]);
          //fprintf(stderr,"Mode k=%e: will switch off tight-coupling approximation at tau=%e\n",k,interval_limit[index_switch]); //TBC

          if ((interval_approx[index_switch-1][ppw->index_ap_rsa]==(int)rsa_off) &&
              (interval_approx[index_switch][ppw->index_ap_rsa]==(int)rsa_on))
            fprintf(stdout,"Mode k=%e: will switch on radiation streaming approximation at tau=%e\n",k,interval_limit[index_switch]);

          if (pba->has_ur == _TRUE_) {
            if ((interval_approx[index_switch-1][ppw->index_ap_ufa]==(int)ufa_off) &&
                (interval_approx[index_switch][ppw->index_ap_ufa]==(int)ufa_on)) {
              fprintf(stdout,"Mode k=%e: will switch on ur fluid approximation at tau=%e\n",k,interval_limit[index_switch]);
            }
          }
          if (pba->has_ncdm == _TRUE_) {
            if ((interval_approx[index_switch-1][ppw->index_ap_ncdmfa]==(int)ncdmfa_off) &&
                (interval_approx[index_switch][ppw->index_ap_ncdmfa]==(int)ncdmfa_on)) {
              fprintf(stdout,"Mode k=%e: will switch on ncdm fluid approximation at tau=%e\n",k,interval_limit[index_switch]);
            }
          }
        }

        if (_tensors_) {

          if ((interval_approx[index_switch-1][ppw->index_ap_tca]==(int)tca_on) &&
              (interval_approx[index_switch][ppw->index_ap_tca]==(int)tca_off))
            fprintf(stdout,"Mode k=%e: will switch off tight-coupling approximation for tensors at tau=%e\n",k,interval_limit[index_switch]);

          if ((interval_approx[index_switch-1][ppw->index_ap_rsa]==(int)rsa_off) &&
              (interval_approx[index_switch][ppw->index_ap_rsa]==(int)rsa_on))
            fprintf(stdout,"Mode k=%e: will switch on radiation streaming approximation for tensors at tau=%e\n",k,interval_limit[index_switch]);
        }
      }
    }

    free(unsorted_tau_switch);

    /* leave ppw->approx consistent with the final time */
    class_call(perturb_approximations(ppr,
                                      pba,
                                      pth,
                                      ppt,
                                      index_md,
                                      k,
                                      tau_end,
                                      ppw),
               ppt->error_message,
               ppt->error_message);

  }

  return _SUCCESS_;
}

/**
 * Initialize the field '->pv' of a perturb_workspace structure, which
 * is a perturb_vector structure. This structure contains indices and
 * values of all quantities which need to be integrated with respect
 * to time (and only them: quantities fixed analytically or obeying
 * constraint equations are NOT included in this vector). This routine
 * distinguishes between two cases:
 *
 * -> the input pa_old is set to the NULL pointer:
 *
 * This happens when we start integrating over a new wavenumber and we
 * want to set initial conditions for the perturbations. Then, it is
 * assumed that ppw->pv is not yet allocated. This routine allocates
 * it, defines all indices, and then fills the vector ppw->pv->y with
 * the initial conditions defined in perturb_initial_conditions.
 *
 * -> the input pa_old is not set to the NULL pointer and describes
 * some set of approximations:
 *
 * This happens when we need to change the approximation scheme while
 * integrating over a given wavenumber. The new approximation
 * described by ppw->pa is then different from pa_old. Then, this
 * routine allocates a new vector with a new size and new index
 * values; it fills this vector with initial conditions taken from the
 * previous vector passed as an input in ppw->pv, and eventually with
 * some analytic approximations for the new variables appearing at
 * this time; then the new vector comes in replacement of the old one,
 * which is freed.
* * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to the thermodynamics structure * @param ppt Input: pointer to the perturbation structure * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param index_ic Input: index of initial condition under consideration (ad, iso...) * @param k Input: wavenumber * @param tau Input: conformal time * @param ppw Input/Output: workspace containing input the approximation scheme, the background/thermodynamics/metric quantitites, and eventually the previous vector y; and in output the new vector y. * @param pa_old Input: NULL is we need to set y to initial conditions for a new wavnumber; points towards a perturb_approximations if we want to switch of approximation. * @return the error status */ int perturb_vector_init( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt, int index_md, int index_ic, double k, double tau, struct perturb_workspace * ppw, /* ppw->pv unallocated if pa_old = NULL, allocated and filled otherwise */ int * pa_old ) { /** Summary: */ /** - define local variables */ struct perturb_vector * ppv; int index_pt; int l; int n_ncdm,index_q,ncdm_l_size; double rho_plus_p_ncdm,q,q2,epsilon,a,factor; /** - allocate a new perturb_vector structure to which ppw->pv will point at the end of the routine */ class_alloc(ppv,sizeof(struct perturb_vector),ppt->error_message); /** - initialize pointers to NULL (they will be allocated later if needed), relevant for perturb_vector_free() */ ppv->l_max_ncdm = NULL; ppv->q_size_ncdm = NULL; /** - defines all indices in this new vector (depends on approximation scheme, described by the input structure ppw->pa) */ index_pt = 0; if (_scalars_) { /* reject inconsistent values of the number of mutipoles in photon temperature hierachy */ class_test(ppr->l_max_g < 4, ppt->error_message, "ppr->l_max_g should be at least 4, i.e. 
we must integrate at least over photon density, velocity, shear, third and fourth momentum"); /* reject inconsistent values of the number of mutipoles in photon polarization hierachy */ class_test(ppr->l_max_pol_g < 4, ppt->error_message, "ppr->l_max_pol_g should be at least 4"); /* reject inconsistent values of the number of mutipoles in decay radiation hierachy */ if (pba->has_dr == _TRUE_) { class_test(ppr->l_max_dr < 4, ppt->error_message, "ppr->l_max_dr should be at least 4, i.e. we must integrate at least over neutrino/relic density, velocity, shear, third and fourth momentum"); } /* reject inconsistent values of the number of mutipoles in ultra relativistic neutrino hierachy */ if (pba->has_ur == _TRUE_) { class_test(ppr->l_max_ur < 4, ppt->error_message, "ppr->l_max_ur should be at least 4, i.e. we must integrate at least over neutrino/relic density, velocity, shear, third and fourth momentum"); } /* photons */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */ /* temperature */ ppv->l_max_g = ppr->l_max_g; class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */ class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */ class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* higher momenta */ /* polarization */ ppv->l_max_pol_g = ppr->l_max_pol_g; class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1); class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1); class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1); class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2); } } /* baryons */ class_define_index(ppv->index_pt_delta_b,_TRUE_,index_pt,1); /* baryon density */ class_define_index(ppv->index_pt_theta_b,_TRUE_,index_pt,1); /* baryon velocity */ /* cdm */ 
class_define_index(ppv->index_pt_delta_cdm,pba->has_cdm,index_pt,1); /* cdm density */ class_define_index(ppv->index_pt_theta_cdm,pba->has_cdm && (ppt->gauge == newtonian),index_pt,1); /* cdm velocity */ /* dcdm */ class_define_index(ppv->index_pt_delta_dcdm,pba->has_dcdm,index_pt,1); /* dcdm density */ class_define_index(ppv->index_pt_theta_dcdm,pba->has_dcdm,index_pt,1); /* dcdm velocity */ /* ultra relativistic decay radiation */ if (pba->has_dr==_TRUE_){ ppv->l_max_dr = ppr->l_max_dr; class_define_index(ppv->index_pt_F0_dr,_TRUE_,index_pt,ppv->l_max_dr+1); /* all momenta in Boltzmann hierarchy */ } /* fluid */ class_define_index(ppv->index_pt_delta_fld,pba->has_fld,index_pt,1); /* fluid density */ class_define_index(ppv->index_pt_theta_fld,pba->has_fld,index_pt,1); /* fluid velocity */ /* scalar field */ class_define_index(ppv->index_pt_phi_scf,pba->has_scf,index_pt,1); /* scalar field density */ class_define_index(ppv->index_pt_phi_prime_scf,pba->has_scf,index_pt,1); /* scalar field velocity */ /* scalar field: integration indices */ class_define_index(ppv->index_pt_vx_smg,pba->has_smg,index_pt,1); /* dynamical scalar field perturbation */ class_define_index(ppv->index_pt_vx_prime_smg,pba->has_smg,index_pt,1); /* dynamical scalar field velocity */ /* perturbed recombination: the indices are defined once tca is off. 
*/ if ( (ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){ class_define_index(ppv->index_pt_perturbed_recombination_delta_temp,_TRUE_,index_pt,1); class_define_index(ppv->index_pt_perturbed_recombination_delta_chi,_TRUE_,index_pt,1); } /* ultra relativistic neutrinos */ if (pba->has_ur && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) { class_define_index(ppv->index_pt_delta_ur,_TRUE_,index_pt,1); /* density of ultra-relativistic neutrinos/relics */ class_define_index(ppv->index_pt_theta_ur,_TRUE_,index_pt,1); /* velocity of ultra-relativistic neutrinos/relics */ class_define_index(ppv->index_pt_shear_ur,_TRUE_,index_pt,1); /* shear of ultra-relativistic neutrinos/relics */ if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) { ppv->l_max_ur = ppr->l_max_ur; class_define_index(ppv->index_pt_l3_ur,_TRUE_,index_pt,ppv->l_max_ur-2); /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3) */ } } /* non-cold dark matter */ if (pba->has_ncdm == _TRUE_) { ppv->index_pt_psi0_ncdm1 = index_pt; /* density of ultra-relativistic neutrinos/relics */ ppv->N_ncdm = pba->N_ncdm; class_alloc(ppv->l_max_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message); class_alloc(ppv->q_size_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message); for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){ // Set value of ppv->l_max_ncdm: if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_off){ /* reject inconsistent values of the number of mutipoles in ultra relativistic neutrino hierachy */ class_test(ppr->l_max_ncdm < 4, ppt->error_message, "ppr->l_max_ncdm=%d should be at least 4, i.e. 
we must integrate at least over first four momenta of non-cold dark matter perturbed phase-space distribution",n_ncdm); //Copy value from precision parameter: ppv->l_max_ncdm[n_ncdm] = ppr->l_max_ncdm; ppv->q_size_ncdm[n_ncdm] = pba->q_size_ncdm[n_ncdm]; } else{ // In the fluid approximaation, hierarcy is cut at lmax = 2 and q dependance is integrated out: ppv->l_max_ncdm[n_ncdm] = 2; ppv->q_size_ncdm[n_ncdm] = 1; } index_pt += (ppv->l_max_ncdm[n_ncdm]+1)*ppv->q_size_ncdm[n_ncdm]; } } /* metric (only quantitites to be integrated, not those obeying constraint equations) */ /* metric perturbation eta of synchronous gauge */ class_define_index(ppv->index_pt_eta,ppt->gauge == synchronous,index_pt,1); /* metric perturbation phi of newtonian gauge ( we could fix it using Einstein equations as a constraint equation for phi, but integration is numerically more stable if we actually evolve phi) */ class_define_index(ppv->index_pt_phi,ppt->gauge == newtonian,index_pt,1); } if (_vectors_) { /* Vector baryon velocity: v_b^{(1)}. 
*/ class_define_index(ppv->index_pt_theta_b,_TRUE_,index_pt,1); /* eventually reject inconsistent values of the number of mutipoles in photon temperature hierachy and polarisation*/ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */ ppv->l_max_g = ppr->l_max_g_ten; class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */ class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */ class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */ class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* photon l=3 */ ppv->l_max_pol_g = ppr->l_max_pol_g_ten; class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1); /* photon polarization, l=0 */ class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1); /* photon polarization, l=1 */ class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1); /* photon polarization, l=2 */ class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2); /* photon polarization, l=3 */ } } /** (b) metric perturbations V or h_v depending on gauge */ if (ppt->gauge == synchronous){ class_define_index(ppv->index_pt_hv_prime,_TRUE_,index_pt,1); } if (ppt->gauge == newtonian){ class_define_index(ppv->index_pt_V,_TRUE_,index_pt,1); } } if (_tensors_) { /* reject inconsistent values of the number of mutipoles in photon temperature hierachy */ class_test(ppr->l_max_g_ten < 4, ppt->error_message, "ppr->l_max_g_ten should be at least 4, i.e. 
we must integrate at least over photon density, velocity, shear, third momentum"); /* reject inconsistent values of the number of mutipoles in photon polarization hierachy */ class_test(ppr->l_max_pol_g_ten < 4, ppt->error_message, "ppr->l_max_pol_g_ten should be at least 4"); if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */ ppv->l_max_g = ppr->l_max_g_ten; class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */ class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */ class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */ class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* photon l=3 */ ppv->l_max_pol_g = ppr->l_max_pol_g_ten; class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1); /* photon polarization, l=0 */ class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1); /* photon polarization, l=1 */ class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1); /* photon polarization, l=2 */ class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2); /* photon polarization, l=3 */ } } /* ultra relativistic neutrinos */ class_define_index(ppv->index_pt_delta_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur density */ class_define_index(ppv->index_pt_theta_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur velocity */ class_define_index(ppv->index_pt_shear_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur shear */ ppv->l_max_ur = ppr->l_max_ur; class_define_index(ppv->index_pt_l3_ur,ppt->evolve_tensor_ur,index_pt,ppv->l_max_ur-2); /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3) */ if (ppt->evolve_tensor_ncdm == _TRUE_) { ppv->index_pt_psi0_ncdm1 = index_pt; ppv->N_ncdm = pba->N_ncdm; class_alloc(ppv->l_max_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message); 
class_alloc(ppv->q_size_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message); for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){ // Set value of ppv->l_max_ncdm: class_test(ppr->l_max_ncdm < 4, ppt->error_message, "ppr->l_max_ncdm=%d should be at least 4, i.e. we must integrate at least over first four momenta of non-cold dark matter perturbed phase-space distribution",n_ncdm); //Copy value from precision parameter: ppv->l_max_ncdm[n_ncdm] = ppr->l_max_ncdm; ppv->q_size_ncdm[n_ncdm] = pba->q_size_ncdm[n_ncdm]; index_pt += (ppv->l_max_ncdm[n_ncdm]+1)*ppv->q_size_ncdm[n_ncdm]; } } /** (b) metric perturbation h is a propagating degree of freedom, so h and hdot are included in the vector of ordinary perturbations, no in that of metric perturbations */ class_define_index(ppv->index_pt_gw,_TRUE_,index_pt,1); /* tensor metric perturbation h (gravitational waves) */ class_define_index(ppv->index_pt_gwdot,_TRUE_,index_pt,1); /* its time-derivative */ } ppv->pt_size = index_pt; /** - allocate vectors for storing the values of all these quantities and their time-derivatives at a given time */ class_calloc(ppv->y,ppv->pt_size,sizeof(double),ppt->error_message); class_alloc(ppv->dy,ppv->pt_size*sizeof(double),ppt->error_message); class_alloc(ppv->used_in_sources,ppv->pt_size*sizeof(int),ppt->error_message); /** - specify which perturbations are needed in the evaluation of source terms */ /* take all of them by default */ for (index_pt=0; index_pt<ppv->pt_size; index_pt++) ppv->used_in_sources[index_pt] = _TRUE_; /* indicate which ones are not needed (this is just for saving time, omitting perturbations in this list will not change the results!) 
*/ if (_scalars_) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* we don't need temperature multipoles above l=2 (but they are defined only when rsa and tca are off) */ for (index_pt=ppv->index_pt_l3_g; index_pt <= ppv->index_pt_delta_g+ppv->l_max_g; index_pt++) ppv->used_in_sources[index_pt]=_FALSE_; /* for polarisation, we only need l=0,2 (but l =1,3, ... are defined only when rsa and tca are off) */ ppv->used_in_sources[ppv->index_pt_pol1_g]=_FALSE_; for (index_pt=ppv->index_pt_pol3_g; index_pt <= ppv->index_pt_pol0_g+ppv->l_max_pol_g; index_pt++) ppv->used_in_sources[index_pt]=_FALSE_; } } if (pba->has_ur == _TRUE_) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) { /* we don't need ur multipoles above l=2 (but they are defined only when rsa and ufa are off) */ for (index_pt=ppv->index_pt_l3_ur; index_pt <= ppv->index_pt_delta_ur+ppv->l_max_ur; index_pt++) ppv->used_in_sources[index_pt]=_FALSE_; } } } if (pba->has_ncdm == _TRUE_) { /* we don't need ncdm multipoles above l=2 (but they are defined only when ncdmfa is off) */ index_pt = ppv->index_pt_psi0_ncdm1; for(n_ncdm = 0; n_ncdm < ppv-> N_ncdm; n_ncdm++){ for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){ for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){ if (l>2) ppv->used_in_sources[index_pt]=_FALSE_; index_pt++; } } } } } if (_tensors_) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* we don't need temperature multipoles above except l=0,2,4 */ ppv->used_in_sources[ppv->index_pt_theta_g]=_FALSE_; ppv->used_in_sources[ppv->index_pt_l3_g]=_FALSE_; for (index_pt=ppv->index_pt_delta_g+5; index_pt <= ppv->index_pt_delta_g+ppv->l_max_g; index_pt++) ppv->used_in_sources[index_pt]=_FALSE_; /* same for polarisation, we only need l=0,2,4 */ 
ppv->used_in_sources[ppv->index_pt_pol1_g]=_FALSE_; ppv->used_in_sources[ppv->index_pt_pol3_g]=_FALSE_; for (index_pt=ppv->index_pt_pol0_g+5; index_pt <= ppv->index_pt_pol0_g+ppv->l_max_pol_g; index_pt++) ppv->used_in_sources[index_pt]=_FALSE_; } } /* we need h' but not h */ ppv->used_in_sources[ppv->index_pt_gw]=_FALSE_; } /** - case of setting initial conditions for a new wavenumber */ if (pa_old == NULL) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: initializing vector at tau=%e\n",k,tau); if (_scalars_) { /** (a) check that current approximation scheme is consistent with initial conditions */ class_test(ppw->approx[ppw->index_ap_rsa] == (int)rsa_on, ppt->error_message, "scalar initial conditions assume radiation streaming approximation turned off"); if (pba->has_ur == _TRUE_) { class_test(ppw->approx[ppw->index_ap_ufa] == (int)ufa_on, ppt->error_message, "scalar initial conditions assume ur fluid approximation turned off"); } if (pba->has_ncdm == _TRUE_) { class_test(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on, ppt->error_message, "scalar initial conditions assume ncdm fluid approximation turned off"); } class_test(ppw->approx[ppw->index_ap_tca] == (int)tca_off, ppt->error_message, "scalar initial conditions assume tight-coupling approximation turned on"); } if (_tensors_) { class_test(ppw->approx[ppw->index_ap_tca] == (int)tca_off, ppt->error_message, "tensor initial conditions assume tight-coupling approximation turned on"); class_test(ppw->approx[ppw->index_ap_rsa] == (int)rsa_on, ppt->error_message, "tensor initial conditions assume radiation streaming approximation turned off"); } /** (b) let ppw->pv points towards the perturb_vector structure that we just created */ ppw->pv = ppv; /** (c) fill the vector ppw->pv->y with appropriate initial conditions */ class_call(perturb_initial_conditions(ppr, pba, ppt, index_md, index_ic, k, tau, ppw), ppt->error_message, ppt->error_message); } /** - case of switching approximation while a 
wavenumber is being integrated */ else { /** (a) for the scalar mode: */ if (_scalars_) { /** -- check that the change of approximation scheme makes sense (note: before calling this routine there is already a check that we wish to change only one approximation flag at a time) */ class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on), ppt->error_message, "at tau=%g: the tight-coupling approximation can be switched off, not on",tau); /** -- some variables (b, cdm, fld, ...) are not affected by any approximation. They need to be reconducted whatever the approximation switching is. We treat them here. Below we will treat other variables case by case. */ ppv->y[ppv->index_pt_delta_b] = ppw->pv->y[ppw->pv->index_pt_delta_b]; ppv->y[ppv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_b]; if (pba->has_cdm == _TRUE_) { ppv->y[ppv->index_pt_delta_cdm] = ppw->pv->y[ppw->pv->index_pt_delta_cdm]; if (ppt->gauge == newtonian) { ppv->y[ppv->index_pt_theta_cdm] = ppw->pv->y[ppw->pv->index_pt_theta_cdm]; } } if (pba->has_dcdm == _TRUE_) { ppv->y[ppv->index_pt_delta_dcdm] = ppw->pv->y[ppw->pv->index_pt_delta_dcdm]; ppv->y[ppv->index_pt_theta_dcdm] = ppw->pv->y[ppw->pv->index_pt_theta_dcdm]; } if (pba->has_dr == _TRUE_){ for (l=0; l <= ppv->l_max_dr; l++) ppv->y[ppv->index_pt_F0_dr+l] = ppw->pv->y[ppw->pv->index_pt_F0_dr+l]; } if (pba->has_fld == _TRUE_) { ppv->y[ppv->index_pt_delta_fld] = ppw->pv->y[ppw->pv->index_pt_delta_fld]; ppv->y[ppv->index_pt_theta_fld] = ppw->pv->y[ppw->pv->index_pt_theta_fld]; } if (pba->has_scf == _TRUE_) { ppv->y[ppv->index_pt_phi_scf] = ppw->pv->y[ppw->pv->index_pt_phi_scf]; ppv->y[ppv->index_pt_phi_prime_scf] = ppw->pv->y[ppw->pv->index_pt_phi_prime_scf]; } if (pba->has_smg == _TRUE_) {//pass the values only if the order is correct ppv->y[ppv->index_pt_vx_smg] = ppw->pv->y[ppw->pv->index_pt_vx_smg]; ppv->y[ppv->index_pt_vx_prime_smg] = ppw->pvecmetric[ppw->index_mt_vx_prime_smg]; } if 
(ppt->gauge == synchronous) ppv->y[ppv->index_pt_eta] = ppw->pv->y[ppw->pv->index_pt_eta]; if (ppt->gauge == newtonian) ppv->y[ppv->index_pt_phi] = ppw->pv->y[ppw->pv->index_pt_phi]; /* -- case of switching off tight coupling approximation. Provide correct initial conditions to new set of variables */ if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau); ppv->y[ppv->index_pt_delta_g] = ppw->pv->y[ppw->pv->index_pt_delta_g]; ppv->y[ppv->index_pt_theta_g] = ppw->pv->y[ppw->pv->index_pt_theta_g]; /* tight-coupling approximation for shear_g (previously computed in perturb_derivs: perturb_derivs is always called at the end of generic_evolver, in order to update all quantities in ppw to the time at which the approximation is switched off) */ ppv->y[ppv->index_pt_shear_g] = ppw->tca_shear_g; ppv->y[ppv->index_pt_l3_g] = 6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->s_l[3]*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for l=3 */ ppv->y[ppv->index_pt_pol0_g] = 2.5*ppv->y[ppv->index_pt_shear_g]; /* first-order tight-coupling approximation for polarization, l=0 */ ppv->y[ppv->index_pt_pol1_g] = k/ppw->pvecthermo[pth->index_th_dkappa]*(5.-2.*ppw->s_l[2])/6.*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for polarization, l=1 */ ppv->y[ppv->index_pt_pol2_g] = 0.5*ppv->y[ppv->index_pt_shear_g]; /* first-order tight-coupling approximation for polarization, l=2 */ ppv->y[ppv->index_pt_pol3_g] = k/ppw->pvecthermo[pth->index_th_dkappa]*3.*ppw->s_l[3]/14.*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for polarization, l=3 */ if (pba->has_ur == _TRUE_) { ppv->y[ppv->index_pt_delta_ur] = ppw->pv->y[ppw->pv->index_pt_delta_ur]; ppv->y[ppv->index_pt_theta_ur] = ppw->pv->y[ppw->pv->index_pt_theta_ur]; 
ppv->y[ppv->index_pt_shear_ur] = ppw->pv->y[ppw->pv->index_pt_shear_ur];

          /* l>=3 ur multipoles only exist when the ur fluid approximation is off */
          if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {

            ppv->y[ppv->index_pt_l3_ur] = ppw->pv->y[ppw->pv->index_pt_l3_ur];

            for (l=4; l <= ppv->l_max_ur; l++)
              ppv->y[ppv->index_pt_delta_ur+l] = ppw->pv->y[ppw->pv->index_pt_delta_ur+l];

          }
        }

        /* copy over all ncdm momentum-bin multipoles, flattened into one index */
        if (pba->has_ncdm == _TRUE_) {
          index_pt = 0;
          for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
            for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
              for(l=0; l<=ppv->l_max_ncdm[n_ncdm];l++){
                // This is correct with or without ncdmfa, since ppv->l_max_ncdm is set accordingly.
                ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] = ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
                index_pt++;
              }
            }
          }
        }

        /* perturbed recombination */
        /* the initial conditions are set when tca is switched off (current block) */
        if (ppt->has_perturbed_recombination == _TRUE_){
          /* NOTE(review): this line mixes vectors — it indexes the NEW
             vector ppv->y with the OLD vector's index
             ppw->pv->index_pt_delta_b. Everywhere else in this routine
             ppv->y is paired with ppv->index_pt_... and ppw->pv->y with
             ppw->pv->index_pt_... . The two delta_b indices presumably
             coincide (delta_b is allocated before any approximation-
             dependent variables in both vectors), so this likely works by
             accident — confirm, and consider ppv->index_pt_delta_b. */
          ppv->y[ppv->index_pt_perturbed_recombination_delta_temp] = 1./3.*ppv->y[ppw->pv->index_pt_delta_b];
          ppv->y[ppv->index_pt_perturbed_recombination_delta_chi] =0.;
        }

      } // end of block tca ON -> tca OFF

      /* perturbed recombination */
      /* For any other transition in the approximation scheme, we should just copy the value of the perturbations, provided tca is already off (otherwise the indices are not yet allocated). For instance, we do not want to copy the values in the (k,tau) region where both UFA and TCA are engaged.*/

      if ((ppt->has_perturbed_recombination == _TRUE_)&&(pa_old[ppw->index_ap_tca]==(int)tca_off)){
        ppv->y[ppv->index_pt_perturbed_recombination_delta_temp] = ppw->pv->y[ppw->pv->index_pt_perturbed_recombination_delta_temp];
        ppv->y[ppv->index_pt_perturbed_recombination_delta_chi] = ppw->pv->y[ppw->pv->index_pt_perturbed_recombination_delta_chi];
      }

      /* -- case of switching on radiation streaming approximation.
Provide correct initial conditions to new set of variables */ if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]); if (pba->has_ncdm == _TRUE_) { index_pt = 0; for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){ for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){ for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){ ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] = ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt]; index_pt++; } } } } } /* -- case of switching on ur fluid approximation. Provide correct initial conditions to new set of variables */ if (pba->has_ur == _TRUE_) { if ((pa_old[ppw->index_ap_ufa] == (int)ufa_off) && (ppw->approx[ppw->index_ap_ufa] == (int)ufa_on)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch on ur fluid approximation at tau=%e\n",k,tau); if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { ppv->y[ppv->index_pt_delta_g] = ppw->pv->y[ppw->pv->index_pt_delta_g]; ppv->y[ppv->index_pt_theta_g] = ppw->pv->y[ppw->pv->index_pt_theta_g]; } if ((ppw->approx[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) { ppv->y[ppv->index_pt_shear_g] = ppw->pv->y[ppw->pv->index_pt_shear_g]; ppv->y[ppv->index_pt_l3_g] = ppw->pv->y[ppw->pv->index_pt_l3_g]; for (l = 4; l <= ppw->pv->l_max_g; l++) { ppv->y[ppv->index_pt_delta_g+l] = ppw->pv->y[ppw->pv->index_pt_delta_g+l]; } ppv->y[ppv->index_pt_pol0_g] = ppw->pv->y[ppw->pv->index_pt_pol0_g]; ppv->y[ppv->index_pt_pol1_g] = ppw->pv->y[ppw->pv->index_pt_pol1_g]; ppv->y[ppv->index_pt_pol2_g] = ppw->pv->y[ppw->pv->index_pt_pol2_g]; ppv->y[ppv->index_pt_pol3_g] = ppw->pv->y[ppw->pv->index_pt_pol3_g]; for (l = 4; l <= ppw->pv->l_max_pol_g; l++) { ppv->y[ppv->index_pt_pol0_g+l] = ppw->pv->y[ppw->pv->index_pt_pol0_g+l]; } } if 
(ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { ppv->y[ppv->index_pt_delta_ur] = ppw->pv->y[ppw->pv->index_pt_delta_ur]; ppv->y[ppv->index_pt_theta_ur] = ppw->pv->y[ppw->pv->index_pt_theta_ur]; ppv->y[ppv->index_pt_shear_ur] = ppw->pv->y[ppw->pv->index_pt_shear_ur]; } if (pba->has_ncdm == _TRUE_) { index_pt = 0; for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){ for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){ for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){ /** This is correct even when ncdmfa == off, since ppv->l_max_ncdm and ppv->q_size_ncdm is updated.*/ ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] = ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt]; index_pt++; } } } } } } /* -- case of switching on ncdm fluid approximation. Provide correct initial conditions to new set of variables */ if (pba->has_ncdm == _TRUE_) { if ((pa_old[ppw->index_ap_ncdmfa] == (int)ncdmfa_off) && (ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch on ncdm fluid approximation at tau=%e\n",k,tau); if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { ppv->y[ppv->index_pt_delta_g] = ppw->pv->y[ppw->pv->index_pt_delta_g]; ppv->y[ppv->index_pt_theta_g] = ppw->pv->y[ppw->pv->index_pt_theta_g]; } if ((ppw->approx[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) { ppv->y[ppv->index_pt_shear_g] = ppw->pv->y[ppw->pv->index_pt_shear_g]; ppv->y[ppv->index_pt_l3_g] = ppw->pv->y[ppw->pv->index_pt_l3_g]; for (l = 4; l <= ppw->pv->l_max_g; l++) { ppv->y[ppv->index_pt_delta_g+l] = ppw->pv->y[ppw->pv->index_pt_delta_g+l]; } ppv->y[ppv->index_pt_pol0_g] = ppw->pv->y[ppw->pv->index_pt_pol0_g]; ppv->y[ppv->index_pt_pol1_g] = ppw->pv->y[ppw->pv->index_pt_pol1_g]; ppv->y[ppv->index_pt_pol2_g] = ppw->pv->y[ppw->pv->index_pt_pol2_g]; ppv->y[ppv->index_pt_pol3_g] = ppw->pv->y[ppw->pv->index_pt_pol3_g]; for (l = 4; l <= ppw->pv->l_max_pol_g; l++) { ppv->y[ppv->index_pt_pol0_g+l] = 
ppw->pv->y[ppw->pv->index_pt_pol0_g+l]; } } if (pba->has_ur == _TRUE_) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { ppv->y[ppv->index_pt_delta_ur] = ppw->pv->y[ppw->pv->index_pt_delta_ur]; ppv->y[ppv->index_pt_theta_ur] = ppw->pv->y[ppw->pv->index_pt_theta_ur]; ppv->y[ppv->index_pt_shear_ur] = ppw->pv->y[ppw->pv->index_pt_shear_ur]; if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) { ppv->y[ppv->index_pt_l3_ur] = ppw->pv->y[ppw->pv->index_pt_l3_ur]; for (l=4; l <= ppv->l_max_ur; l++) ppv->y[ppv->index_pt_delta_ur+l] = ppw->pv->y[ppw->pv->index_pt_delta_ur+l]; } } } a = ppw->pvecback[pba->index_bg_a]; index_pt = ppw->pv->index_pt_psi0_ncdm1; for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){ // We are in the fluid approximation, so ncdm_l_size is always 3. ncdm_l_size = ppv->l_max_ncdm[n_ncdm]+1; rho_plus_p_ncdm = ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]; for(l=0; l<=2; l++){ ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+l] = 0.0; } factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4); for(index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q++){ // Integrate over distributions: q = pba->q_ncdm[n_ncdm][index_q]; q2 = q*q; epsilon = sqrt(q2+a*a*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]); ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm] += pba->w_ncdm[n_ncdm][index_q]*q2*epsilon* ppw->pv->y[index_pt]; ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+1] += pba->w_ncdm[n_ncdm][index_q]*q2*q* ppw->pv->y[index_pt+1]; ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+2] += pba->w_ncdm[n_ncdm][index_q]*q2*q2/epsilon* ppw->pv->y[index_pt+2]; //Jump to next momentum bin in ppw->pv->y: index_pt += (ppw->pv->l_max_ncdm[n_ncdm]+1); } ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm] *=factor/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+1] *=k*factor/rho_plus_p_ncdm; ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+2] 
*=2.0/3.0*factor/rho_plus_p_ncdm; } } } } /** (b) for the vector mode */ if (_vectors_) { /** -- check that the change of approximation scheme makes sense (note: before calling this routine there is already a check that we wish to change only one approximation flag at a time) */ class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on), ppt->error_message, "at tau=%g: the tight-coupling approximation can be switched off, not on",tau); /** -- some variables (gw, gwdot, ...) are not affected by any approximation. They need to be reconducted whatever the approximation switching is. We treat them here. Below we will treat other variables case by case. */ if (ppt->gauge == synchronous){ ppv->y[ppv->index_pt_hv_prime] = ppw->pv->y[ppw->pv->index_pt_hv_prime]; } if (ppt->gauge == newtonian){ ppv->y[ppv->index_pt_V] = ppw->pv->y[ppw->pv->index_pt_V]; } ppv->y[ppv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_b]; /* -- case of switching off tight coupling approximation. Provide correct initial conditions to new set of variables */ if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau); ppv->y[ppv->index_pt_delta_g] = 0.0; //TBC //-4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; ppv->y[ppv->index_pt_pol0_g] = 0.0; //TBC //1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; } /* -- case of switching on radiation streaming approximation. 
Provide correct initial conditions to new set of variables */ if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]); } } /** (c) for the tensor mode */ if (_tensors_) { /** -- check that the change of approximation scheme makes sense (note: before calling this routine there is already a check that we wish to change only one approximation flag at a time) */ class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on), ppt->error_message, "at tau=%g: the tight-coupling approximation can be switched off, not on",tau); /** -- some variables (gw, gwdot, ...) are not affected by any approximation. They need to be reconducted whatever the approximation switching is. We treat them here. Below we will treat other variables case by case. */ ppv->y[ppv->index_pt_gw] = ppw->pv->y[ppw->pv->index_pt_gw]; ppv->y[ppv->index_pt_gwdot] = ppw->pv->y[ppw->pv->index_pt_gwdot]; if (ppt->evolve_tensor_ur == _TRUE_){ /** For now, neutrinos go here. */ ppv->y[ppv->index_pt_delta_ur] = ppw->pv->y[ppw->pv->index_pt_delta_ur]; ppv->y[ppv->index_pt_theta_ur] = ppw->pv->y[ppw->pv->index_pt_theta_ur]; ppv->y[ppv->index_pt_shear_ur] = ppw->pv->y[ppw->pv->index_pt_shear_ur]; ppv->y[ppv->index_pt_l3_ur] = ppw->pv->y[ppw->pv->index_pt_l3_ur]; for (l=4; l <= ppv->l_max_ur; l++) ppv->y[ppv->index_pt_delta_ur+l] = ppw->pv->y[ppw->pv->index_pt_delta_ur+l]; } if (ppt->evolve_tensor_ncdm == _TRUE_){ index_pt = 0; for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){ for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){ for(l=0; l<=ppv->l_max_ncdm[n_ncdm];l++){ // This is correct with or without ncdmfa, since ppv->lmax_ncdm is set accordingly. 
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] = ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
              index_pt++;
            }
          }
        }
      }

      /* -- case of switching off tight coupling approximation. Provide
         correct initial conditions to new set of variables
         (first-order tight-coupling values for the photon monopole and
         polarization monopole, sourced by the tensor metric derivative gwdot) */

      if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) {

        if (ppt->perturbations_verbose>2)
          fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau);

        ppv->y[ppv->index_pt_delta_g] = -4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];

        ppv->y[ppv->index_pt_pol0_g] = 1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];

      }

      /* -- case of switching on radiation streaming approximation. Provide
         correct initial conditions to new set of variables (nothing to copy:
         the streamed multipoles are reconstructed analytically) */

      if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) {

        if (ppt->perturbations_verbose>2)
          fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]);

      }
    }

    /** (c) free the previous vector of perturbations */

    class_call(perturb_vector_free(ppw->pv),
               ppt->error_message,
               ppt->error_message);

    /** (d) let ppw->pv point towards the perturb_vector structure that we just created */

    ppw->pv = ppv;

  }

  return _SUCCESS_;
}

/**
 * Free the perturb_vector structure.
 *
 * Releases the optional ncdm bookkeeping arrays, the state vector y and
 * its derivative dy, the used_in_sources flag array, and finally the
 * structure itself.
 *
 * @param pv Input: pointer to perturb_vector structure to be freed
 * @return the error status
 */

int perturb_vector_free(
                        struct perturb_vector * pv
                        ) {

  /* ncdm arrays are presumably only allocated when ncdm species exist,
     hence the NULL guards (free(NULL) would be a no-op anyway per the C
     standard) — TODO confirm against perturb_vector_init */
  if (pv->l_max_ncdm != NULL) free(pv->l_max_ncdm);
  if (pv->q_size_ncdm != NULL) free(pv->q_size_ncdm);
  /* these three are always allocated together with the vector */
  free(pv->y);
  free(pv->dy);
  free(pv->used_in_sources);
  free(pv);

  return _SUCCESS_;

}

/**
 * For each mode, wavenumber and initial condition, this function
 * initializes all values in the vector of perturbed variables (in a
 * given gauge). It is assumed here that all values have been set
 * previously to zero, only non-zero values are set here.
* * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param ppt Input: pointer to the perturbation structure * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param index_ic Input: index of initial condition under consideration (ad, iso...) * @param k Input: wavenumber * @param tau Input: conformal time * @param ppw Input/Output: workspace containing input the approximation scheme, the background/thermodynamics/metric quantitites, and eventually the previous vector y; and in output the new vector y. * @return the error status */ int perturb_initial_conditions(struct precision * ppr, struct background * pba, struct perturbs * ppt, int index_md, int index_ic, double k, double tau, struct perturb_workspace * ppw ) { /** Summary: */ /** - declare local variables */ double a,a_prime_over_a; double delta_ur=0.,theta_ur=0.,shear_ur=0.,l3_ur=0.,eta=0.,delta_cdm=0.,alpha, alpha_prime; double delta_dr=0; double q,epsilon,k2; int index_q,n_ncdm,idx; double rho_r,rho_m,rho_nu,rho_m_over_rho_r; double fracnu,fracg,fracb,fraccdm,om; double ktau_two,ktau_three; double f_dr; double delta_tot; double velocity_tot; double s2_squared; /** - for scalars */ if (_scalars_) { /** (a) compute relevant background quantities: compute rho_r, rho_m, rho_nu (= all relativistic except photons), and their ratio. 
*/ class_call(background_at_tau(pba, tau, pba->normal_info, pba->inter_normal, &(ppw->last_index_back), ppw->pvecback), pba->error_message, ppt->error_message); a = ppw->pvecback[pba->index_bg_a]; a_prime_over_a = ppw->pvecback[pba->index_bg_H]*a; /* 8piG/3 rho_r(t_i) */ rho_r = ppw->pvecback[pba->index_bg_rho_g]; /* 8piG/3 rho_m(t_i) */ rho_m = ppw->pvecback[pba->index_bg_rho_b]; /* 8piG/3 rho_nu(t_i) (all neutrinos and collisionless relics being relativistic at that time) */ rho_nu = 0.; if (pba->has_cdm == _TRUE_) { rho_m += ppw->pvecback[pba->index_bg_rho_cdm]; } if (pba->has_dcdm == _TRUE_) { rho_m += ppw->pvecback[pba->index_bg_rho_dcdm]; } if (pba->has_dr == _TRUE_) { rho_r += ppw->pvecback[pba->index_bg_rho_dr]; rho_nu += ppw->pvecback[pba->index_bg_rho_dr]; } if (pba->has_ur == _TRUE_) { rho_r += ppw->pvecback[pba->index_bg_rho_ur]; rho_nu += ppw->pvecback[pba->index_bg_rho_ur]; } if (pba->has_ncdm == _TRUE_) { for(n_ncdm=0; n_ncdm<pba->N_ncdm; n_ncdm++){ rho_r += ppw->pvecback[pba->index_bg_rho_ncdm1 + n_ncdm]; rho_nu += ppw->pvecback[pba->index_bg_rho_ncdm1 + n_ncdm]; } } class_test(rho_r == 0., ppt->error_message, "stop to avoid division by zero"); /* f_nu = Omega_nu(t_i) / Omega_r(t_i) */ fracnu = rho_nu/rho_r; /* f_g = Omega_g(t_i) / Omega_r(t_i) */ fracg = ppw->pvecback[pba->index_bg_rho_g]/rho_r; /* f_b = Omega_b(t_i) / Omega_m(t_i) */ fracb = ppw->pvecback[pba->index_bg_rho_b]/rho_m; /* f_cdm = Omega_cdm(t_i) / Omega_m(t_i) */ fraccdm = 1.-fracb; /* Omega_m(t_i) / Omega_r(t_i) */ rho_m_over_rho_r = rho_m/rho_r; /* omega = Omega_m(t_i) a(t_i) H(t_i) / sqrt(Omega_r(t_i)) = Omega_m(t_0) a(t_0) H(t_0) / sqrt(Omega_r(t_0)) assuming rho_m in a-3 and rho_r in a^-4 = (8piG/3 rho_m(t_i)) a(t_i) / sqrt(8piG/3 rho_r(t_i)) in Mpc-1 This (a priori stange) parameter is the relevant one for expressing a as a function of tau during radiation and matter domination (but not DE domination). 
Indeed the exact solution of Friedmann when there is only radiation and matter in the universe is a = [H(t_0)^2 Omega_m(t_0) a(t_0)^3 / 4] x [tau^2 + 4 tau / omega] */ om = a*rho_m/sqrt(rho_r); /* (k tau)^2, (k tau)^3 */ ktau_two=k*k*tau*tau; ktau_three=k*tau*ktau_two; /* curvature-dependent factors */ s2_squared = 1.-3.*pba->K/k/k; /** (b) starts by setting everything in synchronous gauge. If another gauge is needed, we will perform a gauge transformation below. */ /** (b.1) adiabatic */ if ((ppt->has_ad == _TRUE_) && (index_ic == ppt->index_ic_ad)) { /* The following formulas are valid at leading order in (k*tau) and (om*tau), and order zero in tight-coupling. Identical to first order terms in CRS, except for normalization (when ppr->curvature_ini=1, tau=1: leads to factor 1/2 difference between CRS formulas with beta1=0). Identical to CAMB when om set to zero in theta_g, theta_ur, shear_ur, tau In the non-flat case the relation R=eta is still valid outsode the horizon for adiabatic IC. Hence eta is still set to ppr->curvature_ini at leading order. Factors s2 appear through the solution of Einstein equations and equations of motion. */ /* photon density */ ppw->pv->y[ppw->pv->index_pt_delta_g] = - ktau_two/3. * (1.-om*tau/5.) * ppr->curvature_ini * s2_squared; /* photon velocity */ ppw->pv->y[ppw->pv->index_pt_theta_g] = - k*ktau_three/36. 
* (1.-3.*(1.+5.*fracb-fracnu)/20./(1.-fracnu)*om*tau) * ppr->curvature_ini * s2_squared; /* tighly-coupled baryons */ ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* baryon density */ ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; /* baryon velocity */ if (pba->has_cdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_cdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* cdm density */ /* cdm velocity velocity vanishes in the synchronous gauge */ } if (pba->has_dcdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_dcdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* dcdm density */ /* dcdm velocity velocity vanishes initially in the synchronous gauge */ } /* fluid (assumes wa=0, if this is not the case the fluid will catch anyway the attractor solution) */ if (pba->has_fld == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_fld] = - ktau_two/4.*(1.+pba->w0_fld+pba->wa_fld)*(4.-3.*pba->cs2_fld)/(4.-6.*(pba->w0_fld+pba->wa_fld)+3.*pba->cs2_fld) * ppr->curvature_ini * s2_squared; /* from 1004.5509 */ //TBC: curvature ppw->pv->y[ppw->pv->index_pt_theta_fld] = - k*ktau_three/4.*pba->cs2_fld/(4.-6.*(pba->w0_fld+pba->wa_fld)+3.*pba->cs2_fld) * ppr->curvature_ini * s2_squared; /* from 1004.5509 */ //TBC:curvature } if (pba->has_scf == _TRUE_) { /** Canonical field (solving for the perturbations): initial perturbations set to zero, they should reach the attractor soon enough. TODO: Incorporate the attractor IC from 1004.5509 delta_phi = -(a/k)^2/phi'(rho + p)theta delta_phi_prime = a^2/phi' (delta_rho_phi + V'delta_phi) and assume theta, delta_rho as for perfect fluid with c_s^2 = 1 and w = 1/3 (ASSUMES radiation TRACKING) */ ppw->pv->y[ppw->pv->index_pt_phi_scf] = 0.; /** a*a/k/k/ppw->pvecback[pba->index_bg_phi_prime_scf]*k*ktau_three/4.*1./(4.-6.*(1./3.)+3.*1.) 
* (ppw->pvecback[pba->index_bg_rho_scf] + ppw->pvecback[pba->index_bg_p_scf])* ppr->curvature_ini * s2_squared; */ ppw->pv->y[ppw->pv->index_pt_phi_prime_scf] = 0.; /** delta_fld expression * rho_scf with the w = 1/3, c_s = 1 a*a/ppw->pvecback[pba->index_bg_phi_prime_scf]*( - ktau_two/4.*(1.+1./3.)*(4.-3.*1.)/(4.-6.*(1/3.)+3.*1.)*ppw->pvecback[pba->index_bg_rho_scf] - ppw->pvecback[pba->index_bg_dV_scf]*ppw->pv->y[ppw->pv->index_pt_phi_scf])* ppr->curvature_ini * s2_squared; */ } /* Initial conditions for the scalar field * Rigth now these are Adiabatic/Single clock * v_X = delta_phi/phi_dot * phi(t,x) = phi(tau+delta tau(x)) * This leads to very simple expressions: * v_X = delta tau = delta_cdm/a_prime_over_a and v_X_prime = 0 */ if (pba->has_smg == _TRUE_) { if (pba->pert_initial_conditions_smg == single_clock){ // single_clock IC given with respect to photons (because there are always photons) ppw->pv->y[ppw->pv->index_pt_vx_smg] = -1/(4.*ppw->pvecback[pba->index_bg_H])*ppw->pv->y[ppw->pv->index_pt_delta_g]; // Single clock IC => v_x^prime = 0 ppw->pv->y[ppw->pv->index_pt_vx_prime_smg] = 0. ; if(ppt->perturbations_verbose > 5) printf("Single clock IC for smg: "); } if (pba->pert_initial_conditions_smg == zero){ ppw->pv->y[ppw->pv->index_pt_vx_smg] = 0.; ppw->pv->y[ppw->pv->index_pt_vx_prime_smg] = 0. ; if(ppt->perturbations_verbose > 5) printf("Zero IC for smg: "); } if(ppt->perturbations_verbose > 5) printf("Vx = %e, Vx'= %e \n",ppw->pv->y[ppw->pv->index_pt_vx_smg],ppw->pv->y[ppw->pv->index_pt_vx_prime_smg]); } /* all relativistic relics: ur, early ncdm, dr */ if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_) || (pba->has_dr == _TRUE_)) { delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g]; /* density of ultra-relativistic neutrinos/relics */ theta_ur = - k*ktau_three/36./(4.*fracnu+15.) 
* (4.*fracnu+11.+12.*s2_squared-3.*(8.*fracnu*fracnu+50.*fracnu+275.)/20./(2.*fracnu+15.)*tau*om) * ppr->curvature_ini * s2_squared; /* velocity of ultra-relativistic neutrinos/relics */ //TBC shear_ur = ktau_two/(45.+12.*fracnu) * (3.*s2_squared-1.) * (1.+(4.*fracnu-5.)/4./(2.*fracnu+15.)*tau*om) * ppr->curvature_ini;//TBC /s2_squared; /* shear of ultra-relativistic neutrinos/relics */ //TBC:0 l3_ur = ktau_three*2./7./(12.*fracnu+45.)* ppr->curvature_ini;//TBC if (pba->has_dr == _TRUE_) delta_dr = delta_ur; } /* synchronous metric perturbation eta */ //eta = ppr->curvature_ini * (1.-ktau_two/12./(15.+4.*fracnu)*(5.+4.*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om)) / s2_squared; //eta = ppr->curvature_ini * s2_squared * (1.-ktau_two/12./(15.+4.*fracnu)*(15.*s2_squared-10.+4.*s2_squared*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om)); eta = ppr->curvature_ini * (1.-ktau_two/12./(15.+4.*fracnu)*(5.+4.*s2_squared*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om)); } /* isocurvature initial conditions taken from Bucher, Moodely, Turok 99, with just a different normalization convention for tau and the scale factor. [k tau] from BMT99 is left invariant because it is the ratio [k/aH]. But [Omega_i,0 tau] from BMT99 must be replaced by [frac_i*om*tau/4]. Some doubts remain about the niv formulas, that should be recheked at some point. We also checked that for bi,cdi,nid, everything coincides exactly with the CAMB formulas. 
*/ /** (b.2) Cold dark matter Isocurvature */ if ((ppt->has_cdi == _TRUE_) && (index_ic == ppt->index_ic_cdi)) { class_test(pba->has_cdm == _FALSE_, ppt->error_message, "not consistent to ask for CDI in absence of CDM!"); ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fraccdm*om*tau*(-2./3.+om*tau/4.); ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fraccdm*om*ktau_two/12.; ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; ppw->pv->y[ppw->pv->index_pt_delta_cdm] = ppr->entropy_ini+3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_)) { delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g]; theta_ur = ppw->pv->y[ppw->pv->index_pt_theta_g]; shear_ur = -ppr->entropy_ini*fraccdm*ktau_two*tau*om/6./(2.*fracnu+15.); } eta = -ppr->entropy_ini*fraccdm*om*tau*(1./6.-om*tau/16.); } /** (b.3) Baryon Isocurvature */ if ((ppt->has_bi == _TRUE_) && (index_ic == ppt->index_ic_bi)) { ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fracb*om*tau*(-2./3.+om*tau/4.); ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fracb*om*ktau_two/12.; ppw->pv->y[ppw->pv->index_pt_delta_b] = ppr->entropy_ini+3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; if (pba->has_cdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_cdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; } if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_)) { delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g]; theta_ur = ppw->pv->y[ppw->pv->index_pt_theta_g]; shear_ur = -ppr->entropy_ini*fracb*ktau_two*tau*om/6./(2.*fracnu+15.); } eta = -ppr->entropy_ini*fracb*om*tau*(1./6.-om*tau/16.); } /** (b.4) Neutrino density Isocurvature */ if ((ppt->has_nid == _TRUE_) && (index_ic == ppt->index_ic_nid)) { class_test((pba->has_ur == _FALSE_) && (pba->has_ncdm == _FALSE_), 
ppt->error_message, "not consistent to ask for NID in absence of ur or ncdm species!"); ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fracnu/fracg*(-1.+ktau_two/6.); ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fracnu/fracg*k*k*tau*(1./4.-fracb/fracg*3./16.*om*tau); ppw->pv->y[ppw->pv->index_pt_delta_b] = ppr->entropy_ini*fracnu/fracg/8.*ktau_two; ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; if (pba->has_cdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_cdm] = -ppr->entropy_ini*fracnu*fracb/fracg/80.*ktau_two*om*tau; } delta_ur = ppr->entropy_ini*(1.-ktau_two/6.); theta_ur = ppr->entropy_ini*k*k*tau/4.; shear_ur = ppr->entropy_ini*ktau_two/(4.*fracnu+15.)/2.; eta = -ppr->entropy_ini*fracnu/(4.*fracnu+15.)/6.*ktau_two; } /** (b.5) Neutrino velocity Isocurvature */ if ((ppt->has_niv == _TRUE_) && (index_ic == ppt->index_ic_niv)) { class_test((pba->has_ur == _FALSE_) && (pba->has_ncdm == _FALSE_), ppt->error_message, "not consistent to ask for NIV in absence of ur or ncdm species!"); ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*k*tau*fracnu/fracg* (1. - 3./16.*fracb*(2.+fracg)/fracg*om*tau); /* small diff wrt camb */ ppw->pv->y[ppw->pv->index_pt_theta_g] = ppr->entropy_ini*fracnu/fracg*3./4.*k* (-1.+3./4.*fracb/fracg*om*tau+3./16.*om*om*tau*tau*fracb/fracg/fracg*(fracg-3.*fracb)+ktau_two/6.); ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* small diff wrt camb */ ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; if (pba->has_cdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_cdm] = -ppr->entropy_ini*9./64.*fracnu*fracb/fracg*k*tau*om*tau; } delta_ur = -ppr->entropy_ini*k*tau*(1.+3./16.*fracb*fracnu/fracg*om*tau); /* small diff wrt camb */ theta_ur = ppr->entropy_ini*3./4.*k*(1. - 1./6.*ktau_two*(4.*fracnu+9.)/(4.*fracnu+5.)); shear_ur = ppr->entropy_ini/(4.*fracnu+15.)*k*tau*(1. 
+ 3.*om*tau*fracnu/(4.*fracnu+15.)); /* small diff wrt camb */ eta = ppr->entropy_ini*fracnu*k*tau*(-1./(4.*fracnu+5.) + (-3./64.*fracb/fracg+15./4./(4.*fracnu+15.)/(4.*fracnu+5.)*om*tau)); /* small diff wrt camb */ } /** (c) If the needed gauge is really the synchronous gauge, we need to affect the previously computed value of eta to the actual variable eta */ if (ppt->gauge == synchronous) { ppw->pv->y[ppw->pv->index_pt_eta] = eta; } /** (d) If the needed gauge is the newtonian gauge, we must compute alpha and then perform a gauge transformation for each variable */ if (ppt->gauge == newtonian) { /* alpha is like in Ma & Bertschinger: (h'+6 eta')/(2k^2). We obtain it from the first two Einstein equations: alpha = [eta + 3/2 (a'/a)^2 (delta_rho/rho_c) / k^2 /s_2^2 + 3/2 (a'/a)^3 3 ((rho+p)theta/rho_c) / k^4 / s_2^2] / (a'/a) = [eta + 3/2 (a'/a)^2 / k^2 /s_2^2 {delta_tot + 3 (a'/a) /k^2 velocity_tot}] / (a'/a) with delta_tot = (delta_rho/rho_c) = [rho_r delta_r + rho_m delta_m] / (rho_r + rho_m) = [delta_r + (rho_m/rho_r) delta_m] / (1 + rho_m/rho_r) = [(f_g delta_g + f_nu delta_nu) + (rho_m/rho_r) (f_b delta_b + f_cdm delta_cdm)] / (1 + rho_m/rho_r) velocity_tot = ((rho+p)theta/rho_c) = [(4/3) rho_r theta_r + rho_m theta_m] / (rho_r + rho_m) = [(4/3) theta_r + (rho_m/rho_r) theta_m] / (1 + rho_m/rho_r) = [(4/3) (f_g theta_g + f_nu theta_nu) + (rho_m/rho_r) (f_b delta_b + f_cdm 0)] / (1 + rho_m/rho_r) */ if (pba->has_cdm == _TRUE_) delta_cdm = ppw->pv->y[ppw->pv->index_pt_delta_cdm]; else if (pba->has_dcdm == _TRUE_) delta_cdm = ppw->pv->y[ppw->pv->index_pt_delta_dcdm]; else delta_cdm=0.; // note: if there are no neutrinos, fracnu, delta_ur and theta_ur below will consistently be zero. 
delta_tot = (fracg*ppw->pv->y[ppw->pv->index_pt_delta_g]+fracnu*delta_ur+rho_m_over_rho_r*(fracb*ppw->pv->y[ppw->pv->index_pt_delta_b]+fraccdm*delta_cdm))/(1.+rho_m_over_rho_r); velocity_tot = ((4./3.)*(fracg*ppw->pv->y[ppw->pv->index_pt_theta_g]+fracnu*theta_ur) + rho_m_over_rho_r*fracb*ppw->pv->y[ppw->pv->index_pt_theta_b])/(1.+rho_m_over_rho_r); alpha = (eta + 3./2.*a_prime_over_a*a_prime_over_a/k/k/s2_squared*(delta_tot + 3.*a_prime_over_a/k/k*velocity_tot))/a_prime_over_a; ppw->pv->y[ppw->pv->index_pt_phi] = eta - a_prime_over_a*alpha; ppw->pv->y[ppw->pv->index_pt_delta_g] -= 4.*a_prime_over_a*alpha; ppw->pv->y[ppw->pv->index_pt_theta_g] += k*k*alpha; ppw->pv->y[ppw->pv->index_pt_delta_b] -= 3.*a_prime_over_a*alpha; ppw->pv->y[ppw->pv->index_pt_theta_b] += k*k*alpha; if (pba->has_cdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_cdm] -= 3.*a_prime_over_a*alpha; ppw->pv->y[ppw->pv->index_pt_theta_cdm] = k*k*alpha; } if (pba->has_dcdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_dcdm] += (-3.*a_prime_over_a - a*pba->Gamma_dcdm)*alpha; ppw->pv->y[ppw->pv->index_pt_theta_dcdm] = k*k*alpha; } /* fluid */ if (pba->has_fld == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_fld] += 3*(1.+pba->w0_fld+pba->wa_fld)*a_prime_over_a*alpha; ppw->pv->y[ppw->pv->index_pt_theta_fld] += k*k*alpha; } /* scalar field: check */ if (pba->has_scf == _TRUE_) { alpha_prime = 0.0; /**- 2. 
* a_prime_over_a * alpha + eta - 4.5 * (a2/k2) * ppw->rho_plus_p_shear; */ ppw->pv->y[ppw->pv->index_pt_phi_scf] += alpha*ppw->pvecback[pba->index_bg_phi_prime_scf]; ppw->pv->y[ppw->pv->index_pt_phi_prime_scf] += (-2.*a_prime_over_a*alpha*ppw->pvecback[pba->index_bg_phi_prime_scf] -a*a* dV_scf(pba,ppw->pvecback[pba->index_bg_phi_scf])*alpha +ppw->pvecback[pba->index_bg_phi_prime_scf]*alpha_prime); } if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_) || (pba->has_dr == _TRUE_)) { delta_ur -= 4.*a_prime_over_a*alpha; theta_ur += k*k*alpha; /* shear and l3 are gauge invariant */ if (pba->has_dr == _TRUE_) delta_dr += (-4.*a_prime_over_a + a*pba->Gamma_dcdm*ppw->pvecback[pba->index_bg_rho_dcdm]/ppw->pvecback[pba->index_bg_rho_dr])*alpha; } } /* end of gauge transformation to newtonian gauge */ /** (e) In any gauge, we should now implement the relativistic initial conditions in ur and ncdm variables */ if (pba->has_ur == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_ur] = delta_ur; ppw->pv->y[ppw->pv->index_pt_theta_ur] = theta_ur; ppw->pv->y[ppw->pv->index_pt_shear_ur] = shear_ur; ppw->pv->y[ppw->pv->index_pt_l3_ur] = l3_ur; } if (pba->has_ncdm == _TRUE_) { idx = ppw->pv->index_pt_psi0_ncdm1; for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q++) { q = pba->q_ncdm[n_ncdm][index_q]; epsilon = sqrt(q*q+a*a*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]); ppw->pv->y[idx] = -0.25 * delta_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q]; ppw->pv->y[idx+1] = -epsilon/3./q/k*theta_ur* pba->dlnf0_dlnq_ncdm[n_ncdm][index_q]; ppw->pv->y[idx+2] = -0.5 * shear_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q]; ppw->pv->y[idx+3] = -0.25 * l3_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q]; //Jump to next momentum bin: idx += (ppw->pv->l_max_ncdm[n_ncdm]+1); } } } if (pba->has_dr == _TRUE_) { f_dr = pow(pow(a/pba->a_today,2)/pba->H0,2)*ppw->pvecback[pba->index_bg_rho_dr]; ppw->pv->y[ppw->pv->index_pt_F0_dr] = delta_dr*f_dr; 
ppw->pv->y[ppw->pv->index_pt_F0_dr+1] = 4./(3.*k)*theta_ur*f_dr; ppw->pv->y[ppw->pv->index_pt_F0_dr+2] = 2.*shear_ur*f_dr; ppw->pv->y[ppw->pv->index_pt_F0_dr+3] = l3_ur*f_dr; } } /** - for tensors */ if (_tensors_) { /* tensor initial conditions take into account the fact that scalar (resp. tensor) Cl's are related to the real space power spectrum of curvature (resp. of the tensor part of metric perturbations) <R(x) R(x)>, sum_ij<h_ij(x) h^ij(x)> In momentum space it is conventional to use the modes R(k) and h(k) where the quantity h obeying to the equation of propagation h'' + 2a'/a h + [k2+2K] h = 12piGa2 (rho+p) sigma = 8piGa2 p pi and the power spectra in real space and momentum space are related through <R(x) R(x)> = \int dk/k [k^3/2pi^2 <R(k)R(k)*>] = \int dk/k calPR(k) sum_ij<h_ij(x) h^ij(x)> = \int dk/k [k^3/2pi^2 F(k^2/K) <h(k)h(k)*>] = \int dk/k F(k^2/K) calPh(k) where calPR and calPh are the dimensionless spectrum of curvature R, and F is a function of k2/K, where K is the curvature parameter. F is equal to one in flat space (K=0), and coming from the contraction of the laplacian eigentensor Q_ij with itself. We will give F explicitely below. Similarily the scalar (S) and tensor (T) C_ls are given by C_l^S = 4pi \int dk/k [Delta_l^S(q)]^2 calPR(k) C_l^T = 4pi \int dk/k [Delta_l^T(q)]^2 F(k^2/K) calPh(k) The usual convention for the tensor-to-scalar ratio r = A_t / A_s at pivot scale = 16 epsilon in single-field inflation is such that for constant calPR(k) and calPh(k), r = 6 calPh(k) / calPR(k) so calPh(k) = calPR(k) r / 6 = A_s r / 6 = A_t / 6 A priori it would make sense to say that for a power-law primordial spectrum there is an extra factor (k/k_pivot)^n_t (and eventually running and so on and so forth...) However it has been shown that the minimal models of inflation in a negatively curved bubble lead to calP_h(k)=tanh(pi*nu/2). 
In open models it is customary to define the tensor tilt in a non-flat universe as a deviation from this behavior rather than from true scale-invariance in the above sense. Hence we should have calPh(k) = (A_t/6) {tanh(pi*nu/2)} (k/k_pivot)^[n_t+...] where the brackets mean "if K<0" Then C_l^T = 4pi \int dk/k [Delta_l^T(q)]^2 F(k^2/K) (A_t/6) {tanh(pi*nu/2)} k/k_pivot)^[n_t+...] In the code, it is then a matter of choice to write: * In the primordial module : calP_h(k) = (A_t/6) {tanh(pi*nu/2)} (k/k*)^n_T * In the perturbation initial conditions: h = 1 * In the spectra module : C_l^T = 4/pi \int dk/k [Delta_l^T(q)]^2 F(k^2/K) calPh(k) or: * In the primordial module : calP_h(k) = A_t (k/k*)^n_T * In the perturbation initial conditions: h = sqrt[F(k^2/K) / 6 {tanh(pi*nu/2)}] * In the spectra module : C_l^T = 4/pi \int dk/k [Delta_l^T(q)]^2 calPh(k) We choose this last option, such that the primordial and spectra module differ minimally in flat and non-flat space. Then we must impose h = sqrt[(F/6) tanh(pi*nu/2)] The factor F is found to be given by: sum_ij<h_ij(x) h^ij(x)> = \int dk/k [k2(k2-K)]/[(k2+3K)(k2+2K)] calP_h(k) Introducing as usual q2 = k2 - 3K and using qdq = kdk this gives sum_ij<h_ij(x) h^ij(x)> = \int dk/k [(q2-3K)(q2-4K)]/[q2(q2-K)] calP_h(k) Using qdq = kdk this is equivalent to sum_ij<h_ij(x) h^ij(x)> = \int dq/q [q2-4K]/[q2-K] calP_h(k(q)) Finally, introducing nu=q/sqrt(|K|) and sgnK=SIGN(k)=+-1, this could also be written sum_ij<h_ij(x) h^ij(x)> = \int dnu/nu (nu2-4sgnK)/(nu2-sgnK) calP_h(k(nu)) Equation (43,44) of Hu, Seljak, White, Zaldarriaga is equivalent to absorbing the above factor (nu2-4sgnK)/(nu2-sgnK) in the definition of the primordial spectrum. Since the initial condition should be written in terms of k rather than nu, they should read h = sqrt[ [k2(k2-K)]/[(k2+3K)(k2+2K)] / 6 * tanh(pi*nu/2) ] We leave the freedom to mutiply by an arbitrary number ppr->gw_ini. 
The standard convenrtion corresponding to standard definitions of r, A_T, n_T is however ppr->gw_ini=1. */ if (index_ic == ppt->index_ic_ten) { ppw->pv->y[ppw->pv->index_pt_gw] = ppr->gw_ini/_SQRT6_; } k2 = k*k; if (pba->sgnK != 0) { ppw->pv->y[ppw->pv->index_pt_gw] *= sqrt(k2*(k2-pba->K)/(k2+3.*pba->K)/(k2+2.*pba->K)); } if (pba->sgnK == -1) { if (k*k+3*pba->K >= 0.) { ppw->pv->y[ppw->pv->index_pt_gw] *= sqrt(tanh(_PI_/2.*sqrt(k2+3*pba->K)/sqrt(-pba->K))); } else { ppw->pv->y[ppw->pv->index_pt_gw] = 0.; } } } return _SUCCESS_; } /** * Evaluate background/thermodynamics at \f$ \tau \f$, infer useful flags / time scales for integrating perturbations. * * Evaluate background quantities at \f$ \tau \f$, as well as thermodynamics for scalar mode; infer useful flags and time scales for integrating the perturbations: * - check whether tight-coupling approximation is needed. * - check whether radiation (photons, massless neutrinos...) perturbations are needed. * - choose step of integration: step = ppr->perturb_integration_stepsize * min_time_scale, where min_time_scale = smallest time scale involved in the equations. There are three time scales to compare: * -# that of recombination, \f$ \tau_c = 1/\kappa' \f$ * -# Hubble time scale, \f$ \tau_h = a/a' \f$ * -# Fourier mode, \f$ \tau_k = 1/k \f$ * * So, in general, min_time_scale = \f$ \min(\tau_c, \tau_b, \tau_h, \tau_k) \f$. * * However, if \f$ \tau_c \ll \tau_h \f$ and \f$ \tau_c * \ll \tau_k \f$, we can use the tight-coupling regime for photons * and write equations in such way that the time scale \f$ * \tau_c \f$ becomes irrelevant (no effective mass term in \f$ * 1/\tau_c \f$). Then, the smallest * scale in the equations is only \f$ \min(\tau_h, \tau_k) \f$. * In practise, it is sufficient to use only the condition \f$ \tau_c \ll \tau_h \f$. * * Also, if \f$ \rho_{matter} \gg \rho_{radiation} \f$ and \f$ k \gg * aH \f$, we can switch off radiation perturbations (i.e. 
switch on * the free-streaming approximation) and then the smallest scale is * simply \f$ \tau_h \f$. * * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to thermodynamics structure * @param ppt Input: pointer to the perturbation structure * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param k Input: wavenumber * @param tau Input: conformal time * @param ppw Input/Output: in output contains the approximation to be used at this time * @return the error status */ int perturb_approximations( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt, int index_md, double k, double tau, struct perturb_workspace * ppw ) { /** Summary: */ /** - define local variables */ /* (a) time scale of Fourier mode, \f$ \tau_k = 1/k \f$ */ double tau_k; /* (b) time scale of expansion, \f$ \tau_h = a/a' \f$ */ double tau_h; /* (c) time scale of recombination, \f$ \tau_{\gamma} = 1/\kappa' \f$ */ double tau_c; /** - compute Fourier mode time scale = \f$ \tau_k = 1/k \f$ */ class_test(k == 0., ppt->error_message, "stop to avoid division by zero"); tau_k = 1./k; /** - evaluate background quantities with background_at_tau() and Hubble time scale \f$ \tau_h = a/a' \f$ */ class_call(background_at_tau(pba,tau, pba->normal_info, ppw->inter_mode, &(ppw->last_index_back), ppw->pvecback), pba->error_message, ppt->error_message); class_test(ppw->pvecback[pba->index_bg_H]*ppw->pvecback[pba->index_bg_a] == 0., ppt->error_message, "aH=0, stop to avoid division by zero"); tau_h = 1./(ppw->pvecback[pba->index_bg_H]*ppw->pvecback[pba->index_bg_a]); /** - for scalars modes: */ if (_scalars_) { /** (a) evaluate thermodynamical quantities with thermodynamics_at_z() */ class_call(thermodynamics_at_z(pba, pth, 1./ppw->pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ ppw->inter_mode, &(ppw->last_index_thermo), ppw->pvecback, ppw->pvecthermo), 
pth->error_message, ppt->error_message); /** (b.1) if \f$ \kappa'=0 \f$, recombination is finished; tight-coupling approximation must be off */ if (ppw->pvecthermo[pth->index_th_dkappa] == 0.) { ppw->approx[ppw->index_ap_tca] = (int)tca_off; } /** (b.2) if \f$ \kappa' \neq 0 \f$, recombination is not finished: check tight-coupling approximation */ else { /** (b.2.a) compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */ tau_c = 1./ppw->pvecthermo[pth->index_th_dkappa]; class_test(tau_c < 0., ppt->error_message, "tau_c = 1/kappa' should always be positive unless there is something wrong in the thermodynamics module. However you have here tau_c=%e at z=%e, conformal time=%e x_e=%e. (This could come from the interpolation of a too poorly sampled reionisation history?).\n", tau_c, 1./ppw->pvecback[pba->index_bg_a]-1., tau, ppw->pvecthermo[pth->index_th_xe]); /** (b.2.b) check whether tight-coupling approximation should be on */ if ((tau_c/tau_h < ppr->tight_coupling_trigger_tau_c_over_tau_h) && (tau_c/tau_k < ppr->tight_coupling_trigger_tau_c_over_tau_k)) { ppw->approx[ppw->index_ap_tca] = (int)tca_on; } else { ppw->approx[ppw->index_ap_tca] = (int)tca_off; } } /* (c) free-streaming approximations */ if ((tau/tau_k > ppr->radiation_streaming_trigger_tau_over_tau_k) && (tau > pth->tau_free_streaming) && (ppr->radiation_streaming_approximation != rsa_none)) { ppw->approx[ppw->index_ap_rsa] = (int)rsa_on; } else { ppw->approx[ppw->index_ap_rsa] = (int)rsa_off; } if (pba->has_ur == _TRUE_) { if ((tau/tau_k > ppr->ur_fluid_trigger_tau_over_tau_k) && (ppr->ur_fluid_approximation != ufa_none)) { ppw->approx[ppw->index_ap_ufa] = (int)ufa_on; } else { ppw->approx[ppw->index_ap_ufa] = (int)ufa_off; } } if (pba->has_ncdm == _TRUE_) { if ((tau/tau_k > ppr->ncdm_fluid_trigger_tau_over_tau_k) && (ppr->ncdm_fluid_approximation != ncdmfa_none)) { ppw->approx[ppw->index_ap_ncdmfa] = (int)ncdmfa_on; } else { ppw->approx[ppw->index_ap_ncdmfa] = 
(int)ncdmfa_off; } } } /** - for tensor modes: */ if (_tensors_) { /** (a) evaluate thermodynamical quantities with thermodynamics_at_z() */ class_call(thermodynamics_at_z(pba, pth, 1./ppw->pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ ppw->inter_mode, &(ppw->last_index_thermo), ppw->pvecback, ppw->pvecthermo), pth->error_message, ppt->error_message); /** (b.1) if \f$ \kappa'=0 \f$, recombination is finished; tight-coupling approximation must be off */ if (ppw->pvecthermo[pth->index_th_dkappa] == 0.) { ppw->approx[ppw->index_ap_tca] = (int)tca_off; } /** (b.2) if \f$ \kappa' \neq 0 \f$, recombination is not finished: check tight-coupling approximation */ else { /** (b.2.a) compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */ tau_c = 1./ppw->pvecthermo[pth->index_th_dkappa]; /** (b.2.b) check whether tight-coupling approximation should be on */ if ((tau_c/tau_h < ppr->tight_coupling_trigger_tau_c_over_tau_h) && (tau_c/tau_k < ppr->tight_coupling_trigger_tau_c_over_tau_k)) { ppw->approx[ppw->index_ap_tca] = (int)tca_on; } else { ppw->approx[ppw->index_ap_tca] = (int)tca_off; } } if ((tau/tau_k > ppr->radiation_streaming_trigger_tau_over_tau_k) && (tau > pth->tau_free_streaming) && (ppr->radiation_streaming_approximation != rsa_none)) { ppw->approx[ppw->index_ap_rsa] = (int)rsa_on; } else { ppw->approx[ppw->index_ap_rsa] = (int)rsa_off; } } return _SUCCESS_; } /** * Compute typical timescale over which the perturbation equation * vary. Some integrators (e.g. Runge-Kunta) benefit from calling this * routine at each step in order to adapt the next step. * * This is one of the few functions in the code which are passed to the generic_integrator() routine. * Since generic_integrator() should work with functions passed from various modules, the format of the arguments * is a bit special: * - fixed parameters and workspaces are passed through a generic pointer. * generic_integrator() doesn't know the content of this pointer. 
* - the error management is a bit special: errors are not written as usual to pth->error_message, but to a generic * error_message passed in the list of arguments. * * @param tau Input : conformal time * @param parameters_and_workspace Input : fixed parameters (e.g. indices), workspace, approximation used, etc. * @param timescale Output: perturbation variation timescale (given the apprtoximation used) * @param error_message Output: error message */ int perturb_timescale( double tau, void * parameters_and_workspace, double * timescale, ErrorMsg error_message ) { /** Summary: */ /** - define local variables */ /* (a) time scale of Fourier mode, \f$ \tau_k = 1/k \f$ */ double tau_k; /* (b) time scale of expansion, \f$ \tau_h = a/a' \f$ */ double tau_h; /* (c) time scale of recombination, \f$ \tau_{\gamma} = 1/\kappa' \f$ */ double tau_c; /* various pointers allowing to extract the fields of the parameter_and_workspace input structure */ struct perturb_parameters_and_workspace * pppaw; struct background * pba; struct thermo * pth; struct perturbs * ppt; struct perturb_workspace * ppw; double * pvecback; double * pvecthermo; /** - extract the fields of the parameter_and_workspace input structure */ pppaw = parameters_and_workspace; pba = pppaw->pba; pth = pppaw->pth; ppt = pppaw->ppt; ppw = pppaw->ppw; pvecback = ppw->pvecback; pvecthermo = ppw->pvecthermo; /** - compute Fourier mode time scale = \f$ \tau_k = 1/k \f$ */ class_test(pppaw->k == 0., ppt->error_message, "stop to avoid division by zero"); tau_k = 1./pppaw->k; /** - evaluate background quantities with background_at_tau() and Hubble time scale \f$ \tau_h = a/a' \f$ */ class_call(background_at_tau(pba,tau, pba->normal_info, ppw->inter_mode, &(ppw->last_index_back), pvecback), pba->error_message, error_message); class_test(pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a] == 0., error_message, "aH=0, stop to avoid division by zero"); tau_h = 1./(pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]); /** - for 
scalars modes: */ if ((ppt->has_scalars == _TRUE_) && (pppaw->index_md == ppt->index_md_scalars)) { *timescale = tau_h; if ((ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) || (pba->has_ncdm == _TRUE_)) *timescale = MIN(tau_k,*timescale); if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { class_call(thermodynamics_at_z(pba, pth, 1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ ppw->inter_mode, &(ppw->last_index_thermo), pvecback, pvecthermo), pth->error_message, error_message); if (pvecthermo[pth->index_th_dkappa] != 0.) { /** (b.2.a) compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */ tau_c = 1./pvecthermo[pth->index_th_dkappa]; *timescale = MIN(tau_c,*timescale); } } } /** - for vector modes: */ if ((ppt->has_vectors == _TRUE_) && (pppaw->index_md == ppt->index_md_vectors)) { *timescale = MIN(tau_h,tau_k); if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { class_call(thermodynamics_at_z(pba, pth, 1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ ppw->inter_mode, &(ppw->last_index_thermo), pvecback, pvecthermo), pth->error_message, error_message); if (pvecthermo[pth->index_th_dkappa] != 0.) { /** compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */ tau_c = 1./pvecthermo[pth->index_th_dkappa]; *timescale = MIN(tau_c,*timescale); } } } /** - for tensor modes: */ if ((ppt->has_tensors == _TRUE_) && (pppaw->index_md == ppt->index_md_tensors)) { *timescale = MIN(tau_h,tau_k); if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { class_call(thermodynamics_at_z(pba, pth, 1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ ppw->inter_mode, &(ppw->last_index_thermo), pvecback, pvecthermo), pth->error_message, error_message); if (pvecthermo[pth->index_th_dkappa] != 0.) 
{ /** compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */ tau_c = 1./pvecthermo[pth->index_th_dkappa]; *timescale = MIN(tau_c,*timescale); } } } return _SUCCESS_; } /** * Compute metric perturbations (those not integrated over time) using Einstein equations * * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param ppt Input: pointer to the perturbation structure * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param k Input: wavenumber * @param tau Input: conformal time * @param y Input: vector of perturbations (those integrated over time) (already allocated) * @param ppw Input/Output: in output contains the updated metric perturbations * @return the error status */ int perturb_einstein( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt, int index_md, double k, double tau, double * y, struct perturb_workspace * ppw ) { /** Summary: */ /** - define local variables */ double k2,a,a2,a_prime_over_a; double s2_squared; double shear_g = 0.; double D=0, cs2num=0; double l1=0, l2=0, l3=0, l4=0, l5=0, l6=0, l7=0, l8=0; double M2=0, kin=0, bra=0, run=0, ten=0; double rho_tot=0, p_tot=0, rho_smg=0, p_smg=0, H=0; /** - wavenumber and scale factor related quantities */ k2 = k*k; a = ppw->pvecback[pba->index_bg_a]; a2 = a * a; a_prime_over_a = ppw->pvecback[pba->index_bg_H]*a; s2_squared = 1.-3.*pba->K/k2; /* sum up perturbations from all species */ class_call(perturb_total_stress_energy(ppr,pba,pth,ppt,index_md,k,y,ppw), ppt->error_message, ppt->error_message); /** - for scalar modes: */ if (_scalars_) { /** (c) infer metric perturbations from Einstein equations */ /* newtonian gauge */ if (ppt->gauge == newtonian) { /* in principle we could get phi from the constrain equation: ppw->pvecmetric[ppw->index_mt_phi] = -1.5 * (a2/k2/k2/s2/s2) * (k2 * delta_rho + 3.*a_prime_over_a * rho_plus_p_theta); with s2_squared = 
sqrt(1-3K/k2) = ppw->s_l[2]*ppw->s_l[2] This was the case in class v1.3. However the integration is more stable is we treat phi as a dynamical variable y[ppw->pv->index_pt_phi], which derivative is given by the second equation below (credits to Guido Walter Pettinari). */ /* equation for psi */ ppw->pvecmetric[ppw->index_mt_psi] = y[ppw->pv->index_pt_phi] - 4.5 * (a2/k2) * ppw->rho_plus_p_shear; /* equation for phi' */ ppw->pvecmetric[ppw->index_mt_phi_prime] = -a_prime_over_a * ppw->pvecmetric[ppw->index_mt_psi] + 1.5 * (a2/k2) * ppw->rho_plus_p_theta; /* eventually, infer radiation streaming approximation for gamma and ur (this is exactly the right place to do it because the result depends on h_prime) */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) { class_call(perturb_rsa_delta_and_theta(ppr,pba,pth,ppt,k,y,a_prime_over_a,ppw->pvecthermo,ppw), ppt->error_message, ppt->error_message); } } /* synchronous gauge */ if (ppt->gauge == synchronous) { if (pba->has_smg == _TRUE_) { M2 = ppw->pvecback[pba->index_bg_M2_smg]; kin = ppw->pvecback[pba->index_bg_kineticity_smg]; bra = ppw->pvecback[pba->index_bg_braiding_smg]; run = ppw->pvecback[pba->index_bg_mpl_running_smg]; ten = ppw->pvecback[pba->index_bg_tensor_excess_smg]; rho_tot = ppw->pvecback[pba->index_bg_rho_tot_wo_smg]; p_tot = ppw->pvecback[pba->index_bg_p_tot_wo_smg]; rho_smg = ppw->pvecback[pba->index_bg_rho_smg]; p_smg = ppw->pvecback[pba->index_bg_p_smg]; H = ppw->pvecback[pba->index_bg_H]; l1 = ppw->pvecback[pba->index_bg_lambda_1_smg]; l2 = ppw->pvecback[pba->index_bg_lambda_2_smg]; l3 = ppw->pvecback[pba->index_bg_lambda_3_smg]; l4 = ppw->pvecback[pba->index_bg_lambda_4_smg]; l5 = ppw->pvecback[pba->index_bg_lambda_5_smg]; l6 = ppw->pvecback[pba->index_bg_lambda_6_smg]; l7 = ppw->pvecback[pba->index_bg_lambda_7_smg]; l8 = ppw->pvecback[pba->index_bg_lambda_8_smg]; cs2num = ppw->pvecback[pba->index_bg_cs2num_smg]; D = ppw->pvecback[pba->index_bg_kinetic_D_smg]; /* write here the values, as taken 
from the integration */ ppw->pvecmetric[ppw->index_mt_vx_smg] = y[ppw->pv->index_pt_vx_smg]; ppw->pvecmetric[ppw->index_mt_vx_prime_smg] = y[ppw->pv->index_pt_vx_prime_smg]; /* scalar field equation */ ppw->pvecmetric[ppw->index_mt_vx_prime_prime_smg] = (-2.)*pow((-2.) + bra,-1)*cs2num*pow(H,-1)*pow(D,-1)*pow(k,2)*y[ppw->pv->index_pt_eta]*pow(a,-1) + (-3.)*pow((-2.) + bra,-1)*pow(H,-1)*pow(D,-1)*l2*pow(M2,-1)*ppw->delta_rho*a + (-9.)/2.*bra*pow(H,-1)*pow(D,-1)*pow(M2,-1)*ppw->delta_p*a + 8.*pow((-2.) + bra,-1)*H*pow(D,-1)*l7*ppw->pvecmetric[ppw->index_mt_vx_prime_smg]*a + (cs2num*pow(k,2) + (-4.)*pow(H,2)*l8*pow(a,2))*2.*pow((-2.) + bra,-1)*pow(D,-1)*ppw->pvecmetric[ppw->index_mt_vx_smg]; class_test(isnan(ppw->pvecmetric[ppw->index_mt_vx_prime_prime_smg]), ppt->error_message, " Isnan v_X'' at a =%e !",a); /* first equation involving total density fluctuation */ ppw->pvecmetric[ppw->index_mt_h_prime] = (-4.)*pow((-2.) + bra,-1)*pow(H,-1)*pow(k,2)*y[ppw->pv->index_pt_eta]*pow(a,-1) + (-6.)*pow((-2.) + bra,-1)*pow(H,-1)*pow(M2,-1)*ppw->delta_rho*a + (3.*bra + kin)*2.*pow((-2.) + bra,-1)*H*ppw->pvecmetric[ppw->index_mt_vx_prime_smg]*a + (2.*pow((-2.) + bra,-1)*bra*pow(k,2) + ((-18.) + 15.*bra + 2.*kin)*pow((-2.) + bra,-1)*rho_smg*pow(a,2) + (18. + (-18.)*M2 + 15.*bra*M2 + 2.*kin*M2)*pow((-2.) + bra,-1)*rho_tot*pow(M2,-1)*pow(a,2) + (2. + (-2.)*M2 + bra*M2)*9.*pow((-2.) 
+ bra,-1)*pow(M2,-1)*p_tot*pow(a,2) + 9.*p_smg*pow(a,2))*ppw->pvecmetric[ppw->index_mt_vx_smg]; /* eventually, infer radiation streaming approximation for gamma and ur (this is exactly the right place to do it because the result depends on h_prime) */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) { class_call(perturb_rsa_delta_and_theta(ppr,pba,pth,ppt,k,y,a_prime_over_a,ppw->pvecthermo,ppw), ppt->error_message, ppt->error_message); /* update total theta given rsa approximation results */ ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_g]*ppw->rsa_theta_g; if (pba->has_ur == _TRUE_) { ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*ppw->rsa_theta_ur; } } /* second equation involving total velocity */ ppw->pvecmetric[ppw->index_mt_eta_prime] = 1./2.*bra*H*ppw->pvecmetric[ppw->index_mt_vx_prime_smg]*a + 3./2.*pow(k,-2)*pow(M2,-1)*ppw->rho_plus_p_theta*pow(a,2) + (((-3.) + bra)*1./2.*rho_smg*pow(a,2) + (3. + (-3.)*M2 + bra*M2)*1./2.*rho_tot*pow(M2,-1)*pow(a,2) + ((-1.) + M2)*(-3.)/2.*pow(M2,-1)*p_tot*pow(a,2) + (-3.)/2.*p_smg*pow(a,2))*ppw->pvecmetric[ppw->index_mt_vx_smg]; /* second equation involving total velocity */ ppw->pvecmetric[ppw->index_mt_eta_prime] = 1./2.*bra*H*ppw->pvecmetric[ppw->index_mt_vx_prime_smg]*a + 3./2.*pow(k,-2)*pow(M2,-1)*ppw->rho_plus_p_theta*pow(a,2) + (((-3.) + bra)*1./2.*rho_smg*pow(a,2) + (3. + (-3.)*M2 + bra*M2)*1./2.*rho_tot*pow(M2,-1)*pow(a,2) + ((-1.) 
+ M2)*(-3.)/2.*pow(M2,-1)*p_tot*pow(a,2) + (-3.)/2.*p_smg*pow(a,2))*ppw->pvecmetric[ppw->index_mt_vx_smg]; /* eta' */ /* third equation involving total pressure */ ppw->pvecmetric[ppw->index_mt_h_prime_prime] = 2.*pow(D,-1)*pow(k,2)*l1*y[ppw->pv->index_pt_eta] + 2.*H*pow(D,-1)*l3*ppw->pvecmetric[ppw->index_mt_h_prime]*a + (-9.)*kin*pow(D,-1)*pow(M2,-1)*ppw->delta_p*pow(a,2) + 3.*pow(H,2)*pow(D,-1)*l4*ppw->pvecmetric[ppw->index_mt_vx_prime_smg]*pow(a,2) + (2.*H*pow(D,-1)*pow(k,2)*l5*a + 6.*pow(H,3)*pow(D,-1)*l6*pow(a,3))*ppw->pvecmetric[ppw->index_mt_vx_smg]; /* alpha = (h'+6eta')/2k^2 */ ppw->pvecmetric[ppw->index_mt_alpha] = (ppw->pvecmetric[ppw->index_mt_h_prime] + 6.*ppw->pvecmetric[ppw->index_mt_eta_prime])/2./k2; /* eventually, infer first-order tight-coupling approximation for photon shear, then correct the total shear */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_on) { shear_g = 16./45./ppw->pvecthermo[pth->index_th_dkappa]*(y[ppw->pv->index_pt_theta_g]+k2*ppw->pvecmetric[ppw->index_mt_alpha]); ppw->rho_plus_p_shear += 4./3.*ppw->pvecback[pba->index_bg_rho_g]*shear_g; } /* fourth equation involving total shear */ /* fourth equation involving total shear */ ppw->pvecmetric[ppw->index_mt_alpha_prime] = (1. + ten)*y[ppw->pv->index_pt_eta] + (2. 
+ run)*(-1.)*H*ppw->pvecmetric[ppw->index_mt_alpha]*a + (run + (-1.)*ten)*H*ppw->pvecmetric[ppw->index_mt_vx_smg]*a + (-9.)/2.*pow(k,-2)*pow(M2,-1)*ppw->rho_plus_p_shear*pow(a,2); }//end if has_smg // Standard equations else { /* first equation involving total density fluctuation */ ppw->pvecmetric[ppw->index_mt_h_prime] = ( k2 * s2_squared * y[ppw->pv->index_pt_eta] + 1.5 * a2 * ppw->delta_rho)/(0.5*a_prime_over_a); /* h' */ /* eventually, infer radiation streaming approximation for gamma and ur (this is exactly the right place to do it because the result depends on h_prime) */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) { class_call(perturb_rsa_delta_and_theta(ppr,pba,pth,ppt,k,y,a_prime_over_a,ppw->pvecthermo,ppw), ppt->error_message, ppt->error_message); /* update total theta given rsa approximation results */ ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_g]*ppw->rsa_theta_g; if (pba->has_ur == _TRUE_) { ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*ppw->rsa_theta_ur; } } /* second equation involving total velocity */ ppw->pvecmetric[ppw->index_mt_eta_prime] = (1.5 * a2 * ppw->rho_plus_p_theta + 0.5 * pba->K * ppw->pvecmetric[ppw->index_mt_h_prime])/k2/s2_squared; /* eta' */ /* third equation involving total pressure */ ppw->pvecmetric[ppw->index_mt_h_prime_prime] = - 2. * a_prime_over_a * ppw->pvecmetric[ppw->index_mt_h_prime] + 2. * k2 * s2_squared * y[ppw->pv->index_pt_eta] - 9. 
* a2 * ppw->delta_p; /* alpha = (h'+6eta')/2k^2 */ ppw->pvecmetric[ppw->index_mt_alpha] = (ppw->pvecmetric[ppw->index_mt_h_prime] + 6.*ppw->pvecmetric[ppw->index_mt_eta_prime])/2./k2; /* eventually, infer first-order tight-coupling approximation for photon shear, then correct the total shear */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_on) { shear_g = 16./45./ppw->pvecthermo[pth->index_th_dkappa]*(y[ppw->pv->index_pt_theta_g]+k2*ppw->pvecmetric[ppw->index_mt_alpha]); ppw->rho_plus_p_shear += 4./3.*ppw->pvecback[pba->index_bg_rho_g]*shear_g; } /* fourth equation involving total shear */ ppw->pvecmetric[ppw->index_mt_alpha_prime] = //TBC - 2. * a_prime_over_a * ppw->pvecmetric[ppw->index_mt_alpha] + y[ppw->pv->index_pt_eta] - 4.5 * (a2/k2) * ppw->rho_plus_p_shear; }// end of else (if no smg) } //end of syncrhonous /* transform (delta_m, theta_m) of the current gauge into gauge-independent variables (you could comment this out if you really want gauge-dependent results) */ if (ppt->has_source_delta_m == _TRUE_) { ppw->delta_m += 3. *ppw->pvecback[pba->index_bg_a]*ppw->pvecback[pba->index_bg_H] * ppw->theta_m/k2; // note: until 2.4.3 there was a typo, the factor was (-2 H'/H) instead // of (3 aH). There is the same typo in the CLASSgal paper // 1307.1459v1,v2,v3. It came from a confusion between (1+w_total) // and (1+w_matter)=1 [the latter is the relevant one here]. // // note2: at this point this gauge-invariant variable is only // valid if all matter components are pressureless and // stable. This relation will be generalised soon to the case // of decaying dark matter. 
} if (ppt->has_source_theta_m == _TRUE_) { if (ppt->gauge == synchronous) { ppw->theta_m += ppw->pvecmetric[ppw->index_mt_alpha]*k2; } } } if (_vectors_) { if (ppt->gauge == newtonian) { ppw->pvecmetric[ppw->index_mt_V_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_V] - 3.*ppw->vector_source_pi/k; } if (ppt->gauge == synchronous) { // assuming vector_source_pi = p_class a^2 pi_T^{(1)} and vector_source_v = (rho_class+p_class)a^2 v^{(1)} // from Hu and White: ppw->pvecmetric[ppw->index_mt_hv_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_hv_prime] - 3.*ppw->vector_source_pi/k2; // what we suspect: //ppw->pvecmetric[ppw->index_mt_hv_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_hv_prime] - 3.*ppw->vector_source_pi; // if we use the other equation: //ppw->pvecmetric[ppw->index_mt_hv_prime] = -2./k/ (1.-2.*pba->K/k2) * 3. * ppw->vector_source_v; } } if (_tensors_) { /* single einstein equation for tensor perturbations */ if (pba->has_smg == _FALSE_) { ppw->pvecmetric[ppw->index_mt_gw_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_gwdot]-(k2+2.*pba->K)*y[ppw->pv->index_pt_gw]+ppw->gw_source; } /* modified version if gravity is non-standard. Note that no curvature is allowed in this case */ else{ double M2 = ppw->pvecback[pba->index_bg_M2_smg]; double run = ppw->pvecback[pba->index_bg_mpl_running_smg]; double c_t2 = (1. + ppw->pvecback[pba->index_bg_tensor_excess_smg]); ppw->pvecmetric[ppw->index_mt_gw_prime_prime] = -(2. 
+ run)*a_prime_over_a*y[ppw->pv->index_pt_gwdot]-k2*c_t2*y[ppw->pv->index_pt_gw]+ppw->gw_source/M2;
    }
  }

  return _SUCCESS_;

}

/**
 * Sum the stress-energy perturbations of all species present in the
 * model into the totals stored in the workspace (ppw->delta_rho,
 * ppw->rho_plus_p_theta, ppw->rho_plus_p_shear, ppw->delta_p), and
 * fill the per-species ncdm arrays, delta_m / theta_m, and the vector
 * and tensor source terms (ppw->vector_source_*, ppw->gw_source).
 *
 * Must be called with background (ppw->pvecback) and thermodynamics
 * (ppw->pvecthermo) quantities already interpolated at the current time.
 *
 * @param ppr      Input: pointer to precision structure
 * @param pba      Input: pointer to background structure
 * @param pth      Input: pointer to thermodynamics structure
 * @param ppt      Input: pointer to perturbation structure
 * @param index_md Input: index of mode (scalar/vector/tensor)
 * @param k        Input: wavenumber
 * @param y        Input: vector of perturbations
 * @param ppw      Input/Output: workspace (totals written here)
 * @return the error status
 */
int perturb_total_stress_energy(
                                struct precision * ppr,
                                struct background * pba,
                                struct thermo * pth,
                                struct perturbs * ppt,
                                int index_md,
                                double k,
                                double * y,
                                struct perturb_workspace * ppw
                                ) {
  /** Summary: */

  /** - define local variables */

  double a,a2;
  double delta_g=0.;
  double theta_g=0.;
  double shear_g=0.;
  double delta_ur=0.;
  double theta_ur=0.;
  double shear_ur=0.;
  double rho_delta_ncdm=0.;
  double rho_plus_p_theta_ncdm=0.;
  double rho_plus_p_shear_ncdm=0.;
  double delta_p_ncdm=0.;
  double factor;
  double rho_plus_p_ncdm;
  int index_q,n_ncdm,idx;
  double epsilon,q,q2,cg2_ncdm,w_ncdm,rho_ncdm_bg,p_ncdm_bg,pseudo_p_ncdm;
  double rho_m,delta_rho_m,rho_plus_p_m,rho_plus_p_theta_m;
  double w;
  double gwncdm;
  double rho_relativistic;
  double rho_dr_over_f;
  double delta_rho_scf, delta_p_scf, psi;

  /** - wavenumber and scale factor related quantities */

  a = ppw->pvecback[pba->index_bg_a];
  a2 = a * a;

  if (_scalars_) {

    /** (a) deal with approximation schemes */

    /** (a.1) photons */

    if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {

      if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {

        /** (a.1.1) no approximation */

        delta_g = y[ppw->pv->index_pt_delta_g];
        theta_g = y[ppw->pv->index_pt_theta_g];
        shear_g = y[ppw->pv->index_pt_shear_g];

      }
      else {

        /** (a.1.2) radiation streaming approximation */

        delta_g = 0.; /* actual free streaming approximation imposed after evaluation of einstein equations */
        theta_g = 0.; /* actual free streaming approximation imposed after evaluation of einstein equations */
        shear_g = 0.; /* shear always neglected in radiation streaming approximation */
      }
    }
    else {

      /** (a.1.3) tight coupling approximation */

      delta_g = y[ppw->pv->index_pt_delta_g];
      theta_g = y[ppw->pv->index_pt_theta_g];

      /* first-order tight-coupling approximation for photon shear */
      if (ppt->gauge == newtonian) {
        shear_g = 16./45./ppw->pvecthermo[pth->index_th_dkappa]*y[ppw->pv->index_pt_theta_g];
      }
      else {
        shear_g = 0.; /* in the synchronous gauge, the expression of
                         shear_g (at first-order in a tight-coupling
                         expansion) is a function of h' and eta'; but h'
                         and eta' are calculated in perturb_einstein()
                         as a function of delta_g and theta_g. Hence, we
                         set shear_g temporarily to zero, and set it to
                         the right first-order value in
                         perturb_einstein(), just before using the
                         Einstein equation for the shear. */
      }
    }

    /** (a.2) ur */

    if (pba->has_ur == _TRUE_) {

      if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {

        delta_ur = y[ppw->pv->index_pt_delta_ur];
        theta_ur = y[ppw->pv->index_pt_theta_ur];
        shear_ur = y[ppw->pv->index_pt_shear_ur];

      }
      else {

        delta_ur = 0.; /* actual free streaming approximation imposed after evaluation of 1st einstein equation */
        theta_ur = 0.; /* actual free streaming approximation imposed after evaluation of 1st einstein equation */
        shear_ur = 0.; /* shear always neglected in free streaming approximation */
      }
    }

    /** (b) compute the total density, velocity and shear perturbations */

    /* photon and baryon contribution */
    ppw->delta_rho = ppw->pvecback[pba->index_bg_rho_g]*delta_g
      + ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b];
    ppw->rho_plus_p_theta = 4./3.*ppw->pvecback[pba->index_bg_rho_g]*theta_g
      + ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_theta_b];
    ppw->rho_plus_p_shear = 4./3.*ppw->pvecback[pba->index_bg_rho_g]*shear_g;
    ppw->delta_p = 1./3.*ppw->pvecback[pba->index_bg_rho_g]*delta_g
      + ppw->pvecthermo[pth->index_th_cb2]*ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b];

    /* cdm contribution */
    if (pba->has_cdm == _TRUE_) {
      ppw->delta_rho = ppw->delta_rho + ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_delta_cdm];
      /* in the synchronous gauge cdm is the reference frame: its velocity vanishes by construction */
      if (ppt->gauge == newtonian)
        ppw->rho_plus_p_theta = ppw->rho_plus_p_theta + ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_theta_cdm];
    }

    /* dcdm contribution */
    if (pba->has_dcdm == _TRUE_) {
      ppw->delta_rho += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_delta_dcdm];
      ppw->rho_plus_p_theta += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_theta_dcdm];
    }

    /* fluid contribution */
    if (pba->has_fld == _TRUE_) {

      /* CPL equation of state w(a) = w0 + wa (1-a/a0) */
      w = pba->w0_fld + pba->wa_fld * (1. - a / pba->a_today);

      ppw->delta_rho += ppw->pvecback[pba->index_bg_rho_fld]*y[ppw->pv->index_pt_delta_fld];
      ppw->rho_plus_p_theta += (1.+w)*ppw->pvecback[pba->index_bg_rho_fld]*y[ppw->pv->index_pt_theta_fld];
      ppw->delta_p = ppw->delta_p + pba->cs2_fld * ppw->pvecback[pba->index_bg_rho_fld]*y[ppw->pv->index_pt_delta_fld];
    }

    /* ultra-relativistic decay radiation */
    if (pba->has_dr == _TRUE_) {
      /* We have delta_rho_dr = rho_dr * F0_dr / f, where F follows the
         convention in astro-ph/9907388 and f is defined as
         f = rho_dr*a^4/rho_crit_today. In CLASS density units
         rho_crit_today = H0^2. */
      rho_dr_over_f = pow(pba->H0/a2,2);
      ppw->delta_rho += rho_dr_over_f*y[ppw->pv->index_pt_F0_dr];
      ppw->rho_plus_p_theta += 4./3.*3./4*k*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr+1];
      ppw->rho_plus_p_shear += 2./3.*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr+2];
      ppw->delta_p += 1./3.*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr];
    }

    /* ultra-relativistic neutrino/relics contribution */
    if (pba->has_ur == _TRUE_) {
      ppw->delta_rho = ppw->delta_rho + ppw->pvecback[pba->index_bg_rho_ur]*delta_ur;
      ppw->rho_plus_p_theta = ppw->rho_plus_p_theta + 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*theta_ur;
      ppw->rho_plus_p_shear = ppw->rho_plus_p_shear + 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*shear_ur;
      ppw->delta_p += 1./3.*ppw->pvecback[pba->index_bg_rho_ur]*delta_ur;
    }

    /* non-cold dark matter contribution */
    if (pba->has_ncdm == _TRUE_) {
      idx = ppw->pv->index_pt_psi0_ncdm1;
      if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on){
        // The perturbations are evolved integrated (fluid approximation):
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          rho_ncdm_bg = ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
          p_ncdm_bg = ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm];
          pseudo_p_ncdm = ppw->pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm];

          rho_plus_p_ncdm = rho_ncdm_bg + p_ncdm_bg;
          w_ncdm = p_ncdm_bg/rho_ncdm_bg;
          /* effective sound speed of the ncdm fluid */
          cg2_ncdm = w_ncdm*(1.0-1.0/(3.0+3.0*w_ncdm)*(3.0*w_ncdm-2.0+pseudo_p_ncdm/p_ncdm_bg));
          if ((ppt->has_source_delta_ncdm == _TRUE_) || (ppt->has_source_theta_ncdm == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) {
            ppw->delta_ncdm[n_ncdm] = y[idx];
            ppw->theta_ncdm[n_ncdm] = y[idx+1];
            ppw->shear_ncdm[n_ncdm] = y[idx+2];
          }

          ppw->delta_rho += rho_ncdm_bg*y[idx];
          ppw->rho_plus_p_theta += rho_plus_p_ncdm*y[idx+1];
          ppw->rho_plus_p_shear += rho_plus_p_ncdm*y[idx+2];
          ppw->delta_p += cg2_ncdm*rho_ncdm_bg*y[idx];

          idx += ppw->pv->l_max_ncdm[n_ncdm]+1;
        }
      }
      else{
        // We must integrate over momentum to find the fluid perturbations:
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          rho_delta_ncdm = 0.0;
          rho_plus_p_theta_ncdm = 0.0;
          rho_plus_p_shear_ncdm = 0.0;
          delta_p_ncdm = 0.0;
          factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);

          for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) {

            q = pba->q_ncdm[n_ncdm][index_q];
            q2 = q*q;
            epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2);

            rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
            rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1];
            rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2];
            delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];

            //Jump to next momentum bin:
            idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1);
          }

          rho_delta_ncdm *= factor;
          rho_plus_p_theta_ncdm *= k*factor;
          rho_plus_p_shear_ncdm *= 2.0/3.0*factor;
          delta_p_ncdm *= factor/3.;

          if ((ppt->has_source_delta_ncdm == _TRUE_) || (ppt->has_source_theta_ncdm == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) {
            ppw->delta_ncdm[n_ncdm] = rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
            ppw->theta_ncdm[n_ncdm] = rho_plus_p_theta_ncdm/
              (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
            ppw->shear_ncdm[n_ncdm] = rho_plus_p_shear_ncdm/
              (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
          }

          ppw->delta_rho += rho_delta_ncdm;
          ppw->rho_plus_p_theta += rho_plus_p_theta_ncdm;
          ppw->rho_plus_p_shear += rho_plus_p_shear_ncdm;
          ppw->delta_p += delta_p_ncdm;
        }
      }
    }

    /* scalar field contribution.
       In Newtonian gauge, delta_scf depends on the metric perturbation
       psi which is inferred from rho_plus_p_shear. So the contribution
       from the scalar field must be below all species with non-zero
       shear. */
    if (pba->has_scf == _TRUE_) {

      if (ppt->gauge == synchronous){
        delta_rho_scf = 1./3.*
          (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
        delta_p_scf = 1./3.*
          (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           - ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
      }
      else{
        /* equation for psi (inferred from the total shear accumulated above) */
        psi = y[ppw->pv->index_pt_phi] - 4.5 * (a2/k/k) * ppw->rho_plus_p_shear;

        delta_rho_scf = 1./3.*
          (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
           - 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*psi);
        delta_p_scf = 1./3.*
          (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           - ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
           - 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*psi);
      }

      ppw->delta_rho += delta_rho_scf;
      ppw->rho_plus_p_theta += 1./3.*
        k*k/a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf];
      ppw->delta_p += delta_p_scf;
    }

    /* store delta_m in the current gauge. In perturb_einstein, this
       will be transformed later on into the gauge-independent variable
       D = delta_m - 2H'/H \theta_m/k^2 . */
    if (ppt->has_source_delta_m == _TRUE_) {

      /* include baryons and cold dark matter */
      delta_rho_m = ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b];
      rho_m = ppw->pvecback[pba->index_bg_rho_b];
      if (pba->has_cdm == _TRUE_) {
        delta_rho_m += ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_delta_cdm];
        rho_m += ppw->pvecback[pba->index_bg_rho_cdm];
      }
      /* include decaying cold dark matter */
      if (pba->has_dcdm == _TRUE_) {
        delta_rho_m += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_delta_dcdm];
        rho_m += ppw->pvecback[pba->index_bg_rho_dcdm];
      }
      /* include any other species non-relativistic today (like ncdm species) */
      if (pba->has_ncdm == _TRUE_) {
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          delta_rho_m += ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]*ppw->delta_ncdm[n_ncdm];
          rho_m += ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
        }
      }
      /* infer delta_m */
      ppw->delta_m = delta_rho_m/rho_m;
    }

    /* store theta_m in the current gauge. In perturb_einstein, this
       will be transformed later on into the gauge-independent variable
       Theta . Note that computing theta_m is necessary also if we want
       the delta_m source only, because the gauge-invariant delta_m
       involves theta_m in the current gauge. */
    if ((ppt->has_source_delta_m == _TRUE_) || (ppt->has_source_theta_m == _TRUE_)) {

      /* include baryons and cold dark matter */
      rho_plus_p_theta_m = ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_theta_b];
      rho_plus_p_m = ppw->pvecback[pba->index_bg_rho_b];
      if (pba->has_cdm == _TRUE_) {
        /* cdm has zero velocity in the synchronous gauge by construction */
        if (ppt->gauge == newtonian)
          rho_plus_p_theta_m += ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_theta_cdm];
        rho_plus_p_m += ppw->pvecback[pba->index_bg_rho_cdm];
      }
      if (pba->has_dcdm == _TRUE_) {
        rho_plus_p_theta_m += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_theta_dcdm];
        rho_plus_p_m += ppw->pvecback[pba->index_bg_rho_dcdm];
      }
      /* include any other species non-relativistic today (like ncdm species) */
      if (pba->has_ncdm == _TRUE_) {
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          rho_plus_p_theta_m += (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm])*ppw->theta_ncdm[n_ncdm];
          rho_plus_p_m += (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
        }
      }
      /* infer theta_m */
      ppw->theta_m = rho_plus_p_theta_m/rho_plus_p_m;
    }
  }

  if (_vectors_) {

    ppw->vector_source_pi = 0.;
    ppw->vector_source_v = 0.;

    /** photon contribution to vector sources: */
    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
      if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */

        /* NOTE(review): delta_g appears twice in the sum below
           (delta_g + 2*delta_g); presumably one of the two terms was
           meant to be a different multipole — confirm against the
           intended vector-mode equations before relying on this. */
        ppw->vector_source_v += 4./3.*a2*ppw->pvecback[pba->index_bg_rho_g]
          * (-1./4.*_SQRT2_)
          * (y[ppw->pv->index_pt_delta_g]+2.*y[ppw->pv->index_pt_delta_g]+y[ppw->pv->index_pt_shear_g]);
        ppw->vector_source_pi += 1./3.*a2*ppw->pvecback[pba->index_bg_rho_g]
          * (6.*_SQRT2_/5./sqrt(1.-2.*pba->K/k/k))
          * (4./3./k*y[ppw->pv->index_pt_theta_g]+y[ppw->pv->index_pt_l3_g]);
      }
    }

    /* baryons */

  }

  if (_tensors_) {

    ppw->gw_source = 0.0;

    /** photon contribution to gravitational wave source: */
    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
      if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */

        ppw->gw_source += (-_SQRT6_*4*a2*ppw->pvecback[pba->index_bg_rho_g]*
                           (1./15.*y[ppw->pv->index_pt_delta_g]+
                            4./21.*y[ppw->pv->index_pt_shear_g]+
                            1./35.*y[ppw->pv->index_pt_l3_g+1]));
      }
    }

    /** ur contribution to gravitational wave source: */
    if (ppt->evolve_tensor_ur == _TRUE_){

      rho_relativistic = 0.;
      if (ppt->tensor_method == tm_exact)
        rho_relativistic += ppw->pvecback[pba->index_bg_rho_ur];
      if (ppt->tensor_method == tm_massless_approximation) {
        if (pba->has_ur == _TRUE_)
          rho_relativistic += ppw->pvecback[pba->index_bg_rho_ur];
        if (pba->has_ncdm == _TRUE_) {
          for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++) {
            /* (3 p_ncdm1) is the "relativistic" contribution to rho_ncdm1 */
            rho_relativistic += 3.*ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm];
          }
        }
      }

      ppw->gw_source += (-_SQRT6_*4*a2*rho_relativistic*
                         (1./15.*y[ppw->pv->index_pt_delta_ur]+
                          4./21.*y[ppw->pv->index_pt_shear_ur]+
                          1./35.*y[ppw->pv->index_pt_l3_ur+1]));
    }

    /** ncdm contribution to gravitational wave source: */
    if (ppt->evolve_tensor_ncdm == _TRUE_){

      idx = ppw->pv->index_pt_psi0_ncdm1;

      // We must integrate over momentum to find the source:
      for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
        gwncdm = 0.;
        factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);
        for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) {
          q = pba->q_ncdm[n_ncdm][index_q];
          q2 = q*q;
          epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2);
          gwncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*(1./15.*y[idx]+2./21.*y[idx+2]+1./35.*y[idx+4]);
          //Jump to next momentum bin:
          idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1);
        }
        gwncdm *= -_SQRT6_*4*a2*factor;
        ppw->gw_source += gwncdm;
      }
    }
  }

  return _SUCCESS_;

}

/**
 * Compute the source functions (three terms for temperature, one for
 * E or B modes, etc.)
 *
 * This is one of the few functions in the code which are passed to
 * the generic_integrator() routine.
Since generic_integrator()
 * should work with functions passed from various modules, the format
 * of the arguments is a bit special:
 *
 * - fixed parameters and workspaces are passed through a generic
 * pointer. generic_integrator() doesn't know the content of this
 * pointer.
 *
 * - the error management is a bit special: errors are not written as
 * usual to pth->error_message, but to a generic error_message passed
 * in the list of arguments.
 *
 * @param tau Input: conformal time
 * @param y Input: vector of perturbations
 * @param dy Input: vector of time derivative of perturbations
 * @param index_tau Input: index in the array tau_sampling
 * @param parameters_and_workspace Input/Output: in input, all parameters needed by perturb_derivs, in output, source terms
 * @param error_message Output: error message
 * @return the error status
 */
int perturb_sources(
                    double tau,
                    double * y,
                    double * dy,
                    int index_tau,
                    void * parameters_and_workspace,
                    ErrorMsg error_message
                    ) {
  /** Summary: */

  /** - define local variables */

  double P;
  int index_type;

  struct perturb_parameters_and_workspace * pppaw;
  struct precision * ppr;
  struct background * pba;
  struct thermo * pth;
  struct perturbs * ppt;
  int index_md;
  int index_ic;
  int index_k;
  double k;
  double z;
  struct perturb_workspace * ppw;
  double * pvecback;
  double * pvecthermo;
  double * pvecmetric;

  double delta_g, delta_rho_scf, rho_plus_p_theta_scf;
  double a_prime_over_a=0.;  /* (a'/a) */
  double a_prime_over_a_prime=0.;  /* (a'/a)' */
  int switch_isw = 1;

  double a_rel, a2_rel, f_dr;

  /** - rename structure fields (just to avoid heavy notations) */

  pppaw = parameters_and_workspace;
  ppr = pppaw->ppr;
  pba = pppaw->pba;
  pth = pppaw->pth;
  ppt = pppaw->ppt;
  index_md = pppaw->index_md;
  index_ic = pppaw->index_ic;
  index_k = pppaw->index_k;
  k = pppaw->k;
  ppw = pppaw->ppw;

  pvecback = ppw->pvecback;
  pvecthermo = ppw->pvecthermo;
  pvecmetric = ppw->pvecmetric;

  /** - get background/thermo quantities in this point */

  class_call(background_at_tau(pba,
                               tau,
                               pba->normal_info,
                               pba->inter_closeby,
                               &(ppw->last_index_back),
                               pvecback),
             pba->error_message,
             error_message);

  z = pba->a_today/pvecback[pba->index_bg_a]-1.;

  class_call(thermodynamics_at_z(pba,
                                 pth,
                                 z,  /* redshift z=1/a-1 */
                                 pth->inter_closeby,
                                 &(ppw->last_index_thermo),
                                 pvecback,
                                 pvecthermo),
             pth->error_message,
             error_message);

  a_rel = ppw->pvecback[pba->index_bg_a]/pba->a_today;
  a2_rel = a_rel * a_rel;

  /* derived background quantities, useful only in synchronous gauge */
  if (ppt->gauge == synchronous) {

    a_prime_over_a = pvecback[pba->index_bg_a] * pvecback[pba->index_bg_H]; /* (a'/a)=aH */
    a_prime_over_a_prime = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a]
      + pow(pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a],2); /* (a'/a)' = aH'+(aH)^2 */
  }

  /* scalars */
  if (_scalars_) {

    /** - compute metric perturbations */

    class_call(perturb_einstein(ppr,
                                pba,
                                pth,
                                ppt,
                                index_md,
                                k,
                                tau,
                                y,
                                ppw),
               ppt->error_message,
               error_message);

    /** - compute quantities depending on approximation schemes */

    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
      delta_g = ppw->rsa_delta_g;
      P = 0.;
    }
    else {
      delta_g = y[ppw->pv->index_pt_delta_g];
      if (ppw->approx[ppw->index_ap_tca] == (int)tca_on)
        P = 5.* ppw->s_l[2] * ppw->tca_shear_g/8.;  /* (2.5+0.5+2)shear_g/8 */
      else
        P = (y[ppw->pv->index_pt_pol0_g] + y[ppw->pv->index_pt_pol2_g] + 2.* ppw->s_l[2] *y[ppw->pv->index_pt_shear_g])/8.;
    }

    /** for each type, compute source terms */

    /* scalar temperature */
    if (ppt->has_source_t == _TRUE_) {

      /* check whether integrated Sachs-Wolfe term should be included */
      if ((ppt->switch_eisw == 0) && (z >= ppt->eisw_lisw_split_z)){
        switch_isw = 0;
      }
      if ((ppt->switch_lisw == 0) && (z < ppt->eisw_lisw_split_z)) {
        switch_isw=0;
      }

      /* newtonian gauge: simplest form, not efficient numerically */
      /*
        if (ppt->gauge == newtonian) {
        _set_source_(ppt->index_tp_t0) = pvecthermo[pth->index_th_exp_m_kappa] * pvecmetric[ppw->index_mt_phi_prime]
        + pvecthermo[pth->index_th_g] * delta_g / 4.;
        _set_source_(ppt->index_tp_t1) = pvecthermo[pth->index_th_exp_m_kappa] * k* pvecmetric[ppw->index_mt_psi]
        + pvecthermo[pth->index_th_g] * y[ppw->pv->index_pt_theta_b]/k;
        _set_source_(ppt->index_tp_t2) = pvecthermo[pth->index_th_g] * P;
        }
      */

      /* newtonian gauge: slightly more complicated form, but more efficient numerically */
      if (ppt->gauge == newtonian) {

        _set_source_(ppt->index_tp_t0) =
          ppt->switch_sw * pvecthermo[pth->index_th_g] * (delta_g / 4. + pvecmetric[ppw->index_mt_psi])
          + switch_isw * (pvecthermo[pth->index_th_g] * (y[ppw->pv->index_pt_phi]-pvecmetric[ppw->index_mt_psi])
                          + pvecthermo[pth->index_th_exp_m_kappa] * 2. * pvecmetric[ppw->index_mt_phi_prime])
          + ppt->switch_dop /k/k * (pvecthermo[pth->index_th_g] * dy[ppw->pv->index_pt_theta_b]
                                    + pvecthermo[pth->index_th_dg] * y[ppw->pv->index_pt_theta_b]);

        _set_source_(ppt->index_tp_t1) =
          switch_isw * pvecthermo[pth->index_th_exp_m_kappa] * k* (pvecmetric[ppw->index_mt_psi]-y[ppw->pv->index_pt_phi]);

        _set_source_(ppt->index_tp_t2) =
          ppt->switch_pol * pvecthermo[pth->index_th_g] * P;
      }

      /* synchronous gauge: simplest form, not efficient numerically */
      /*
        if (ppt->gauge == synchronous) {
        _set_source_(ppt->index_tp_t0) = - pvecthermo[pth->index_th_exp_m_kappa] * pvecmetric[ppw->index_mt_h_prime] / 6.
        + pvecthermo[pth->index_th_g] / 4. * delta_g;
        _set_source_(ppt->index_tp_t1) = pvecthermo[pth->index_th_g] * y[ppw->pv->index_pt_theta_b] / k;
        _set_source_(ppt->index_tp_t2) = pvecthermo[pth->index_th_exp_m_kappa] * k*k* 2./3. * ppw->s_l[2] * pvecmetric[ppw->index_mt_alpha]
        + pvecthermo[pth->index_th_g] * P;
        }
      */

      /* synchronous gauge: slightly more complicated form, but more efficient numerically */
      if (ppt->gauge == synchronous) {

        _set_source_(ppt->index_tp_t0) =
          ppt->switch_sw * pvecthermo[pth->index_th_g] * (delta_g/4. + pvecmetric[ppw->index_mt_alpha_prime])
          + switch_isw * (pvecthermo[pth->index_th_g] * (y[ppw->pv->index_pt_eta]
                                                         - pvecmetric[ppw->index_mt_alpha_prime]
                                                         - 2 * a_prime_over_a * pvecmetric[ppw->index_mt_alpha])
                          + pvecthermo[pth->index_th_exp_m_kappa] * 2. * (pvecmetric[ppw->index_mt_eta_prime]
                                                                          - a_prime_over_a_prime * pvecmetric[ppw->index_mt_alpha]
                                                                          - a_prime_over_a * pvecmetric[ppw->index_mt_alpha_prime]))
          + ppt->switch_dop * (pvecthermo[pth->index_th_g] * (dy[ppw->pv->index_pt_theta_b]/k/k + pvecmetric[ppw->index_mt_alpha_prime])
                               +pvecthermo[pth->index_th_dg] * (y[ppw->pv->index_pt_theta_b]/k/k + pvecmetric[ppw->index_mt_alpha]));

        _set_source_(ppt->index_tp_t1) =
          switch_isw * pvecthermo[pth->index_th_exp_m_kappa] * k * (pvecmetric[ppw->index_mt_alpha_prime]
                                                                    + 2. * a_prime_over_a * pvecmetric[ppw->index_mt_alpha]
                                                                    - y[ppw->pv->index_pt_eta]);

        _set_source_(ppt->index_tp_t2) =
          ppt->switch_pol * pvecthermo[pth->index_th_g] * P;
      }
    }

    /* scalar polarization */
    if (ppt->has_source_p == _TRUE_) {

      /* all gauges. Note that the correct formula for the E source
         should have a minus sign, as shown in Hu & White. We put a plus
         sign to comply with the 'historical convention' established in
         CMBFAST and CAMB. */
      _set_source_(ppt->index_tp_p) = sqrt(6.) * pvecthermo[pth->index_th_g] * P;
    }

    /* now, non-CMB sources */

    /* Bardeen potential -PHI_H = phi in Newtonian gauge */
    if (ppt->has_source_phi == _TRUE_) {

      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_phi) = y[ppw->pv->index_pt_phi];

      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_phi) = y[ppw->pv->index_pt_eta] - a_prime_over_a * pvecmetric[ppw->index_mt_alpha];
    }

    /* its derivative phi' */
    if (ppt->has_source_phi_prime == _TRUE_) {

      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_phi_prime) = dy[ppw->pv->index_pt_phi];

      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_phi_prime) = dy[ppw->pv->index_pt_eta]
          - a_prime_over_a_prime * pvecmetric[ppw->index_mt_alpha]
          - a_prime_over_a * pvecmetric[ppw->index_mt_alpha_prime];
    }

    /* diff of Bardeen potentials PHI_A-PHI_H = psi + phi in newtonian gauge */
    if (ppt->has_source_phi_plus_psi == _TRUE_) {

      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_phi_plus_psi) = y[ppw->pv->index_pt_phi] + pvecmetric[ppw->index_mt_psi];

      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_phi_plus_psi) = y[ppw->pv->index_pt_eta] + pvecmetric[ppw->index_mt_alpha_prime];
    }

    /* Bardeen potential PHI_A = psi in newtonian gauge */
    if (ppt->has_source_psi == _TRUE_) {

      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_psi) = pvecmetric[ppw->index_mt_psi];

      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_psi) = a_prime_over_a * pvecmetric[ppw->index_mt_alpha] + pvecmetric[ppw->index_mt_alpha_prime];
    }

    /* total matter overdensity (gauge-invariant, defined as in arXiv:1307.1459) */
    if (ppt->has_source_delta_m == _TRUE_) {
      _set_source_(ppt->index_tp_delta_m) = ppw->delta_m;
    }

    /* delta_g */
    if (ppt->has_source_delta_g == _TRUE_) {
      _set_source_(ppt->index_tp_delta_g) = delta_g;
    }

    /* delta_baryon */
    if (ppt->has_source_delta_b == _TRUE_) {
      _set_source_(ppt->index_tp_delta_b) = y[ppw->pv->index_pt_delta_b];
    }

    /* delta_cdm */
    if (ppt->has_source_delta_cdm == _TRUE_) {
      _set_source_(ppt->index_tp_delta_cdm) = y[ppw->pv->index_pt_delta_cdm];
    }

    /* delta_dcdm */
    if (ppt->has_source_delta_dcdm == _TRUE_) {
      _set_source_(ppt->index_tp_delta_dcdm) = y[ppw->pv->index_pt_delta_dcdm];
    }

    /* delta_fld */
    if (ppt->has_source_delta_fld == _TRUE_) {
      _set_source_(ppt->index_tp_delta_fld) = y[ppw->pv->index_pt_delta_fld];
    }

    /* delta_scf */
    if (ppt->has_source_delta_scf == _TRUE_) {
      if (ppt->gauge == synchronous){
        delta_rho_scf = 1./3.*
          (1./a2_rel*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
      }
      else{
        delta_rho_scf = 1./3.*
          (1./a2_rel*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
           - 1./a2_rel*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*ppw->pvecmetric[ppw->index_mt_psi]);
      }
      _set_source_(ppt->index_tp_delta_scf) = delta_rho_scf/pvecback[pba->index_bg_rho_scf];
    }

    /* phi_smg TODO: either change the name of the source or write delta_phi_dot */
    if (ppt->has_source_phi_smg == _TRUE_) {
      _set_source_(ppt->index_tp_phi_smg) = pvecmetric[ppw->index_mt_vx_smg];
    }

    /* delta_dr */
    if (ppt->has_source_delta_dr == _TRUE_) {
      f_dr = pow(a2_rel/pba->H0,2)*pvecback[pba->index_bg_rho_dr];
      _set_source_(ppt->index_tp_delta_dr) = y[ppw->pv->index_pt_F0_dr]/f_dr;
    }

    /* delta_ur */
    if (ppt->has_source_delta_ur == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off)
        _set_source_(ppt->index_tp_delta_ur) = y[ppw->pv->index_pt_delta_ur];
      else
        _set_source_(ppt->index_tp_delta_ur) = ppw->rsa_delta_ur;
    }

    /* delta_ncdm1 */
    if (ppt->has_source_delta_ncdm == _TRUE_) {
      for (index_type = ppt->index_tp_delta_ncdm1; index_type < ppt->index_tp_delta_ncdm1+pba->N_ncdm; index_type++) {
        _set_source_(index_type) = ppw->delta_ncdm[index_type - ppt->index_tp_delta_ncdm1];
      }
    }

    /* total velocity (gauge-invariant, defined as in arXiv:1307.1459) */
    if (ppt->has_source_theta_m == _TRUE_) {
      _set_source_(ppt->index_tp_theta_m) = ppw->theta_m;
    }

    /* theta_g */
    if (ppt->has_source_theta_g == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off)
        _set_source_(ppt->index_tp_theta_g) = y[ppw->pv->index_pt_theta_g];
      else
        _set_source_(ppt->index_tp_theta_g) = ppw->rsa_theta_g;
    }

    /* theta_baryon */
    if (ppt->has_source_theta_b == _TRUE_) {
      _set_source_(ppt->index_tp_theta_b) = y[ppw->pv->index_pt_theta_b];
    }

    /* theta_cdm */
    if (ppt->has_source_theta_cdm == _TRUE_) {
      _set_source_(ppt->index_tp_theta_cdm) = y[ppw->pv->index_pt_theta_cdm];
    }

    /* theta_dcdm */
    if (ppt->has_source_theta_dcdm == _TRUE_) {
      _set_source_(ppt->index_tp_theta_dcdm) = y[ppw->pv->index_pt_theta_dcdm];
    }

    /* theta_fld */
    if (ppt->has_source_theta_fld == _TRUE_) {
      _set_source_(ppt->index_tp_theta_fld) = y[ppw->pv->index_pt_theta_fld];
    }

    /* theta_scf */
    if (ppt->has_source_theta_scf == _TRUE_) {
      rho_plus_p_theta_scf = 1./3.*
        k*k/a2_rel*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf];
      _set_source_(ppt->index_tp_theta_scf) = rho_plus_p_theta_scf/
        (pvecback[pba->index_bg_rho_scf]+pvecback[pba->index_bg_p_scf]);
    }

    /* theta_dr */
    if (ppt->has_source_theta_dr == _TRUE_) {
      f_dr = pow(a2_rel/pba->H0,2)*pvecback[pba->index_bg_rho_dr];
      _set_source_(ppt->index_tp_theta_dr) = 3./4.*k*y[ppw->pv->index_pt_F0_dr+1]/f_dr;
    }

    /* theta_ur */
    if (ppt->has_source_theta_ur == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off)
        _set_source_(ppt->index_tp_theta_ur) = y[ppw->pv->index_pt_theta_ur];
      else
        _set_source_(ppt->index_tp_theta_ur) = ppw->rsa_theta_ur;
    }

    /* theta_ncdm1 */
    if (ppt->has_source_theta_ncdm == _TRUE_) {
      for (index_type = ppt->index_tp_theta_ncdm1; index_type < ppt->index_tp_theta_ncdm1+pba->N_ncdm; index_type++) {
        _set_source_(index_type) = ppw->theta_ncdm[index_type - ppt->index_tp_theta_ncdm1];
      }
    }
  }

  /* tensors */
  if (_tensors_) {

    /* compute quantities depending on approximation schemes */
    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
      if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {

        P = -(1./10.*y[ppw->pv->index_pt_delta_g]
              +2./7.*y[ppw->pv->index_pt_shear_g]
              +3./70.*y[ppw->pv->index_pt_delta_g+4]
              -3./5.*y[ppw->pv->index_pt_pol0_g]
              +6./7.*y[ppw->pv->index_pt_pol2_g]
              -3./70.*y[ppw->pv->index_pt_pol0_g+4])
          /sqrt(6.);
      }
      else {
        P = 2./5.*_SQRT6_*y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; //TBC
      }
    }
    else {
      P = 0.;
    }

    /* tensor temperature */
    if (ppt->has_source_t == _TRUE_) {

      _set_source_(ppt->index_tp_t2) =
        - y[ppw->pv->index_pt_gwdot] * pvecthermo[pth->index_th_exp_m_kappa]
        + pvecthermo[pth->index_th_g] * P;
    }

    /* tensor polarization */
    if (ppt->has_source_p == _TRUE_) {

      /* Note that the correct formula for the polarisation source
         should have a minus sign, as shown in Hu & White. We put a plus
         sign to comply with the 'historical convention' established in
         CMBFAST and CAMB. */
      _set_source_(ppt->index_tp_p) = sqrt(6.) * pvecthermo[pth->index_th_g] * P;
    }
  }

  return _SUCCESS_;

}

/**
 * When testing the code or a cosmological model, it can be useful to
 * output perturbations at each step of integration (and not just the
 * delta's at each source sampling point, which is achieved simply by
 * asking for matter transfer functions). Then this function can be
 * passed to the generic_evolver routine.
 *
 * By default, instead of passing this function to generic_evolver,
 * one passes a null pointer. Then this function is just not used.
 *
 * @param tau Input: conformal time
 * @param y Input: vector of perturbations
 * @param dy Input: vector of its derivatives (already allocated)
 * @param parameters_and_workspace Input: fixed parameters (e.g.
indices) * @param error_message Output : error message * */ int perturb_print_variables(double tau, double * y, double * dy, void * parameters_and_workspace, ErrorMsg error_message ) { struct perturb_parameters_and_workspace * pppaw; double k; int index_md; //struct precision * ppr; struct background * pba; struct thermo * pth; struct perturbs * ppt; struct perturb_workspace * ppw; double * pvecback; double * pvecmetric; double delta_g,theta_g,shear_g,l4_g,pol0_g,pol1_g,pol2_g,pol4_g; double delta_b,theta_b; double delta_cdm=0.,theta_cdm=0.; double delta_dcdm=0.,theta_dcdm=0.; double delta_dr=0.,theta_dr=0.,shear_dr=0., f_dr=1.0; double delta_ur=0.,theta_ur=0.,shear_ur=0.,l4_ur=0.; double delta_rho_scf=0., rho_plus_p_theta_scf=0.; double delta_scf=0., theta_scf=0.; double V_x_smg=0., V_x_prime_smg=0.; int n_ncdm; double delta_ncdm,theta_ncdm,shear_ncdm; double phi=0.,psi=0.,alpha=0.; double delta_temp=0., delta_chi=0.; double rho_delta_ncdm = 0.0; double rho_plus_p_theta_ncdm = 0.0; double rho_plus_p_shear_ncdm = 0.0; double delta_p_ncdm = 0.0; double factor = 0.0; double q,q2,epsilon; double a,a2,H; int idx,index_q, storeidx; double *dataptr; /** - rename structure fields (just to avoid heavy notations) */ pppaw = parameters_and_workspace; k = pppaw->k; index_md = pppaw->index_md; //ppr = pppaw->ppr; pba = pppaw->pba; pth = pppaw->pth; ppt = pppaw->ppt; ppw = pppaw->ppw; pvecback = ppw->pvecback; pvecmetric = ppw->pvecmetric; a = pvecback[pba->index_bg_a]; a2 = a*a; H = pvecback[pba->index_bg_H]; /** perturbed recombination **/ if ((ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){ delta_temp = y[ppw->pv->index_pt_perturbed_recombination_delta_temp]; delta_chi =y[ppw->pv->index_pt_perturbed_recombination_delta_chi]; } if (_scalars_) { if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) { delta_g = y[ppw->pv->index_pt_delta_g]; theta_g = y[ppw->pv->index_pt_theta_g]; } else { delta_g = ppw->rsa_delta_g; theta_g = 
ppw->rsa_theta_g; } if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) { if (ppw->approx[ppw->index_ap_tca]==(int)tca_on) { shear_g = ppw->tca_shear_g; //l3_g = 6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g; pol0_g = 2.5*ppw->tca_shear_g; pol1_g = 7./12.*6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g; pol2_g = 0.5*ppw->tca_shear_g; //pol3_g = 0.25*6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g; } else { shear_g = y[ppw->pv->index_pt_shear_g]; //l3_g = y[ppw->pv->index_pt_l3_g]; pol0_g = y[ppw->pv->index_pt_pol0_g]; pol1_g = y[ppw->pv->index_pt_pol1_g]; pol2_g = y[ppw->pv->index_pt_pol2_g]; //pol3_g = y[ppw->pv->index_pt_pol3_g]; } } else { shear_g = 0; //l3_g = 0; pol0_g = 0; pol1_g = 0; pol2_g = 0; //pol3_g = 0.; } if (pba->has_ur == _TRUE_) { if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) { delta_ur = y[ppw->pv->index_pt_delta_ur]; theta_ur = y[ppw->pv->index_pt_theta_ur]; shear_ur = y[ppw->pv->index_pt_shear_ur]; } else { delta_ur = ppw->rsa_delta_ur; theta_ur = ppw->rsa_theta_ur; shear_ur = 0.; } } delta_b = y[ppw->pv->index_pt_delta_b]; theta_b = y[ppw->pv->index_pt_theta_b]; if (pba->has_cdm == _TRUE_) { delta_cdm = y[ppw->pv->index_pt_delta_cdm]; if (ppt->gauge == synchronous) { theta_cdm = 0.; } else { theta_cdm = y[ppw->pv->index_pt_theta_cdm]; } } /* gravitational potentials */ if (ppt->gauge == synchronous) { alpha = pvecmetric[ppw->index_mt_alpha]; psi = pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a] * alpha + pvecmetric[ppw->index_mt_alpha_prime]; phi = y[ppw->pv->index_pt_eta] - pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha; } else if (ppt->gauge == newtonian){ psi = pvecmetric[ppw->index_mt_psi]; phi = y[ppw->pv->index_pt_phi]; } else{ psi = 0.0; phi = 0.0; } if (pba->has_dcdm == _TRUE_) { delta_dcdm = y[ppw->pv->index_pt_delta_dcdm]; theta_dcdm = y[ppw->pv->index_pt_theta_dcdm]; } if (pba->has_dr == _TRUE_) { f_dr = 
pow(pvecback[pba->index_bg_a]*pvecback[pba->index_bg_a]/pba->H0,2)*pvecback[pba->index_bg_rho_dr]; delta_dr = y[ppw->pv->index_pt_F0_dr]/f_dr; theta_dr = y[ppw->pv->index_pt_F0_dr+1]*3./4.*k/f_dr; shear_dr = y[ppw->pv->index_pt_F0_dr+2]*0.5/f_dr; } if (pba->has_scf == _TRUE_){ if (ppt->gauge == synchronous){ delta_rho_scf = 1./3.* (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf] + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]); } else{ delta_rho_scf = 1./3.* (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf] + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf] - 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*ppw->pvecmetric[ppw->index_mt_psi]); } rho_plus_p_theta_scf = 1./3.* k*k/a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf]; delta_scf = delta_rho_scf/pvecback[pba->index_bg_rho_scf]; theta_scf = rho_plus_p_theta_scf/(pvecback[pba->index_bg_rho_scf]+pvecback[pba->index_bg_p_scf]); } if (pba->has_smg == _TRUE_){ //TODO: write here the perturbation variables V_x_smg = ppw->pvecmetric[ppw->index_mt_vx_smg]; V_x_prime_smg = ppw->pvecmetric[ppw->index_mt_vx_prime_smg]; } /* converting synchronous variables to newtonian ones */ if (ppt->gauge == synchronous) { /* density and velocity perturbations (comment out if you wish to keep synchronous variables) */ delta_g -= 4. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha; theta_g += k*k*alpha; delta_b -= 3. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha; theta_b += k*k*alpha; if (pba->has_ur == _TRUE_) { delta_ur -= 4. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha; theta_ur += k*k*alpha; } if (pba->has_dr == _TRUE_) { delta_dr += (-4.*a*H+a*pba->Gamma_dcdm*pvecback[pba->index_bg_rho_dcdm]/pvecback[pba->index_bg_rho_dr])*alpha; theta_dr += k*k*alpha; } if (pba->has_cdm == _TRUE_) { delta_cdm -= 3. 
* pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha; theta_cdm += k*k*alpha; } if (pba->has_dcdm == _TRUE_) { delta_dcdm += alpha*(-a*pba->Gamma_dcdm-3.*a*H); theta_dcdm += k*k*alpha; } if (pba->has_scf == _TRUE_) { delta_scf += alpha*(-3.0*H*(1.0+pvecback[pba->index_bg_p_scf]/pvecback[pba->index_bg_rho_scf])); theta_scf += k*k*alpha; } } // fprintf(ppw->perturb_output_file," "); /** Handle (re-)allocation */ if (ppt->scalar_perturbations_data[ppw->index_ikout] == NULL){ class_alloc(ppt->scalar_perturbations_data[ppw->index_ikout], sizeof(double)*ppt->number_of_scalar_titles, error_message); ppt->size_scalar_perturbation_data[ppw->index_ikout] = 0; } else{ ppt->scalar_perturbations_data[ppw->index_ikout] = realloc(ppt->scalar_perturbations_data[ppw->index_ikout], sizeof(double)*(ppt->size_scalar_perturbation_data[ppw->index_ikout]+ppt->number_of_scalar_titles)); } storeidx = 0; dataptr = ppt->scalar_perturbations_data[ppw->index_ikout]+ ppt->size_scalar_perturbation_data[ppw->index_ikout]; ppt->size_scalar_perturbation_data[ppw->index_ikout] += ppt->number_of_scalar_titles; class_store_double(dataptr, tau, _TRUE_, storeidx); class_store_double(dataptr, pvecback[pba->index_bg_a], _TRUE_, storeidx); class_store_double(dataptr, delta_g, _TRUE_, storeidx); class_store_double(dataptr, theta_g, _TRUE_, storeidx); class_store_double(dataptr, shear_g, _TRUE_, storeidx); class_store_double(dataptr, pol0_g, _TRUE_, storeidx); class_store_double(dataptr, pol1_g, _TRUE_, storeidx); class_store_double(dataptr, pol2_g, _TRUE_, storeidx); class_store_double(dataptr, delta_b, _TRUE_, storeidx); class_store_double(dataptr, theta_b, _TRUE_, storeidx); class_store_double(dataptr, psi, _TRUE_, storeidx); class_store_double(dataptr, phi, _TRUE_, storeidx); /* perturbed recombination */ class_store_double(dataptr, delta_temp, ppt->has_perturbed_recombination, storeidx); class_store_double(dataptr, delta_chi, ppt->has_perturbed_recombination, storeidx); /* Ultra relativistic 
species */ class_store_double(dataptr, delta_ur, pba->has_ur, storeidx); class_store_double(dataptr, theta_ur, pba->has_ur, storeidx); class_store_double(dataptr, shear_ur, pba->has_ur, storeidx); /* Cold dark matter */ class_store_double(dataptr, delta_cdm, pba->has_cdm, storeidx); class_store_double(dataptr, theta_cdm, pba->has_cdm, storeidx); /* Non-cold Dark Matter */ if ((pba->has_ncdm == _TRUE_) && ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_))) { for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ class_store_double(dataptr, ppw->delta_ncdm[n_ncdm], _TRUE_, storeidx); class_store_double(dataptr, ppw->theta_ncdm[n_ncdm], _TRUE_, storeidx); class_store_double(dataptr, ppw->shear_ncdm[n_ncdm], _TRUE_, storeidx); } } /* Decaying cold dark matter */ class_store_double(dataptr, delta_dcdm, pba->has_dcdm, storeidx); class_store_double(dataptr, theta_dcdm, pba->has_dcdm, storeidx); /* Decay radiation */ class_store_double(dataptr, delta_dr, pba->has_dr, storeidx); class_store_double(dataptr, theta_dr, pba->has_dr, storeidx); class_store_double(dataptr, shear_dr, pba->has_dr, storeidx); /* Scalar field scf*/ class_store_double(dataptr, delta_scf, pba->has_scf, storeidx); class_store_double(dataptr, theta_scf, pba->has_scf, storeidx); /* Scalar field smg*/ class_store_double(dataptr, V_x_smg, pba->has_smg, storeidx); class_store_double(dataptr, V_x_prime_smg, pba->has_smg, storeidx); } if (_tensors_) { if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) { if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) { delta_g = y[ppw->pv->index_pt_delta_g]; shear_g = y[ppw->pv->index_pt_shear_g]; l4_g = y[ppw->pv->index_pt_delta_g+4]; pol0_g = y[ppw->pv->index_pt_pol0_g]; pol2_g = y[ppw->pv->index_pt_pol2_g]; pol4_g = y[ppw->pv->index_pt_pol0_g+4]; } else { delta_g = -4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; //TBC shear_g = 0.; l4_g = 0.; pol0_g = 
1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; //TBC pol2_g = 0.; pol4_g = 0.; } } else { delta_g = 0.; shear_g = 0.; l4_g = 0.; pol0_g = 0.; pol2_g = 0.; pol4_g = 0.; } if (ppt->evolve_tensor_ur == _TRUE_){ delta_ur = y[ppw->pv->index_pt_delta_ur]; shear_ur = y[ppw->pv->index_pt_shear_ur]; l4_ur = y[ppw->pv->index_pt_delta_ur+4]; } /** Handle (re-)allocation */ if (ppt->tensor_perturbations_data[ppw->index_ikout] == NULL){ class_alloc(ppt->tensor_perturbations_data[ppw->index_ikout], sizeof(double)*ppt->number_of_tensor_titles, error_message); ppt->size_tensor_perturbation_data[ppw->index_ikout] = 0; } else{ ppt->tensor_perturbations_data[ppw->index_ikout] = realloc(ppt->tensor_perturbations_data[ppw->index_ikout], sizeof(double)*(ppt->size_tensor_perturbation_data[ppw->index_ikout]+ppt->number_of_tensor_titles)); } storeidx = 0; dataptr = ppt->tensor_perturbations_data[ppw->index_ikout]+ ppt->size_tensor_perturbation_data[ppw->index_ikout]; ppt->size_tensor_perturbation_data[ppw->index_ikout] += ppt->number_of_tensor_titles; //fprintf(ppw->perturb_output_file," "); class_store_double(dataptr, tau, _TRUE_, storeidx); class_store_double(dataptr, pvecback[pba->index_bg_a], _TRUE_, storeidx); class_store_double(dataptr, delta_g, _TRUE_, storeidx); class_store_double(dataptr, shear_g, _TRUE_, storeidx); class_store_double(dataptr, l4_g, _TRUE_, storeidx); class_store_double(dataptr, pol0_g, _TRUE_, storeidx); class_store_double(dataptr, pol2_g, _TRUE_, storeidx); class_store_double(dataptr, pol4_g, _TRUE_, storeidx); class_store_double(dataptr, y[ppw->pv->index_pt_gw], _TRUE_, storeidx); class_store_double(dataptr, y[ppw->pv->index_pt_gwdot], _TRUE_, storeidx); class_store_double(dataptr, delta_ur, ppt->evolve_tensor_ur, storeidx); class_store_double(dataptr, shear_ur, ppt->evolve_tensor_ur, storeidx); class_store_double(dataptr, l4_ur, ppt->evolve_tensor_ur, storeidx); //printf("index_pt_delta+ur = %d\n",ppw->pv->index_pt_delta_ur); 
/* Non-cold Dark Matter */ if (ppt->evolve_tensor_ncdm == _TRUE_) { idx = ppw->pv->index_pt_psi0_ncdm1; for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ rho_delta_ncdm = 0.0; rho_plus_p_theta_ncdm = 0.0; rho_plus_p_shear_ncdm = 0.0; delta_p_ncdm = 0.0; factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4); for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) { q = pba->q_ncdm[n_ncdm][index_q]; q2 = q*q; epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2); rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx]; rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1]; rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2]; delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx]; //Jump to next momentum bin: idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1); } rho_delta_ncdm *= factor; rho_plus_p_theta_ncdm *= k*factor; rho_plus_p_shear_ncdm *= 2.0/3.0*factor; delta_p_ncdm *= factor/3.; delta_ncdm = rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; theta_ncdm = rho_plus_p_theta_ncdm/ (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]); shear_ncdm = rho_plus_p_shear_ncdm/ (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]); class_store_double(dataptr, delta_ncdm, _TRUE_, storeidx); class_store_double(dataptr, theta_ncdm, _TRUE_, storeidx); class_store_double(dataptr, shear_ncdm, _TRUE_, storeidx); } } // fprintf(ppw->perturb_output_file,"\n"); } return _SUCCESS_; } /** * Compute derivative of all perturbations to be integrated * * For each mode (scalar/vector/tensor) and each wavenumber k, this * function computes the derivative of all values in the vector of * perturbed variables to be integrated. * * This is one of the few functions in the code which are passed to the generic_integrator() routine. 
* Since generic_integrator() should work with functions passed from various modules, the format of the arguments * is a bit special: * - fixed parameters and workspaces are passed through a generic pointer. * generic_integrator() doesn't know what the content of this pointer is. * - errors are not written as usual in pth->error_message, but in a generic * error_message passed in the list of arguments. * * @param tau Input: conformal time * @param y Input: vector of perturbations * @param dy Ouput: vector of its derivatives (already allocated) * @param parameters_and_workspace Input/Output: in input, fixed parameters (e.g. indices); in output, background and thermo quantities evaluated at tau. * @param error_message Output : error message */ int perturb_derivs(double tau, double * y, double * dy, void * parameters_and_workspace, ErrorMsg error_message ) { /** Summary: */ /** - define local variables */ /* multipole */ int l; /* scale factor and other background quantities */ double a,a2,a_prime_over_a,R; /* short-cut names for the fields of the input structure */ struct perturb_parameters_and_workspace * pppaw; double k,k2; int index_md; struct precision * ppr; struct background * pba; struct thermo * pth; struct perturbs * ppt; struct perturb_workspace * ppw; double * pvecback; double * pvecthermo; double * pvecmetric; double * s_l; struct perturb_vector * pv; /* short-cut notations for the perturbations */ double delta_g=0.,theta_g=0.,shear_g=0.; double delta_b,theta_b; double cb2,cs2,ca2; double metric_continuity=0.,metric_euler=0.,metric_shear=0.,metric_ufa_class=0.; /* perturbed recombination (just to simplify the notation) */ double H0=0.,Nnow=0.,n_H=0.,fHe=0.; double delta_temp=0.,delta_chi=0., chi=0.; double alpha_rec=0.,delta_alpha_rec=0.; double a_rad=0., Compton_CR =0.; double Tb_in_K=0.; /* Non-metric source terms for photons, i.e. 
\mathcal{P}^{(m)} from arXiv:1305.3261 */ double P0,P1,P2; /* for use with fluid (fld): */ double w,w_prime; /* for use with non-cold dark matter (ncdm): */ int index_q,n_ncdm,idx; double q,epsilon,dlnf0_dlnq,qk_div_epsilon; double rho_ncdm_bg,p_ncdm_bg,pseudo_p_ncdm,w_ncdm,ca2_ncdm,ceff2_ncdm=0.,cvis2_ncdm=0.; /* for use with curvature */ double cotKgen, sqrt_absK; double s2_squared, ssqrt3; /* for use with dcdm and dr */ double f_dr, fprime_dr; /** - rename the fields of the input structure (just to avoid heavy notations) */ pppaw = parameters_and_workspace; k = pppaw->k; k2=k*k; index_md = pppaw->index_md; ppr = pppaw->ppr; pba = pppaw->pba; pth = pppaw->pth; ppt = pppaw->ppt; ppw = pppaw->ppw; s_l = ppw->s_l; pvecback = ppw->pvecback; pvecthermo = ppw->pvecthermo; pvecmetric = ppw->pvecmetric; pv = ppw->pv; /** - get background/thermo quantities in this point */ class_call(background_at_tau(pba, tau, pba->normal_info, pba->inter_closeby, &(ppw->last_index_back), pvecback), pba->error_message, error_message); class_call(thermodynamics_at_z(pba, pth, 1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ pth->inter_closeby, &(ppw->last_index_thermo), pvecback, pvecthermo), pth->error_message, error_message); /** get metric perturbations with perturb_einstein() */ class_call(perturb_einstein(ppr, pba, pth, ppt, index_md, k, tau, y, ppw), ppt->error_message, error_message); /** - compute related background quantities */ a = pvecback[pba->index_bg_a]; a2 = a*a; a_prime_over_a = pvecback[pba->index_bg_H] * a; R = 4./3. * pvecback[pba->index_bg_rho_g]/pvecback[pba->index_bg_rho_b]; /** Compute 'generalised cotK function of argument sqrt(|K|)*tau, for closing hierarchy. 
(see equation 2.34 in arXiv:1305.3261): */ if (pba->has_curvature == _FALSE_){ cotKgen = 1.0/(k*tau); } else{ sqrt_absK = sqrt(fabs(pba->K)); if (pba->K < 0) cotKgen = sqrt_absK/k/tanh(sqrt_absK*tau); else cotKgen = sqrt_absK/k/tan(sqrt_absK*tau); } s2_squared = 1.-3.*pba->K/k2; /** - for scalar mode: */ if (_scalars_) { /** (a) define short-cut notations for the scalar perturbations */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { delta_g = y[pv->index_pt_delta_g]; theta_g = y[pv->index_pt_theta_g]; } delta_b = y[pv->index_pt_delta_b]; theta_b = y[pv->index_pt_theta_b]; cb2 = pvecthermo[pth->index_th_cb2]; /** (b) perturbed recombination **/ if ((ppt->has_perturbed_recombination == _TRUE_)&&(ppw->approx[ppw->index_ap_tca]==(int)tca_off)){ delta_temp= y[ppw->pv->index_pt_perturbed_recombination_delta_temp]; delta_chi= y[ppw->pv->index_pt_perturbed_recombination_delta_chi]; chi=pvecthermo[pth->index_th_xe]; // Conversion of H0 in inverse seconds (pba->H0 is [H0/c] in inverse Mpcs) H0 = pba->H0 * _c_ / _Mpc_over_m_; //Computation of Nnow in SI units Nnow = 3.*H0*H0*pba->Omega0_b*(1.-pth->YHe)/(8.*_PI_*_G_*_m_H_); // total amount of hydrogen today n_H = (pba->a_today/a)*(pba->a_today/a)*(pba->a_today/a)* Nnow; // Helium-to-hydrogen ratio fHe = pth->YHe / (_not4_*(1-pth->YHe)); // The constant such that rho_gamma = a_rad * T^4 a_rad = 8./15.*pow(_PI_,5)*pow(_k_B_,4)/pow(_c_*_h_P_,3); // Compton cooling rate in Mpc^(-1) Compton_CR = 8./3. *_sigma_ * a_rad /(_m_e_ * _c_ *_c_) *_Mpc_over_m_ ; // Temperature is already in Kelvin Tb_in_K = pvecthermo[pth->index_th_Tb]; // Alpha in m^3/s, cf. 
Recfast paper alpha_rec = 1.14 * 4.309e-19*pow((Tb_in_K * 1e-4),-0.6166)/(1+0.6703*pow((Tb_in_K * 1e-4),0.53)) ; // delta alpha, dimensionless delta_alpha_rec= (-0.6166 + 0.6703 * pow((Tb_in_K * 1e-4),0.53)*(-0.6166-0.53))/(1+0.6703*pow((Tb_in_K * 1e-4),0.53)) * delta_temp; } // end of perturbed recombination related quantities /** (c) compute metric-related quantities (depending on gauge; additional gauges can be coded below) Each continuity equation contains a term in (theta+metric_continuity) with metric_continuity = (h_prime/2) in synchronous gauge, (-3 phi_prime) in newtonian gauge Each Euler equation contains a source term metric_euler with metric_euler = 0 in synchronous gauge, (k2 psi) in newtonian gauge Each shear derivative equation contains a source term metric_shear equal to metric_shear = (h_prime+6eta_prime)/2 in synchronous gauge, 0 in newtonian gauge metric_shear_prime is the derivative of metric_shear In the ufa_class approximation, the leading-order source term is (h_prime/2) in synchronous gauge, (-3 (phi_prime+psi_prime)) in newtonian gauge: we approximate the later by (-6 phi_prime) */ if (ppt->gauge == synchronous) { metric_continuity = pvecmetric[ppw->index_mt_h_prime]/2.; metric_euler = 0.; metric_shear = k2 * pvecmetric[ppw->index_mt_alpha]; //metric_shear_prime = k2 * pvecmetric[ppw->index_mt_alpha_prime]; metric_ufa_class = pvecmetric[ppw->index_mt_h_prime]/2.; } if (ppt->gauge == newtonian) { metric_continuity = -3.*pvecmetric[ppw->index_mt_phi_prime]; metric_euler = k2*pvecmetric[ppw->index_mt_psi]; metric_shear = 0.; //metric_shear_prime = 0.; metric_ufa_class = -6.*pvecmetric[ppw->index_mt_phi_prime]; } /** (d) if some approximation schemes are turned on, enforce a few y[] values computed in perturb_einstein */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) { delta_g = ppw->rsa_delta_g; theta_g = ppw->rsa_theta_g; } /** (e) BEGINNING OF ACTUAL SYSTEM OF EQUATIONS OF EVOLUTION: */ /* Note concerning perturbed recombination: 
$cb2*delta_b$ must be replaced everywhere by $cb2*(delta_b+delta_temp)$. If perturbed recombination is not required, delta_temp is equal to zero. */ /** -> photon temperature density */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { dy[pv->index_pt_delta_g] = -4./3.*(theta_g+metric_continuity); } /** -> baryon density */ dy[pv->index_pt_delta_b] = -(theta_b+metric_continuity); /** -> baryon velocity (depends on tight-coupling approximation=tca) */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* without tca */ /** perturbed recombination has an impact **/ dy[pv->index_pt_theta_b] = - a_prime_over_a*theta_b + metric_euler + k2*cb2*(delta_b+delta_temp) + R*pvecthermo[pth->index_th_dkappa]*(theta_g-theta_b); } else { /* with tca */ class_call(perturb_tca_slip_and_shear(y,pppaw,error_message), error_message, error_message); /** perturbed recombination has an impact **/ dy[pv->index_pt_theta_b] = (-a_prime_over_a*theta_b +k2*(cb2*(delta_b+delta_temp)+R*(delta_g/4.-s2_squared*ppw->tca_shear_g)) +R*ppw->tca_slip)/(1.+R) +metric_euler; } /** -> photon temperature higher momenta and photon polarisation (depend on tight-coupling approximation) : */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /** ---> if photon tight-coupling is off: */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /** -----> define \f$ \Pi = G_{\gamma 0} + G_{\gamma 2} + F_{\gamma 2} \f$ */ P0 = (y[pv->index_pt_pol0_g] + y[pv->index_pt_pol2_g] + 2.*s_l[2]*y[pv->index_pt_shear_g])/8.; /** -----> photon temperature velocity */ dy[pv->index_pt_theta_g] = k2*(delta_g/4.-s2_squared*y[pv->index_pt_shear_g]) + metric_euler + pvecthermo[pth->index_th_dkappa]*(theta_b-theta_g); /** -----> photon temperature shear */ dy[pv->index_pt_shear_g] = 0.5*(8./15.*(theta_g+metric_shear) -3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_l3_g] -pvecthermo[pth->index_th_dkappa]*(2.*y[pv->index_pt_shear_g]-4./5./s_l[2]*P0)); /** -----> photon temperature l=3 */ l = 3; dy[pv->index_pt_l3_g] = k/(2.0*l+1.0)* 
(l*s_l[l]*2.*s_l[2]*y[pv->index_pt_shear_g]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_g+1]) - pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g]; /** -----> photon temperature l>3 */ for (l = 4; l < pv->l_max_g; l++) { dy[pv->index_pt_delta_g+l] = k/(2.0*l+1.0)* (l*s_l[l]*y[pv->index_pt_delta_g+l-1]-(l+1)*s_l[l+1]*y[pv->index_pt_delta_g+l+1]) - pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l]; } /** -----> photon temperature lmax */ l = pv->l_max_g; /* l=lmax */ dy[pv->index_pt_delta_g+l] = k*(s_l[l]*y[pv->index_pt_delta_g+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_g+l]) - pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l]; /** -----> photon polarisation l=0 */ dy[pv->index_pt_pol0_g] = -k*y[pv->index_pt_pol0_g+1] -pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-4.*P0); /** -----> photon polarisation l=1 */ dy[pv->index_pt_pol1_g] = k/3.*(y[pv->index_pt_pol1_g-1]-2.*s_l[2]*y[pv->index_pt_pol1_g+1]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol1_g]; /** -----> photon polarisation l=2 */ dy[pv->index_pt_pol2_g] = k/5.*(2.*s_l[2]*y[pv->index_pt_pol2_g-1]-3.*s_l[3]*y[pv->index_pt_pol2_g+1]) -pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol2_g]-4./5.*P0); /** -----> photon polarisation l>2 */ for (l=3; l < pv->l_max_pol_g; l++) dy[pv->index_pt_pol0_g+l] = k/(2.*l+1)* (l*s_l[l]*y[pv->index_pt_pol0_g+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l]; /** -----> photon polarisation lmax_pol */ l = pv->l_max_pol_g; dy[pv->index_pt_pol0_g+l] = k*(s_l[l]*y[pv->index_pt_pol0_g+l-1]-(l+1)*cotKgen*y[pv->index_pt_pol0_g+l]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l]; } /** ---> if photon tight-coupling is on: */ else { /** ----> in that case, only need photon velocity */ /** perturbed recombination has an impact **/ dy[pv->index_pt_theta_g] = -(dy[pv->index_pt_theta_b]+a_prime_over_a*theta_b-cb2*k2*(delta_b+delta_temp))/R 
+k2*(0.25*delta_g-s2_squared*ppw->tca_shear_g)+(1.+R)/R*metric_euler; } } /** -> cdm */ if (pba->has_cdm == _TRUE_) { /** ---> newtonian gauge: cdm density and velocity */ if (ppt->gauge == newtonian) { dy[pv->index_pt_delta_cdm] = -(y[pv->index_pt_theta_cdm]+metric_continuity); /* cdm density */ dy[pv->index_pt_theta_cdm] = - a_prime_over_a*y[pv->index_pt_theta_cdm] + metric_euler; /* cdm velocity */ } /** ---> synchronous gauge: cdm density only (velocity set to zero by definition of the gauge) */ if (ppt->gauge == synchronous) { dy[pv->index_pt_delta_cdm] = -metric_continuity; /* cdm density */ } } /* perturbed recombination */ /* computes the derivatives of delta x_e and delta T_b */ if((ppt->has_perturbed_recombination == _TRUE_)&&(ppw->approx[ppw->index_ap_tca] == (int)tca_off)){ // alpha * n_H is in inverse seconds, so we have to multiply it by Mpc_in_sec dy[ppw->pv->index_pt_perturbed_recombination_delta_chi] = - alpha_rec* a * chi*n_H *(delta_alpha_rec + delta_chi + delta_b) * _Mpc_over_m_ / _c_ ; // see the documentation for this formula dy[ppw->pv->index_pt_perturbed_recombination_delta_temp] = 2./3. * dy[ppw->pv->index_pt_delta_b] - a * Compton_CR * pow(pba->T_cmb/a, 4) * chi / (1.+chi+fHe) * ( (1.-pba->T_cmb*pba->a_today/a/pvecthermo[pth->index_th_Tb])*(delta_g + delta_chi*(1.+fHe)/(1.+chi+fHe)) + pba->T_cmb*pba->a_today/a/pvecthermo[pth->index_th_Tb] *(delta_temp - 1./4. * delta_g) ); } /** -> dcdm and dr */ if (pba->has_dcdm == _TRUE_) { /** -> dcdm */ dy[pv->index_pt_delta_dcdm] = -(y[pv->index_pt_theta_dcdm]+metric_continuity) - a * pba->Gamma_dcdm / k2 * metric_euler; /* dcdm density */ dy[pv->index_pt_theta_dcdm] = - a_prime_over_a*y[pv->index_pt_theta_dcdm] + metric_euler; /* dcdm velocity */ } /** -> dr */ if ((pba->has_dcdm == _TRUE_)&&(pba->has_dr == _TRUE_)) { /* f = rho_dr*a^4/rho_crit_today. In CLASS density units rho_crit_today = H0^2. 
*/ f_dr = pow(pow(a/pba->a_today,2)/pba->H0,2)*pvecback[pba->index_bg_rho_dr]; fprime_dr = pba->Gamma_dcdm*pvecback[pba->index_bg_rho_dcdm]*pow(a,5)/pow(pba->H0,2); /** -----> dr F0 */ dy[pv->index_pt_F0_dr] = -k*y[pv->index_pt_F0_dr+1]-4./3.*metric_continuity*f_dr+ fprime_dr*(y[pv->index_pt_delta_dcdm]+metric_euler/k2); /** -----> dr F1 */ dy[pv->index_pt_F0_dr+1] = k/3.*y[pv->index_pt_F0_dr]-2./3.*k*y[pv->index_pt_F0_dr+2]*s2_squared + 4*metric_euler/(3.*k)*f_dr + fprime_dr/k*y[pv->index_pt_theta_dcdm]; /** -----> exact dr F2 */ dy[pv->index_pt_F0_dr+2] = 8./15.*(3./4.*k*y[pv->index_pt_F0_dr+1]+metric_shear*f_dr) -3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_F0_dr+3]; /** -----> exact dr l=3 */ l = 3; dy[pv->index_pt_F0_dr+3] = k/(2.*l+1.)* (l*s_l[l]*s_l[2]*y[pv->index_pt_F0_dr+2]-(l+1.)*s_l[l+1]*y[pv->index_pt_F0_dr+4]); /** -----> exact dr l>3 */ for (l = 4; l < pv->l_max_dr; l++) { dy[pv->index_pt_F0_dr+l] = k/(2.*l+1)* (l*s_l[l]*y[pv->index_pt_F0_dr+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_F0_dr+l+1]); } /** -----> exact dr lmax_dr */ l = pv->l_max_dr; dy[pv->index_pt_F0_dr+l] = k*(s_l[l]*y[pv->index_pt_F0_dr+l-1]-(1.+l)*cotKgen*y[pv->index_pt_F0_dr+l]); } /** -> fluid (fld) */ if (pba->has_fld == _TRUE_) { /** ---> factors w, w_prime, adiabatic sound speed ca2 (all three background-related), plus actual sound speed in the fluid rest frame cs2 */ w = pba->w0_fld + pba->wa_fld * (1. - a / pba->a_today); w_prime = - pba->wa_fld * a / pba->a_today * a_prime_over_a; ca2 = w - w_prime / 3. 
/ (1.+w) / a_prime_over_a; cs2 = pba->cs2_fld; /** ---> fluid density */ dy[pv->index_pt_delta_fld] = -(1+w)*(y[pv->index_pt_theta_fld]+metric_continuity) -3.*(cs2-w)*a_prime_over_a*y[pv->index_pt_delta_fld] -9.*(1+w)*(cs2-ca2)*a_prime_over_a*a_prime_over_a*y[pv->index_pt_theta_fld]/k2; /** ---> fluid velocity */ dy[pv->index_pt_theta_fld] = /* fluid velocity */ -(1.-3.*cs2)*a_prime_over_a*y[pv->index_pt_theta_fld] +cs2*k2/(1.+w)*y[pv->index_pt_delta_fld] +metric_euler; } /** -> scalar field (scf) */ if (pba->has_scf == _TRUE_) { /** ---> field value */ dy[pv->index_pt_phi_scf] = y[pv->index_pt_phi_prime_scf]; /** ---> Klein Gordon equation */ dy[pv->index_pt_phi_prime_scf] = - 2.*a_prime_over_a*y[pv->index_pt_phi_prime_scf] - metric_continuity*pvecback[pba->index_bg_phi_prime_scf] // metric_continuity = h'/2 - (k2 + a2*pvecback[pba->index_bg_ddV_scf])*y[pv->index_pt_phi_scf]; //checked } if (pba->has_smg == _TRUE_) { class_test(ppt->gauge == newtonian, ppt->error_message, "asked for scalar field AND Newtonian gauge. 
Not yet implemented"); /** ---> scalar field velocity */ dy[pv->index_pt_vx_smg] = pvecmetric[ppw->index_mt_vx_prime_smg]; //y[pv->index_pt_vx_prime_smg]; /** ---> Scalar field acceleration (passes the value obtained in perturb_einstein) */ dy[pv->index_pt_vx_prime_smg] = pvecmetric[ppw->index_mt_vx_prime_prime_smg]; } /** -> ultra-relativistic neutrino/relics (ur) */ if (pba->has_ur == _TRUE_) { /** ---> if radiation streaming approximation is off */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /** -----> ur density */ dy[pv->index_pt_delta_ur] = -4./3.*(y[pv->index_pt_theta_ur] + metric_continuity); /** -----> ur velocity */ dy[pv->index_pt_theta_ur] = k2*(y[pv->index_pt_delta_ur]/4.-s2_squared*y[pv->index_pt_shear_ur]) + metric_euler; if(ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) { /** -----> exact ur shear */ dy[pv->index_pt_shear_ur] = 0.5*(8./15.*(y[pv->index_pt_theta_ur]+metric_shear) -3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_shear_ur+1]); /** -----> exact ur l=3 */ l = 3; dy[pv->index_pt_l3_ur] = k/(2.*l+1.)* (l*2.*s_l[l]*s_l[2]*y[pv->index_pt_shear_ur]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_ur+1]); /** -----> exact ur l>3 */ for (l = 4; l < pv->l_max_ur; l++) { dy[pv->index_pt_delta_ur+l] = k/(2.*l+1)* (l*s_l[l]*y[pv->index_pt_delta_ur+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_ur+l+1]); } /** -----> exact ur lmax_ur */ l = pv->l_max_ur; dy[pv->index_pt_delta_ur+l] = k*(s_l[l]*y[pv->index_pt_delta_ur+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_ur+l]); } else { /** -----> in fluid approximation (ufa): only ur shear neeeded */ //TBC: curvature? 
/* a la Ma & Bertschinger */ if (ppr->ur_fluid_approximation == ufa_mb) { dy[pv->index_pt_shear_ur] = -3./tau*y[pv->index_pt_shear_ur] +2./3.*(y[pv->index_pt_theta_ur]+metric_shear); } /* a la Hu */ if (ppr->ur_fluid_approximation == ufa_hu) { dy[pv->index_pt_shear_ur] = -3.*a_prime_over_a*y[pv->index_pt_shear_ur] +2./3.*(y[pv->index_pt_theta_ur]+metric_shear); } /* a la CLASS */ if (ppr->ur_fluid_approximation == ufa_CLASS) { dy[pv->index_pt_shear_ur] = -3./tau*y[pv->index_pt_shear_ur] +2./3.*(y[pv->index_pt_theta_ur]+metric_ufa_class); } } } } /** -> non-cold dark matter (ncdm): massive neutrinos, WDM, etc. */ //TBC: curvature in all ncdm if (pba->has_ncdm == _TRUE_) { idx = pv->index_pt_psi0_ncdm1; /** ---> first case: use a fluid approximation (ncdmfa) */ //TBC: curvature if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on) { /** -----> loop over species */ for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) { /** -----> define intermediate quantitites */ rho_ncdm_bg = pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; /* background density */ p_ncdm_bg = pvecback[pba->index_bg_p_ncdm1+n_ncdm]; /* background pressure */ pseudo_p_ncdm = pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm]; /* pseudo-pressure (see CLASS IV paper) */ w_ncdm = p_ncdm_bg/rho_ncdm_bg; /* eqaution of state parameter */ ca2_ncdm = w_ncdm/3.0/(1.0+w_ncdm)*(5.0-pseudo_p_ncdm/p_ncdm_bg); /* adiabatic sound speed */ /* c_eff is (delta p / delta rho) in the gauge under consideration (not in the gauge comoving with the fluid) */ /* c_vis is introduced in order to close the system */ /* different ansatz for sound speed c_eff and viscosity speed c_vis */ if (ppr->ncdm_fluid_approximation == ncdmfa_mb) { ceff2_ncdm = ca2_ncdm; cvis2_ncdm = 3.*w_ncdm*ca2_ncdm; } if (ppr->ncdm_fluid_approximation == ncdmfa_hu) { ceff2_ncdm = ca2_ncdm; cvis2_ncdm = w_ncdm; } if (ppr->ncdm_fluid_approximation == ncdmfa_CLASS) { ceff2_ncdm = ca2_ncdm; cvis2_ncdm = 3.*w_ncdm*ca2_ncdm; } /** -----> exact continuity equation */ dy[idx] = 
-(1.0+w_ncdm)*(y[idx+1]+metric_continuity)- 3.0*a_prime_over_a*(ceff2_ncdm-w_ncdm)*y[idx]; /** -----> exact euler equation */ dy[idx+1] = -a_prime_over_a*(1.0-3.0*ca2_ncdm)*y[idx+1]+ ceff2_ncdm/(1.0+w_ncdm)*k2*y[idx]-k2*y[idx+2] + metric_euler; /** -----> different ansatz for approximate shear derivative */ if (ppr->ncdm_fluid_approximation == ncdmfa_mb) { dy[idx+2] = -3.0*(a_prime_over_a*(2./3.-ca2_ncdm-pseudo_p_ncdm/p_ncdm_bg/3.)+1./tau)*y[idx+2] +8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_shear); } if (ppr->ncdm_fluid_approximation == ncdmfa_hu) { dy[idx+2] = -3.0*a_prime_over_a*ca2_ncdm/w_ncdm*y[idx+2] +8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_shear); } if (ppr->ncdm_fluid_approximation == ncdmfa_CLASS) { dy[idx+2] = -3.0*(a_prime_over_a*(2./3.-ca2_ncdm-pseudo_p_ncdm/p_ncdm_bg/3.)+1./tau)*y[idx+2] +8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_ufa_class); } /** -----> jump to next species */ idx += pv->l_max_ncdm[n_ncdm]+1; } } /** ---> second case: use exact equation (Boltzmann hierarchy on momentum grid) */ else { /** -----> loop over species */ for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) { /** -----> loop over momentum */ for (index_q=0; index_q < pv->q_size_ncdm[n_ncdm]; index_q++) { /** -----> define intermediate quantitites */ dlnf0_dlnq = pba->dlnf0_dlnq_ncdm[n_ncdm][index_q]; q = pba->q_ncdm[n_ncdm][index_q]; epsilon = sqrt(q*q+a2*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]); qk_div_epsilon = k*q/epsilon; /** -----> ncdm density for given momentum bin */ dy[idx] = -qk_div_epsilon*y[idx+1]+metric_continuity*dlnf0_dlnq/3.; /** -----> ncdm velocity for given momentum bin */ dy[idx+1] = qk_div_epsilon/3.0*(y[idx] - 2*s_l[2]*y[idx+2]) -epsilon*metric_euler/(3*q*k)*dlnf0_dlnq; /** -----> ncdm shear for given momentum bin */ dy[idx+2] = qk_div_epsilon/5.0*(2*s_l[2]*y[idx+1]-3.*s_l[3]*y[idx+3]) -s_l[2]*metric_shear*2./15.*dlnf0_dlnq; /** -----> ncdm l>3 for given momentum bin */ for(l=3; l<pv->l_max_ncdm[n_ncdm]; l++){ 
dy[idx+l] = qk_div_epsilon/(2.*l+1.0)*(l*s_l[l]*y[idx+(l-1)]-(l+1.)*s_l[l+1]*y[idx+(l+1)]); } /** -----> ncdm lmax for given momentum bin (truncation as in Ma and Bertschinger) but with curvature taken into account a la arXiv:1305.3261 */ dy[idx+l] = qk_div_epsilon*y[idx+l-1]-(1.+l)*k*cotKgen*y[idx+l]; /** -----> jump to next momentum bin or species */ idx += (pv->l_max_ncdm[n_ncdm]+1); } } } } /** -> metric */ /** --> eta of synchronous gauge */ if (ppt->gauge == synchronous) { dy[pv->index_pt_eta] = pvecmetric[ppw->index_mt_eta_prime]; } if (ppt->gauge == newtonian) { dy[pv->index_pt_phi] = pvecmetric[ppw->index_mt_phi_prime]; } } /** - vector mode */ if (_vectors_) { fprintf(stderr,"we are in vectors\n"); ssqrt3 = sqrt(1.-2.*pba->K/k2); cb2 = pvecthermo[pth->index_th_cb2]; /** -> baryon velocity */ if (ppt->gauge == synchronous) { dy[pv->index_pt_theta_b] = -(1-3.*cb2)*a_prime_over_a*y[pv->index_pt_theta_b] - pvecthermo[pth->index_th_dkappa]*(_SQRT2_/4.*delta_g + y[pv->index_pt_theta_b]); } else if (ppt->gauge == newtonian) { dy[pv->index_pt_theta_b] = -(1-3.*cb2)*a_prime_over_a*y[pv->index_pt_theta_b] - _SQRT2_/4.*pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b]) + pvecmetric[ppw->index_mt_V_prime]+(1.-3.*cb2)*a_prime_over_a*y[pv->index_pt_V]; } /* if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) { */ /* short-cut notations for the tensor perturbations */ delta_g = y[pv->index_pt_delta_g]; theta_g = y[pv->index_pt_theta_g]; shear_g = y[pv->index_pt_shear_g]; /* (P^{(1)}) (see Eq. 
B.23 in 1305.3261)*/ P1 = -_SQRT6_/40.*( 4./(3.*k)*theta_g //F1 +y[pv->index_pt_delta_g+3] +2.*y[pv->index_pt_pol0_g] +10./7.*y[pv->index_pt_pol2_g] -4./7.*y[pv->index_pt_pol0_g+4]); if (ppt->gauge == synchronous) { /* photon density (delta_g = F_0) */ dy[pv->index_pt_delta_g] = -4./3.*theta_g -pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b]); /* photon velocity (theta_g = (3k/4)*F_1) */ dy[pv->index_pt_theta_g] = k2*(delta_g/4.-s_l[2]*shear_g) -pvecthermo[pth->index_th_dkappa]*(theta_g+4.0/_SQRT6_*P1) +4.0/(3.0*_SQRT2_)*ssqrt3*y[pv->index_pt_hv_prime]; } else if (ppt->gauge == newtonian) { /* photon density (delta_g = F_0) */ dy[pv->index_pt_delta_g] = -4./3.*theta_g -pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b]) -2.*_SQRT2_*pvecmetric[ppw->index_mt_V_prime]; /* photon velocity (theta_g = (3k/4)*F_1) */ dy[pv->index_pt_theta_g] = k2*(delta_g/4.-s_l[2]*shear_g) -pvecthermo[pth->index_th_dkappa]*(theta_g+4.0/_SQRT6_*P1); } /* photon shear (shear_g = F_2/2) */ dy[pv->index_pt_shear_g] = 4./15.*s_l[2]*theta_g-3./10.*k*s_l[3]*y[pv->index_pt_shear_g+1] -pvecthermo[pth->index_th_dkappa]*shear_g; /* photon l=3 */ dy[pv->index_pt_l3_g] = k/7.*(6.*s_l[3]*shear_g-4.*s_l[4]*y[pv->index_pt_l3_g+1]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g]; /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */ for (l=4; l < pv->l_max_g; l++) dy[pv->index_pt_delta_g+l] = k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_delta_g+l-1] -(l+1.)*s_l[l+1]*y[pv->index_pt_delta_g+l+1]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l]; /* l=lmax */ l = pv->l_max_g; dy[pv->index_pt_delta_g+l] = k*(s_l[l]*y[pv->index_pt_delta_g+l-1] -(1.+l)*cotKgen*y[pv->index_pt_delta_g+l]) - pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l]; /* photon polarization, l=0 (pol0_g = G_0)*/ dy[pv->index_pt_pol0_g] = -k*y[pv->index_pt_pol0_g+1] -pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-_SQRT6_*P1); /* additional 
momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */ for (l=1; l < pv->l_max_pol_g; l++) dy[pv->index_pt_pol0_g+l] = k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_pol0_g+l-1] -(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l]; /* l=lmax */ l = pv->l_max_pol_g; dy[pv->index_pt_pol0_g+l] = k*(s_l[l]*y[pv->index_pt_pol0_g+l-1] -(l+1.)*cotKgen*y[pv->index_pt_pol0_g+l]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l]; /* } } */ if (ppt->gauge == synchronous) { /* Vector metric perturbation in synchronous gauge: */ dy[pv->index_pt_hv_prime] = pvecmetric[ppw->index_mt_hv_prime_prime]; } else if (ppt->gauge == newtonian){ /* Vector metric perturbation in Newtonian gauge: */ dy[pv->index_pt_V] = pvecmetric[ppw->index_mt_V_prime]; } } /** - tensor mode */ if (_tensors_) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) { /* short-cut notations for the tensor perturbations */ delta_g = y[pv->index_pt_delta_g]; theta_g = y[pv->index_pt_theta_g]; shear_g = y[pv->index_pt_shear_g]; /* (P^{(2)}) */ P2 =-1.0/_SQRT6_*( 1./10.*delta_g +2./7.*shear_g +3./70.*y[pv->index_pt_delta_g+4] -3./5.*y[pv->index_pt_pol0_g] +6./7.*y[pv->index_pt_pol2_g] -3./70.*y[pv->index_pt_pol0_g+4]); /* above expression from paper, expression below matches old class but is not correct P2 = -1.0/_SQRT6_*( 1./10.*delta_g +2./35.*shear_g +1./210.*y[pv->index_pt_delta_g+4] -3./5.*y[pv->index_pt_pol0_g] +6./35.*y[pv->index_pt_pol2_g] -1./210.*y[pv->index_pt_pol0_g+4] ); */ /* photon density (delta_g = F_0) */ dy[pv->index_pt_delta_g] = -4./3.*theta_g -pvecthermo[pth->index_th_dkappa]*(delta_g+_SQRT6_*P2) //+y[pv->index_pt_gwdot]; +_SQRT6_*y[pv->index_pt_gwdot]; //TBC /* photon velocity (theta_g = (3k/4)*F_1) */ dy[pv->index_pt_theta_g] = k2*(delta_g/4.-s_l[2]*shear_g) -pvecthermo[pth->index_th_dkappa]*theta_g; /* photon shear (shear_g = F_2/2) */ dy[pv->index_pt_shear_g] = 
4./15.*s_l[2]*theta_g-3./10.*k*s_l[3]*y[pv->index_pt_shear_g+1] -pvecthermo[pth->index_th_dkappa]*shear_g; /* photon l=3 */ dy[pv->index_pt_l3_g] = k/7.*(6.*s_l[3]*shear_g-4.*s_l[4]*y[pv->index_pt_l3_g+1]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g]; /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */ for (l=4; l < pv->l_max_g; l++) dy[pv->index_pt_delta_g+l] = k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_delta_g+l-1] -(l+1.)*s_l[l+1]*y[pv->index_pt_delta_g+l+1]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l]; /* l=lmax */ l = pv->l_max_g; dy[pv->index_pt_delta_g+l] = k*(s_l[l]*y[pv->index_pt_delta_g+l-1] -(1.+l)*cotKgen*y[pv->index_pt_delta_g+l]) - pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l]; /* photon polarization, l=0 (pol0_g = G_0)*/ dy[pv->index_pt_pol0_g] = -k*y[pv->index_pt_pol0_g+1] -pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-_SQRT6_*P2); /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */ for (l=1; l < pv->l_max_pol_g; l++) dy[pv->index_pt_pol0_g+l] = k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_pol0_g+l-1] -(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l]; /* l=lmax */ l = pv->l_max_pol_g; dy[pv->index_pt_pol0_g+l] = k*(s_l[l]*y[pv->index_pt_pol0_g+l-1] -(l+1.)*cotKgen*y[pv->index_pt_pol0_g+l]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l]; } } if (ppt->evolve_tensor_ur == _TRUE_) { dy[pv->index_pt_delta_ur] = -4./3.*y[pv->index_pt_theta_ur]+_SQRT6_*y[pv->index_pt_gwdot]; dy[pv->index_pt_theta_ur] = k2*(y[pv->index_pt_delta_ur]/4.-s2_squared*y[pv->index_pt_shear_ur]); dy[pv->index_pt_shear_ur] = (4./15.*y[pv->index_pt_theta_ur] -3./10.*k*s_l[3]/s_l[2]*y[pv->index_pt_shear_ur+1]); l = 3; dy[pv->index_pt_l3_ur] = k/(2.*l+1.)* (l*2.*s_l[l]*s_l[2]*y[pv->index_pt_shear_ur]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_ur+1]); for (l = 4; l < pv->l_max_ur; l++) { dy[pv->index_pt_delta_ur+l] = k/(2.*l+1)* 
(l*s_l[l]*y[pv->index_pt_delta_ur+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_ur+l+1]);
      }

      /* closure of the ur hierarchy at l = l_max_ur (curvature-aware truncation) */
      l = pv->l_max_ur;
      dy[pv->index_pt_delta_ur+l] = k*(s_l[l]*y[pv->index_pt_delta_ur+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_ur+l]);
    }

    /** -> non-cold dark matter (ncdm): massive neutrinos, WDM, etc. */
    //TBC: curvature in all ncdm
    if (ppt->evolve_tensor_ncdm == _TRUE_) {

      idx = pv->index_pt_psi0_ncdm1;

      /** -----> loop over species */
      for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) {

        /** -----> loop over momentum */
        for (index_q=0; index_q < pv->q_size_ncdm[n_ncdm]; index_q++) {

          /** -----> define intermediate quantitites */
          dlnf0_dlnq = pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
          q = pba->q_ncdm[n_ncdm][index_q];
          /* comoving energy of this momentum bin; a2*M^2 is the mass term */
          epsilon = sqrt(q*q+a2*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]);
          qk_div_epsilon = k*q/epsilon;

          /** -----> ncdm density for given momentum bin */
          dy[idx] = -qk_div_epsilon*y[idx+1]-0.25*_SQRT6_*y[pv->index_pt_gwdot]*dlnf0_dlnq;

          /** -----> ncdm l>0 for given momentum bin */
          for(l=1; l<pv->l_max_ncdm[n_ncdm]; l++){
            dy[idx+l] = qk_div_epsilon/(2.*l+1.0)*(l*s_l[l]*y[idx+(l-1)]-(l+1.)*s_l[l+1]*y[idx+(l+1)]);
          }

          /** -----> ncdm lmax for given momentum bin (truncation as in Ma and Bertschinger) but with curvature taken into account a la arXiv:1305.3261 */
          dy[idx+l] = qk_div_epsilon*y[idx+l-1]-(1.+l)*k*cotKgen*y[idx+l];

          /** -----> jump to next momentum bin or species */
          idx += (pv->l_max_ncdm[n_ncdm]+1);
        }
      }
    }

    /* tensor metric perturbation h (gravitational waves) */
    dy[pv->index_pt_gw] = y[pv->index_pt_gwdot];

    /* its time-derivative */
    dy[pv->index_pt_gwdot] = pvecmetric[ppw->index_mt_gw_prime_prime];
  }

  return _SUCCESS_;
}

/**
 * Compute the tight-coupling-approximation (TCA) values of the
 * baryon-photon slip (theta_b' - theta_g') and of the photon shear, at
 * the order selected by ppr->tight_coupling_approximation, and store
 * the results in ppw->tca_slip and ppw->tca_shear_g.
 *
 * @param y                          Input: vector of perturbations
 * @param parameters_and_workspace   Input: pointer to a struct perturb_parameters_and_workspace
 *                                   (passed as void* to match the generic integrator signature)
 * @param error_message              Output: error message written by class_stop/class_test
 * @return _SUCCESS_ on success
 */
int perturb_tca_slip_and_shear(double * y,
                               void * parameters_and_workspace,
                               ErrorMsg error_message
                               ) {

  /** Summary: */

  /** - define local variables */

  /* scale factor and other background quantities */
  double a,a_prime_over_a,a_primeprime_over_a,R;

  /* useful terms for tight-coupling approximation */
  double slip=0.;
  double tau_c=0.,dtau_c=0.;
  double theta_prime,shear_g_prime=0.,theta_prime_prime;
  double g0,g0_prime,g0_prime_prime;
  double F=0.,F_prime=0.,F_prime_prime=0.;

  /* short-cut names for the fields of the input structure */
  struct perturb_parameters_and_workspace * pppaw;
  double k,k2;
  struct precision * ppr;
  struct background * pba;
  struct thermo * pth;
  struct perturbs * ppt;
  struct perturb_workspace * ppw;
  double * pvecback;
  double * pvecthermo;
  double * pvecmetric;
  struct perturb_vector * pv;

  /* short-cut notations for the perturbations */
  double delta_g=0.,theta_g=0.,shear_g=0.;
  double delta_b,theta_b;
  double Delta;
  double cb2;
  double metric_continuity=0.,metric_euler=0.,metric_shear=0.,metric_shear_prime=0.;

  /* perturbed recombination */
  double delta_temp=0.;

  /* for use with curvature */
  double s2_squared;

  /** - rename the fields of the input structure (just to avoid heavy notations) */

  pppaw = parameters_and_workspace;
  k = pppaw->k;
  k2=k*k;
  ppr = pppaw->ppr;
  pba = pppaw->pba;
  pth = pppaw->pth;
  ppt = pppaw->ppt;
  ppw = pppaw->ppw;
  pvecback = ppw->pvecback;
  pvecthermo = ppw->pvecthermo;
  pvecmetric = ppw->pvecmetric;
  pv = ppw->pv;

  /** - compute related background quantities */

  a = pvecback[pba->index_bg_a];
  a_prime_over_a = pvecback[pba->index_bg_H] * a;
  a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * a + 2. * a_prime_over_a * a_prime_over_a;
  //z = pba->a_today-1.;
  /* R = 4 rho_gamma / (3 rho_b), the photon-to-baryon momentum density ratio */
  R = 4./3. * pvecback[pba->index_bg_rho_g]/pvecback[pba->index_bg_rho_b];
  s2_squared = 1.-3.*pba->K/k2;

  /** (a) define short-cut notations for the scalar perturbations */
  if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
    delta_g = y[pv->index_pt_delta_g];
    theta_g = y[pv->index_pt_theta_g];
  }
  delta_b = y[pv->index_pt_delta_b];
  theta_b = y[pv->index_pt_theta_b];
  cb2 = pvecthermo[pth->index_th_cb2];

  /* perturbed recombination */
  if ((ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){
    delta_temp = y[pv->index_pt_perturbed_recombination_delta_temp];
  }

  /** (b) define short-cut notations used only in tight-coupling approximation */

  tau_c = 1./pvecthermo[pth->index_th_dkappa]; /* inverse of opacity */
  dtau_c = -pvecthermo[pth->index_th_ddkappa]*tau_c*tau_c; /* its first derivative wrt conformal time */
  F = tau_c/(1+R); /* F = tau_c/(1+R) */

  if (ppr->tight_coupling_approximation >= (int)second_order_CLASS) {
    F_prime = dtau_c/(1+R)+tau_c*a_prime_over_a*R/(1+R)/(1+R); /*F' needed by second_order_CLASS and compromise_CLASS */
    if (ppr->tight_coupling_approximation == (int)second_order_CLASS) {
      F_prime_prime =(- pvecthermo[pth->index_th_dddkappa]*tau_c*tau_c /* F'' needed by second_order_CLASS only */
                      + 2.*pvecthermo[pth->index_th_ddkappa]*pvecthermo[pth->index_th_ddkappa]*tau_c*tau_c*tau_c)/(1+R)
        +2.*dtau_c*a_prime_over_a*R/(1+R)/(1+R)
        +tau_c*((a_primeprime_over_a-2.*a_prime_over_a*a_prime_over_a)+2.*a_prime_over_a*a_prime_over_a*R/(1+R))*R/(1+R)/(1+R);
    }
  }

  /** (d) compute metric-related quantities (depending on gauge; additional gauges can be coded below)

      Each continuity equation contains a term in (theta+metric_continuity) with
      metric_continuity = (h_prime/2) in synchronous gauge, (-3 phi_prime) in newtonian gauge

      Each Euler equation contains a source term metric_euler with
      metric_euler = 0 in synchronous gauge, (k2 psi) in newtonian gauge

      Each shear derivative equation contains a source term metric_shear equal to
      metric_shear = (h_prime+6eta_prime)/2 in synchronous gauge, 0 in newtonian gauge

      metric_shear_prime is the derivative of metric_shear

      In the ufa_class approximation, the leading-order source term is (h_prime/2) in synchronous
      gauge, (-3 (phi_prime+psi_prime)) in newtonian gauge: we approximate the later by (-6 phi_prime) */

  if (ppt->gauge == synchronous) {
    metric_continuity = pvecmetric[ppw->index_mt_h_prime]/2.;
    metric_euler = 0.;
    metric_shear = k2 * pvecmetric[ppw->index_mt_alpha];
    metric_shear_prime = k2 * pvecmetric[ppw->index_mt_alpha_prime];
  }
  if (ppt->gauge == newtonian) {
    metric_continuity = -3.*pvecmetric[ppw->index_mt_phi_prime];
    metric_euler = k2*pvecmetric[ppw->index_mt_psi];
    metric_shear = 0.;
    metric_shear_prime = 0.;
  }

  /** (e) if some approximation schemes are turned on, enforce a few y[] values computed in perturb_einstein */

  /* free-streaming photon velocity */
  if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)
    theta_g = ppw->rsa_theta_g;

  /** -----> like Ma & Bertschinger */
  if (ppr->tight_coupling_approximation == (int)first_order_MB) {

    slip=2.*R/(1.+R)*a_prime_over_a*(theta_b-theta_g)
      +F*(-a_primeprime_over_a*theta_b
          +k2*(-a_prime_over_a*delta_g/2.
               +cb2*(-theta_b-metric_continuity)
               -4./3.*(-theta_g-metric_continuity)/4.)
          -a_prime_over_a*metric_euler);
  }

  /** -----> relax assumption dkappa~a^-2 (like in CAMB) */
  if ((ppr->tight_coupling_approximation == (int)first_order_CAMB) ||
      (ppr->tight_coupling_approximation == (int)compromise_CLASS)) {

    slip=(dtau_c/tau_c-2.*a_prime_over_a/(1.+R))*(theta_b-theta_g)
      +F*(-a_primeprime_over_a*theta_b
          +k2*(-a_prime_over_a*delta_g/2.
               +cb2*(-theta_b-metric_continuity)
               -4./3.*(-theta_g-metric_continuity)/4.)
-a_prime_over_a*metric_euler);
  }

  /** -----> also relax assumption cb2~a^-1 */
  if ((ppr->tight_coupling_approximation == (int)first_order_CLASS) ||
      (ppr->tight_coupling_approximation == (int)second_order_CLASS)){

    slip=(dtau_c/tau_c-2.*a_prime_over_a/(1.+R))*(theta_b-theta_g)
      +F*(-a_primeprime_over_a*theta_b
          +k2*(-a_prime_over_a*delta_g/2.
               +pvecthermo[pth->index_th_dcb2]*delta_b
               +cb2*(-theta_b-metric_continuity)
               -4./3.*(-theta_g-metric_continuity)/4.)
          -a_prime_over_a*metric_euler);
  }

  /** -----> intermediate quantities for 2nd order tca: shear_g at first order in tight-coupling */
  shear_g=16./45.*tau_c*(theta_g+metric_shear);
  /* (Ma & Bertschinger give (1/9)*(4/3) instead of (2/15)*(4/3) because
     they didn't include the contribution of G_gamma0 and G_gamma2, which
     are of the same order as sigma_g. This was already consistently
     included in CAMB) */

  /** -----> intermediate quantities for 2nd order tca: zero order for theta_b' = theta_g' */
  /** perturbed recombination has an impact **/
  theta_prime = (-a_prime_over_a*theta_b+k2*(cb2*(delta_b+delta_temp)+R/4.*delta_g))/(1.+R)
    + metric_euler;

  /** -----> intermediate quantities for 2nd order tca: shear_g_prime at first order in tight-coupling */
  shear_g_prime=16./45.*(tau_c*(theta_prime+metric_shear_prime)+dtau_c*(theta_g+metric_shear));

  /** -----> 2nd order as in CRS*/
  if (ppr->tight_coupling_approximation == (int)second_order_CRS) {

    if (ppt->gauge == newtonian) {
      class_stop(error_message,
                 "the second_order_CRS approach to tight-coupling is coded in synchronous gauge, not newtonian: change gauge or try another tight-coupling scheme");
    }

    if (ppt->gauge == synchronous) {

      class_test(pba->sgnK != 0,
                 ppt->error_message,
                 "the second_order_CRS approach to tight-coupling is coded in the flat case only: for non-flat try another tight-coupling scheme");

      /* infer Delta from h'' using Einstein equation */
      Delta = 2*k2*y[pv->index_pt_eta]
        -2*a_prime_over_a*pvecmetric[ppw->index_mt_h_prime]
        -pvecmetric[ppw->index_mt_h_prime_prime];

      /* monster expression for slip at second-order in tight-coupling */
      slip=(-2./(1.+R)*a_prime_over_a-pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa])*(theta_b-theta_g)
        +(-a_primeprime_over_a*theta_b
          -k2*a_prime_over_a*(delta_g/2.-2.*shear_g)
          +k2*(cb2*(-theta_b-metric_continuity)
               -4./3.*(-theta_g-metric_continuity)/4.
               +shear_g_prime)
          )/pvecthermo[pth->index_th_dkappa]/(1.+R)
        -2.*R*(3.*a_prime_over_a*a_prime_over_a*cb2+(1.+R)*(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)-3.*a_prime_over_a*a_prime_over_a)
        /(1.+R)/(1.+R)/(1.+R)*(theta_b-theta_g)/pvecthermo[pth->index_th_dkappa]
        +( a_primeprime_over_a*a_prime_over_a*((2.-3.*cb2)*R-2.)*theta_b/(1.+R)
           +a_prime_over_a*k2*(1.-3.*cb2)*theta_b/3./(1.+R)
           /* perturbed recombination has an impact (next two lines) */
           +a_primeprime_over_a*k2*cb2*(delta_b+delta_temp)/(1.+R)
           +k2*k2*(3.*cb2-1.)*cb2*(delta_b+delta_temp)/3./(1.+R)
           +k2*k2*R*(3.*cb2-1.)*delta_g/12./(1.+R)
           +a_primeprime_over_a*k2*(2.+3.*R)*delta_g/4./(1.+R)
           +a_prime_over_a*a_prime_over_a*k2*((2.-3.*cb2)*R-1.)*delta_g/2./(1.+R)
           +a_prime_over_a*k2*cb2*(1.+(3.*cb2-2.)*R)*(-theta_b-metric_continuity)/(1.+R)
           +a_prime_over_a*k2*(2.+(5.-3.*cb2)*R)*4./3.*(-theta_g-metric_continuity)/4./(1.+R)
           +a_prime_over_a*(1.-3.*cb2)*k2*2.*metric_shear/3.
           +k2*k2*(3.*cb2-1.)*y[pv->index_pt_eta]/3.
           +2.*a_prime_over_a*k2*(3.*cb2-1.)*pvecmetric[ppw->index_mt_eta_prime]
           +k2*(1.-3.*cb2)*Delta/6.
           )/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]/(1.+R)/(1.+R)
        -(4.*a_primeprime_over_a*theta_b-4.*k2*cb2*(-theta_b-metric_continuity)+2.*a_prime_over_a*k2*delta_g+k2*4./3.*(-theta_g-metric_continuity))/2./(1.+R)/(1.+R)*pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]
        +4.*a_prime_over_a*R/(1.+R)/(1.+R)*pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]*(theta_b-theta_g);

      /* second-order correction to shear */
      shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+k2*pvecmetric[ppw->index_mt_alpha_prime]);
    }
  }

  /** -----> 2nd order like in CLASS paper */
  if (ppr->tight_coupling_approximation == (int)second_order_CLASS) {

    if (ppt->gauge == newtonian) {
      class_stop(error_message,
                 "the second_order_CLASS approach to tight-coupling is coded in synchronous gauge, not newtonian: change gauge or try another tight-coupling scheme");
    }

    if (ppt->gauge == synchronous) {

      /* zero order for theta_b'' = theta_g'' */
      theta_prime_prime = ((R-1.)*a_prime_over_a*theta_prime-(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_b
                           +k2*(pvecthermo[pth->index_th_dcb2]*delta_b+cb2*(-theta_b-metric_continuity)-a_prime_over_a*R/4.*delta_g+R/4.*4./3.*(-theta_g-metric_continuity)))/(1.+R);

      /* zero-order quantities g0, g0', go'' */
      g0 = -a_prime_over_a*theta_b + k2*(cb2*delta_b-delta_g/4.);
      g0_prime = -a_prime_over_a*theta_prime-(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_b+k2*(pvecthermo[pth->index_th_dcb2]*delta_b+(1./3.-cb2)*(theta_b+0.5*pvecmetric[ppw->index_mt_h_prime]));
      g0_prime_prime = -a_prime_over_a*theta_prime_prime-2.*(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_prime
        -(2.*a_prime_over_a*a_prime_over_a*a_prime_over_a-3.*a_primeprime_over_a*a_prime_over_a)*theta_b
+k2*(pvecthermo[pth->index_th_ddcb2]*delta_b-2.*pvecthermo[pth->index_th_dcb2]*(theta_b+0.5*pvecmetric[ppw->index_mt_h_prime])+(1./3.-cb2)*(theta_prime+0.5*pvecmetric[ppw->index_mt_h_prime_prime]));

      /* slip at second order */
      slip = (1.-2*a_prime_over_a*F)*slip
        + F*k2*s2_squared*(2.*a_prime_over_a*shear_g+shear_g_prime)
        -F*(F_prime_prime*g0+2.*F_prime*g0_prime+F*g0_prime_prime);

      /* second-order correction to shear */
      shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+metric_shear_prime);
    }
  }

  /** -----> add only the most important 2nd order terms */
  if (ppr->tight_coupling_approximation == (int)compromise_CLASS) {

    /* slip at second order (only leading second-order terms) */
    slip = (1.-2.*a_prime_over_a*F)*slip
      + F*k2*(2.*a_prime_over_a*s2_squared*shear_g+s2_squared*shear_g_prime-(1./3.-cb2)*(F*theta_prime+2.*F_prime*theta_b));

    /* second-order correction to shear */
    shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+metric_shear_prime);
  }

  /** ---> store tight-coupling values of photon shear and its derivative */

  ppw->tca_shear_g = shear_g;
  ppw->tca_slip = slip;

  return _SUCCESS_;
}

/**
 * Under the radiation streaming approximation (rsa), compute the photon
 * (and, when pba->has_ur is true, ultra-relativistic species) density and
 * velocity perturbations analytically and store them in
 * ppw->rsa_delta_g / rsa_theta_g / rsa_delta_ur / rsa_theta_ur.
 * The expressions depend on the gauge and on
 * ppr->radiation_streaming_approximation (rsa_null sets them to zero;
 * rsa_MD_with_reio adds reionization corrections proportional to the
 * photon-baryon coupling rate dkappa).
 *
 * @param ppr             Input: pointer to precision structure
 * @param pba             Input: pointer to background structure
 * @param pth             Input: pointer to thermodynamics structure
 * @param ppt             Input: pointer to perturbation structure
 * @param k               Input: wavenumber
 * @param y               Input: vector of perturbations
 * @param a_prime_over_a  Input: conformal Hubble rate a'/a
 * @param pvecthermo      Input: vector of thermodynamics quantities (unused here;
 *                        the ppw copies are read instead)
 * @param ppw             Input/Output: workspace; rsa_* fields are written
 * @return _SUCCESS_ on success
 */
int perturb_rsa_delta_and_theta(
                                struct precision * ppr,
                                struct background * pba,
                                struct thermo * pth,
                                struct perturbs * ppt,
                                double k,
                                double * y,
                                double a_prime_over_a,
                                double * pvecthermo,
                                struct perturb_workspace * ppw
                                ) {

  /** Summary: */

  /** - define local variables */

  double k2;

  k2 = k*k;

  // formulas below TBC for curvaturema

  /* newtonian gauge */
  if (ppt->gauge == newtonian) {

    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {

      if (ppr->radiation_streaming_approximation == rsa_null) {
        ppw->rsa_delta_g = 0.;
        ppw->rsa_theta_g = 0.;
      }
      else {
        ppw->rsa_delta_g = -4.*y[ppw->pv->index_pt_phi];
        ppw->rsa_theta_g = 6.*ppw->pvecmetric[ppw->index_mt_phi_prime];
      }

      if (ppr->radiation_streaming_approximation == rsa_MD_with_reio) {

        ppw->rsa_delta_g += -4./k2*ppw->pvecthermo[pth->index_th_dkappa]*y[ppw->pv->index_pt_theta_b];

        ppw->rsa_theta_g += 3./k2*(ppw->pvecthermo[pth->index_th_ddkappa]*y[ppw->pv->index_pt_theta_b]
                                   +ppw->pvecthermo[pth->index_th_dkappa]*
                                   (-a_prime_over_a*y[ppw->pv->index_pt_theta_b]
                                    +ppw->pvecthermo[pth->index_th_cb2]*k2*y[ppw->pv->index_pt_delta_b]
                                    +k2*y[ppw->pv->index_pt_phi]));
      }

      if (pba->has_ur == _TRUE_) {

        if (ppr->radiation_streaming_approximation == rsa_null) {
          ppw->rsa_delta_ur = 0.;
          ppw->rsa_theta_ur = 0.;
        }
        else {
          ppw->rsa_delta_ur = -4.*y[ppw->pv->index_pt_phi];
          ppw->rsa_theta_ur = 6.*ppw->pvecmetric[ppw->index_mt_phi_prime];
        }
      }
    }
  }

  /* synchronous gauge */
  if (ppt->gauge == synchronous) {

    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {

      if (ppr->radiation_streaming_approximation == rsa_null) {
        ppw->rsa_delta_g = 0.;
        ppw->rsa_theta_g = 0.;
      }
      else {
        ppw->rsa_delta_g = 4./k2*(a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
                                  -k2*y[ppw->pv->index_pt_eta]);
        ppw->rsa_theta_g = -0.5*ppw->pvecmetric[ppw->index_mt_h_prime];
      }

      if (ppr->radiation_streaming_approximation == rsa_MD_with_reio) {

        ppw->rsa_delta_g += -4./k2*ppw->pvecthermo[pth->index_th_dkappa]*(y[ppw->pv->index_pt_theta_b]+0.5*ppw->pvecmetric[ppw->index_mt_h_prime]);

        ppw->rsa_theta_g += 3./k2*(ppw->pvecthermo[pth->index_th_ddkappa]*
                                   (y[ppw->pv->index_pt_theta_b]
                                    +0.5*ppw->pvecmetric[ppw->index_mt_h_prime])
                                   +ppw->pvecthermo[pth->index_th_dkappa]*
                                   (-a_prime_over_a*y[ppw->pv->index_pt_theta_b]
                                    + ppw->pvecthermo[pth->index_th_cb2]*k2*y[ppw->pv->index_pt_delta_b]
                                    -a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
                                    +k2*y[ppw->pv->index_pt_eta]));
      }

      if (pba->has_ur == _TRUE_) {

        if (ppr->radiation_streaming_approximation == rsa_null) {
          ppw->rsa_delta_ur = 0.;
          ppw->rsa_theta_ur = 0.;
        }
        else {
          ppw->rsa_delta_ur = 4./k2*(a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
                                     -k2*y[ppw->pv->index_pt_eta]);
          ppw->rsa_theta_ur = -0.5*ppw->pvecmetric[ppw->index_mt_h_prime];
        }
      }
    }
  }

  return _SUCCESS_;
}
RNACI.h
/* This Source Code Form is subject to the terms of the BSD 2-Clause
 * License. If a copy of the this license was not distributed with this
 * file, you can obtain one from http://opensource.org/licenses/BSD-2-Clause. */

// Copyright 2014-2015, Schmidt

/* RNACI: header-only convenience wrappers around the R native C API
   (allocation, PROTECT bookkeeping, data accessors, list/data.frame
   construction). */

#ifndef __RNACI_H__
#define __RNACI_H__

#define RNACI_VERSION 0.2.0

#include <R.h>
#include <Rinternals.h>

#include <stdarg.h>
#include <string.h>
#include <stdbool.h>
#include <math.h>
#include <float.h>

#define RNACIMAX(m,n) m<n?n:m
#define RNULL R_NilValue

// Voodoo Args
/* keeps the first two of a variadic list: used to give newRvec/newRmat an
   optional trailing "init" argument defaulting to false */
#define OPTIONALARG1(a,b,...) (a),(b)

// R data accessors
#define __RNACI_INT(x,y,...) INTEGER(x)[y]
#define INT(x,...) __RNACI_INT(x,##__VA_ARGS__,0)
#define __RNACI_DBL(x,y,...) REAL(x)[y]
#define DBL(x,...) __RNACI_DBL(x,##__VA_ARGS__,0)
#define __RNACI_STR(x,y,...) ((char*)CHAR(STRING_ELT(x,y)))
#define STR(x,...) __RNACI_STR(x,##__VA_ARGS__,0)
#define MatINT(x,i,j) (INTEGER(x)[i+nrows(x)*j])
#define MatDBL(x,i,j) (REAL(x)[i+nrows(x)*j])
#define INTP(x) (INTEGER(x))
#define DBLP(x) (REAL(x))
/* wrap a C pointer in an R external pointer and register a finalizer */
#define newRptr(ptr,Rptr,fin) PROTECT(Rptr = R_MakeExternalPtr(ptr, R_NilValue, R_NilValue));R_RegisterCFinalizerEx(Rptr, fin, TRUE)
#define getRptr(ptr) R_ExternalPtrAddr(ptr);
/* generate a finalizer function FNAME freeing a TYPE* with FREEFUN */
#define newRfreeptrfun(FNAME,TYPE,FREEFUN) \
static void FNAME(SEXP ptr) \
{ \
  if (NULL == R_ExternalPtrAddr(ptr)) return; \
  TYPE *tmp = (TYPE *) R_ExternalPtrAddr(ptr); \
  FREEFUN(tmp); \
  R_ClearExternalPtr(ptr); \
}

// GC stuff
/* R_INIT declares a per-function PROTECT counter, PT() protects and counts,
   R_END unprotects everything counted */
#define R_INIT int __RNACI_SEXP_protect_counter=0
#define PT(x) PROTECT((x)); (__RNACI_SEXP_protect_counter)++
#define R_END (UNPROTECT(__RNACI_SEXP_protect_counter))

// Allocations
#define newRlist(x,n) PT(x=__Rvecalloc(n, "vec", false))
//#define newRvec(x,n,type) PT(x=__Rvecalloc(n, type))
#define newRvec(x,n,...) PT(x=__Rvecalloc(n,OPTIONALARG1(__VA_ARGS__,false)))
//#define newRmat(x,m,n,type) PT(x=__Rmatalloc(m,n,type))
#define newRmat(x,m,n,...) PT(x=__Rmatalloc(m,n,OPTIONALARG1(__VA_ARGS__,false)))

/* Misc stuff */
#define nonzero(x) (x?x:1)
#define is_null(x) (x==NULL)

/* NOTE(review): in the pre-C99 branch only dbstart is defined; code using
   dbstop would fail to compile there — confirm intended. */
#if __STDC_VERSION__ >= 199901L
#define dbstart printf("DEBUGGING in %s Started\n", __func__);int __RNACI_debug_printing_counter=0
#define dbstop printf("DEBUGGING in %s Ended\n", __func__)
#else
#define dbstart int __RNACI_debug_printing_counter=0
#endif
#define dbshow printf("%d\n", __RNACI_debug_printing_counter);__RNACI_debug_printing_counter++;

/***************************************************
 *                   Definitions                   *
 ***************************************************/

// alloc.c
/* Allocate an R vector of length n whose SEXP type is selected by the
   string `type` ("vec", "int", "double"/"dbl", "str"/"char*"); optionally
   zero-initialize numeric vectors when init is true.  Returns NULL for an
   unknown type.  The result is returned unprotected (UNPROTECT(1) before
   return); callers protect it themselves, e.g. via newRvec/newRlist. */
static inline SEXP __Rvecalloc(int n, char *type, int init)
{
  SEXP RET;
  int i;

  if (strcmp(type, "vec") == 0)
    PROTECT(RET = allocVector(VECSXP, n));
  else if (strcmp(type, "int") == 0)
  {
    PROTECT(RET = allocVector(INTSXP, n));
    if (init)
    {
      /* NOTE(review): `omp for` without an enclosing parallel region —
         presumably relies on the pragma being a no-op when not in one. */
#if defined( _OPENMP_SUPPORT_SIMD)
#pragma omp for simd
#endif
      for (i=0; i<n; i++)
        INT(RET, i) = 0;
    }
  }
  else if (strcmp(type, "double") == 0 || strcmp(type, "dbl") == 0)
  {
    PROTECT(RET = allocVector(REALSXP, n));
    if (init)
    {
#if defined( _OPENMP_SUPPORT_SIMD)
#pragma omp for simd
#endif
      for (i=0; i<n; i++)
        DBL(RET, i) = 0.0;
    }
  }
  else if (strcmp(type, "str") == 0 || strcmp(type, "char*") == 0)
    PROTECT(RET = allocVector(STRSXP, n));
  else
    return NULL;

  UNPROTECT(1);
  return RET;
}

/* Matrix analogue of __Rvecalloc: allocate an m-by-n R matrix of the
   requested type, optionally zero-initialized column by column. */
static inline SEXP __Rmatalloc(int m, int n, char *type, int init)
{
  SEXP RET;
  int i, j;

  if (strcmp(type, "vec") == 0)
    PROTECT(RET = allocMatrix(VECSXP, m, n));
  else if (strcmp(type, "int") == 0)
  {
    PROTECT(RET = allocMatrix(INTSXP, m, n));
    if (init)
    {
      for (j=0; j<n; j++)
      {
#if defined( _OPENMP_SUPPORT_SIMD)
#pragma omp for simd
#endif
        for (i=0; i<m; i++)
          MatINT(RET, i, j) = 0;
      }
    }
  }
  else if (strcmp(type, "double") == 0 || strcmp(type, "dbl") == 0)
  {
    PROTECT(RET = allocMatrix(REALSXP, m, n));
    if (init)
    {
      for (j=0; j<n; j++)
      {
#if defined( _OPENMP_SUPPORT_SIMD)
#pragma omp for simd
#endif
        for (i=0; i<m; i++)
          MatDBL(RET, i, j) = 0.0;
      }
    }
  }
  else if (strcmp(type,
"str") == 0 || strcmp(type, "char*") == 0) PROTECT(RET = allocMatrix(STRSXP, m, n)); else return NULL; UNPROTECT(1); return RET; } // floats.c static inline int fis_zerof(float x) { const float abs_epsf = 1.1f * FLT_EPSILON; if (fabsf(x) < abs_epsf*FLT_MIN) return true; else return false; } static inline int fis_zero(double x) { const double abs_eps = 1.1 * DBL_EPSILON; if (fabs(x) < abs_eps*DBL_MIN) return true; else return false; } static inline int fequalsf(float x, float y) { const float abs_epsf = 1.1f * FLT_EPSILON; const double abs_eps = 1.1 * DBL_EPSILON; const double diff = fabsf(x - y); if (x == y) return true; else if (x == 0.0f || y == 0.0f || diff < FLT_MIN) return diff < (abs_epsf*FLT_MIN); else return diff/(fabsf(x) + fabsf(y)) < abs_epsf; } static inline int fequals(double x, double y) { const float abs_epsf = 1.1f * FLT_EPSILON; const double abs_eps = 1.1 * DBL_EPSILON; const double diff = fabs(x - y); if (x == y) return true; else if (x == 0.0 || y == 0.0 || diff < DBL_MIN) return diff < (abs_eps*DBL_MIN); else return diff/(fabs(x) + fabs(y)) < abs_eps; } // misc.c static inline int is_Rnull(SEXP x) { R_INIT; SEXP basePackage; SEXP tmp; PT( basePackage = eval( lang2( install("getNamespace"), ScalarString(mkChar("base")) ), R_GlobalEnv ) ); tmp = eval( lang2( install("is.null"), x), basePackage); R_END; return INT(tmp,0); } static inline int is_Rnan(SEXP x) { R_INIT; SEXP basePackage; SEXP tmp; PT( basePackage = eval( lang2( install("getNamespace"), ScalarString(mkChar("base")) ), R_GlobalEnv ) ); tmp = eval( lang2( install("is.nan"), x), basePackage); R_END; return INT(tmp,0); } static inline int is_Rna(SEXP x) { R_INIT; SEXP basePackage; SEXP tmp; PT( basePackage = eval( lang2( install("getNamespace"), ScalarString(mkChar("base")) ), R_GlobalEnv ) ); tmp = eval( lang2( install("is.na"), x), basePackage); R_END; return INT(tmp,0); } static inline int is_double(SEXP x) { R_INIT; SEXP basePackage; SEXP tmp; PT( basePackage = eval( lang2( 
install("getNamespace"), ScalarString(mkChar("base")) ), R_GlobalEnv ) ); tmp = eval( lang2( install("is.double"), x), basePackage); R_END; return INT(tmp,0); } static inline int is_integer(SEXP x) { R_INIT; SEXP basePackage; SEXP tmp; PT( basePackage = eval( lang2( install("getNamespace"), ScalarString(mkChar("base")) ), R_GlobalEnv ) ); tmp = eval( lang2( install("is.integer"), x), basePackage); R_END; return INT(tmp,0); } // printing.c static inline void PRINT(SEXP x) { R_INIT; SEXP basePackage; PT( basePackage = eval( lang2( install("getNamespace"), ScalarString(mkChar("base")) ), R_GlobalEnv ) ); eval( lang2( install("print"), x), basePackage); R_END; } // structures_misc.c static inline void set_list_names(SEXP R_list, SEXP R_names) { setAttrib(R_list, R_NamesSymbol, R_names); } static inline void set_df_rownames(SEXP R_df, SEXP R_rownames) { setAttrib(R_df, R_RowNamesSymbol, R_rownames); } static inline void set_df_colnames(SEXP R_df, SEXP R_colnames) { set_list_names(R_df, R_colnames); } static inline void set_list_as_df(SEXP R_list) { setAttrib(R_list, R_ClassSymbol, mkString("data.frame")); } // structures_dataframes.c static inline SEXP make_dataframe_default_colnames(const int n) { R_INIT; int i; int buflen; SEXP ret; buflen = (int) (ceil(log10((double)n)) + 1.); char *buf = malloc(buflen * sizeof(*buf)); buf[0] = 'X'; newRlist(ret, n); for (i=0; i<n; i++) { sprintf(buf+1, "%d", i+1); buflen = (int) (ceil(log10((double)i+2)) + 1.); buflen = RNACIMAX(buflen, 2); SET_VECTOR_ELT(ret, i, mkCharLen(buf, buflen)); } free(buf); R_END; return ret; } static inline SEXP make_dataframe_default_rownames(int n) { R_INIT; int i; SEXP ret_names; newRvec(ret_names, n, "int"); for(i=0; i<n; i++) INT(ret_names,i) = i + 1; R_END; return ret_names; } static inline SEXP make_dataframe(SEXP R_rownames, SEXP R_colnames, int n, ...) 
{
  R_INIT;
  int i;
  SEXP R_df;
  SEXP R_default_rownames;
  SEXP R_default_colnames;
  SEXP tmp;
  va_list listPointer;

  // Construct list
  newRlist(R_df, n);

  va_start(listPointer, n);

  /* each vararg is one column SEXP */
  for(i=0; i<n; i++)
  {
    tmp = va_arg(listPointer, SEXP);
    SET_VECTOR_ELT(R_df, i, tmp);
  }

  va_end(listPointer);

  // Set names
  set_list_as_df(R_df);

  /* NOTE(review): when R_rownames is NULL the default row names are built
     from n, which is the number of COLUMNS passed in — confirm this is the
     intended length for row.names. */
  if (is_Rnull(R_rownames))
  {
    R_default_rownames = make_dataframe_default_rownames(n);
    set_df_rownames(R_df, R_default_rownames);
  }
  else
    set_df_rownames(R_df, R_rownames);

  if (is_Rnull(R_colnames))
  {
    R_default_colnames = make_dataframe_default_colnames(n);
    set_df_colnames(R_df, R_default_colnames);
  }
  else
    set_df_colnames(R_df, R_colnames);

  R_END;
  return R_df;
}

// structures_lists.c
/* Build a character vector of list names from n C strings (varargs). */
static inline SEXP make_list_names(int n, ...)
{
  R_INIT;
  int i;
  char *tmp;
  SEXP R_list_names;
  va_list listPointer;

  newRvec(R_list_names, n, "str");

  va_start(listPointer, n);

  for(i=0; i<n; i++)
  {
    tmp = va_arg(listPointer, char *);
    SET_STRING_ELT(R_list_names, i, mkChar(tmp));
  }

  va_end(listPointer);

  R_END;
  return R_list_names;
}

/* Build an R list of n SEXP elements (varargs); R_list_names may be
   R_NilValue to skip naming. */
static inline SEXP make_list(SEXP R_list_names, const int n, ...)
{
  R_INIT;
  int i;
/*  const int n = LENGTH(R_list_names);*/
  SEXP tmp, R_list;
  va_list listPointer;

  newRlist(R_list, n);

  va_start(listPointer, n);

  for(i=0; i<n; i++)
  {
    tmp = va_arg(listPointer, SEXP);
    SET_VECTOR_ELT(R_list, i, tmp);
  }

  va_end(listPointer);

/*  setAttrib(R_list, R_NamesSymbol, R_list_names);*/
  if (!is_Rnull(R_list_names))
    set_list_names(R_list, R_list_names);

  R_END;
  return R_list;
}

#endif
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;

/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  typedef gebp_traits<RhsScalar,LhsScalar> Traits;

  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  /* Delegates to the ColMajor specialization: C_rowmajor = A*B is computed
     as C_colmajor^T = B^T * A^T, i.e. lhs/rhs are swapped, rows/cols are
     swapped, and each operand's storage order is flipped. */
  static EIGEN_STRONG_INLINE void run(Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};

/* Specialization for a col-major destination matrix
 * => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{

  typedef gebp_traits<LhsScalar,RhsScalar> Traits;

  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  /* res += alpha * lhs * rhs, blocked along K (kc), M (mc) and N (nc) to
     keep the packed panels cache-resident; the packed panels are consumed
     by the gebp micro-kernel. */
  static void run(Index rows, Index cols, Index depth,
    const LhsScalar* _lhs, Index lhsStride,
    const RhsScalar* _rhs, Index rhsStride,
    ResScalar* _res, Index resStride,
    ResScalar alpha,
    level3_blocking<LhsScalar,RhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
    typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
    LhsMapper lhs(_lhs,lhsStride);
    RhsMapper rhs(_rhs,rhsStride);
    ResMapper res(_res, resStride);

    Index kc = blocking.kc();                   // cache block size along the K direction
    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
    Index nc = (std::min)(cols,blocking.nc());  // cache block size along the N direction

    gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
    gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
    if(info)
    {
      // this is the parallel version!
int tid = omp_get_thread_num(); int threads = omp_get_num_threads(); LhsScalar* blockA = blocking.blockA(); eigen_internal_assert(blockA!=0); std::size_t sizeB = kc*nc; ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0); // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs... for(Index k=0; k<depth; k+=kc) { const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A' // In order to reduce the chance that a thread has to wait for the other, // let's start by packing B'. pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc); // Pack A_k to A' in a parallel fashion: // each thread packs the sub block A_k,i to A'_i where i is the thread id. // However, before copying to A'_i, we have to make sure that no other thread is still using it, // i.e., we test that info[tid].users equals 0. // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it. while(info[tid].users!=0) {} info[tid].users += threads; pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length); // Notify the other threads that the part A'_i is ready to go. info[tid].sync = k; // Computes C_i += A' * B' per A'_i for(int shift=0; shift<threads; ++shift) { int i = (tid+shift)%threads; // At this point we have to make sure that A'_i has been updated by the thread i, // we use testAndSetOrdered to mimic a volatile access. // However, no need to wait for the B' part which has been updated by the current thread! 
if (shift>0) { while(info[i].sync!=k) { } } gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha); } // Then keep going as usual with the remaining B' for(Index j=nc; j<cols; j+=nc) { const Index actual_nc = (std::min)(j+nc,cols)-j; // pack B_k,j to B' pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc); // C_j += A' * B' gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha); } // Release all the sub blocks A'_i of A' for the current thread, // i.e., we simply decrement the number of users by 1 for(Index i=0; i<threads; ++i) #pragma omp atomic info[i].users -= 1; } } else #endif // EIGEN_HAS_OPENMP { EIGEN_UNUSED_VARIABLE(info); // this is the sequential version! std::size_t sizeA = kc*mc; std::size_t sizeB = kc*nc; ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA()); ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB()); const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols; // For each horizontal panel of the rhs, and corresponding panel of the lhs... for(Index i2=0; i2<rows; i2+=mc) { const Index actual_mc = (std::min)(i2+mc,rows)-i2; for(Index k2=0; k2<depth; k2+=kc) { const Index actual_kc = (std::min)(k2+kc,depth)-k2; // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs. // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching) // Note that this panel will be read as many times as the number of blocks in the rhs's // horizontal panel which is, in practice, a very low number. pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc); // For each kc x nc block of the rhs's horizontal panel... 
for(Index j2=0; j2<cols; j2+=nc) { const Index actual_nc = (std::min)(j2+nc,cols)-j2; // We pack the rhs's block into a sequential chunk of memory (L2 caching) // Note that this block will be read a very high number of times, which is equal to the number of // micro horizontal panel of the large rhs's panel (e.g., rows/12 times). if((!pack_rhs_once) || i2==0) pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc); // Everything is packed, we can now call the panel * block kernel: gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha); } } } } } }; /********************************************************************************* * Specialization of generic_product_impl for "large" GEMM, i.e., * implementation of the high level wrapper to general_matrix_matrix_product **********************************************************************************/ template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType> struct gemm_functor { gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking) : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking) {} void initParallelSession(Index num_threads) const { m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads); m_blocking.allocateA(); } void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const { if(cols==-1) cols = m_rhs.cols(); Gemm::run(rows, cols, m_lhs.cols(), &m_lhs.coeffRef(row,0), m_lhs.outerStride(), &m_rhs.coeffRef(0,col), m_rhs.outerStride(), (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(), m_actualAlpha, m_blocking, info); } typedef typename Gemm::Traits Traits; protected: const Lhs& m_lhs; const Rhs& m_rhs; Dest& m_dest; Scalar m_actualAlpha; BlockingType& m_blocking; }; template<int StorageOrder, typename LhsScalar, typename RhsScalar, int 
MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

// Base class holding the cache-blocking parameters (mc, nc, kc) and the
// optional pre-allocated packing buffers for the lhs (blockA) and rhs (blockB).
// Ownership/lifetime of the buffers is managed by the derived blocking classes.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;

    Index m_mc;
    Index m_nc;
    Index m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline Index mc() const { return m_mc; }
    inline Index nc() const { return m_nc; }
    inline Index kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
};

// Specialization for products whose maximal sizes are known at compile time:
// the packing buffers live inside the object (no heap allocation at all).
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      // A row-major destination is handled by transposing the product, which
      // swaps the roles (and scalar types) of the two operands.
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth
    };

#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
    EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
    // Over-allocate raw bytes and align the pointers manually in the ctor.
    EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
    EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif

  public:

    gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
#else
      this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
      this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
    }

    // Buffers are statically sized members, so the allocate*/initParallel
    // hooks required by the blocking interface are no-ops here.
    void initParallel(Index, Index, Index, Index)
    {}

    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateAll() {}
};

// Specialization for runtime-sized products: blocking sizes are computed from
// the actual dimensions and the packing buffers are heap-allocated lazily.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename
conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index m_sizeA;
    Index m_sizeB;

  public:

    gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      if(l3_blocking)
      {
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
      }
      else  // no l3 blocking
      {
        // Pass a copy of nc so that only kc and mc are adjusted; the stored
        // m_nc keeps the full column count.
        Index n = this->m_nc;
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
      }

      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    // Re-derives the blocking sizes for a parallel run before any buffer has
    // been allocated (a copy of mc is passed so m_mc itself is not shrunk).
    void initParallel(Index rows, Index cols, Index depth, Index num_threads)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
      Index m = this->m_mc;
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    // Lazy allocation: buffers are only created on first request and freed in
    // the destructor below.
    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
    }
};

} // end namespace internal

namespace internal {

// Product evaluator for "large" dense * dense products: small products are
// routed to the lazy coefficient-based evaluator, everything else goes
// through the blocked (and possibly parallel) GEMM kernel above.
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  typedef typename Lhs::Scalar LhsScalar;
  typedef typename Rhs::Scalar RhsScalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;

  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

  enum {
    MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
  };

  typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;

  template<typename Dst>
  static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    // See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=404 for a discussion and helper program
    // to determine the following heuristic.
    // EIGEN_GEMM_TO_COEFFBASED_THRESHOLD is typically defined to 20 in GeneralProduct.h,
    // unless it has been specialized by the user or for a given architecture.
    // Note that the condition rhs.rows()>0 was required because lazy produc is (was?) not happy with empty inputs.
    // I'm not sure it is still required.
    if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
      lazyproduct::evalTo(dst, lhs, rhs);
    else
    {
      dst.setZero();
      scaleAndAddTo(dst, lhs, rhs, Scalar(1));
    }
  }

  template<typename Dst>
  static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    // Same small-product heuristic as evalTo (see comment there).
    if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
      lazyproduct::addTo(dst, lhs, rhs);
    else
      scaleAndAddTo(dst,lhs, rhs, Scalar(1));
  }

  template<typename Dst>
  static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    // Same small-product heuristic as evalTo (see comment there).
    if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
      lazyproduct::subTo(dst, lhs, rhs);
    else
      scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
  }

  // dst += alpha * a_lhs * a_rhs, after peeling any scalar factors and
  // transpose/conjugate wrappers off the operands via blas_traits.
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
  {
    eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
    // Degenerate (empty) products leave dst untouched.
    if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
      return;

    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    // Fold any scalar multiples carried by the expressions into alpha.
    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
                               * RhsBlasTraits::extractScalarFactor(a_rhs);

    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
            Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

    typedef internal::gemm_functor<
      Scalar, Index,
      internal::general_matrix_matrix_product<
        Index,
        LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
        RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
        (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
      ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;

    BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
    // Only bother parallelizing when the destination can be "large".
    internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
        (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
openmp-ex07.c
#include <stdio.h>
#include <unistd.h>
#include <omp.h>

/*
 * Demonstrates the OpenMP `private` clause.  Outside any parallel region the
 * runtime reports a single thread; inside the region every thread gets its
 * own uninitialized copies of the listed variables; afterwards the originals
 * are printed again to show whether any of the private writes leaked back.
 */
int main(void)
{
  /* Queried in the serial part: one thread, id 0. */
  int nthreads = omp_get_num_threads();
  int tid = omp_get_thread_num();

  printf ("\"You're all individuals!\" said %d of %d.\n", tid, nthreads);

#pragma omp parallel private(nthreads,tid)
  {
    /* Each thread re-queries into its own private copies. */
    nthreads = omp_get_num_threads();
    tid = omp_get_thread_num();
    sleep(1);
    printf("\"Yes, we're all individuals!\" replied %d of %d, sleepily.\n", tid, nthreads);
  }

  /* But then what happens when we try to use the original variable again: do
   * any of the private writes affect it? */
  printf ("\"I'm not,\" said %d of %d.\n", tid, nthreads);
  return 0;
}
stats.c
//-----------------------------------------------------------------------------
//   stats.c
//
//   Project:  EPA SWMM5
//   Version:  5.1
//   Date:     03/20/14 (Build 5.1.001)
//             09/15/14 (Build 5.1.007)
//             03/19/15 (Build 5.1.008)
//             08/01/16 (Build 5.1.011)
//             03/14/17 (Build 5.1.012)
//             05/10/18 (Build 5.1.013)
//             04/01/20 (Build 5.1.015)
//   Author:   L. Rossman (EPA)
//             R. Dickinson (CDM)
//
//   Simulation statistics functions.
//
//   Build 5.1.007:
//   - Exfiltration losses added to storage node statistics.
//
//   Build 5.1.008:
//   - Support for updating groundwater statistics added.
//   - Support for updating maximum reported nodal depths added.
//   - OpenMP parallelization applied to updating node and link flow statistics.
//   - Updating of time that conduit is upstrm/dnstrm full was modified.
//
//   Build 5.1.011:
//   - Surcharging is now evaluated only under dynamic wave flow routing and
//     storage nodes cannot be classified as surcharged.
//
//   Build 5.1.012:
//   - Time step statistics now evaluated only in non-steady state periods.
//   - Check for full conduit flow now accounts for number of barrels.
//
//   Build 5.1.013:
//   - Include omp.h protected against lack of compiler support for OpenMP.
//   - Statistics on impervious and pervious runoff totals added.
//   - Storage nodes with a non-zero surcharge depth (e.g. enclosed tanks)
//     can now be classified as being surcharged.
//
//   Build 5.1.015:
//   - Fixes bug in summary statistics when Report Start date > Start Date.
//   - Fixes failure to initialize all subcatchment groundwater statistics.
//   - Support added for grouped freqency table of routing time steps.
//-----------------------------------------------------------------------------
#define _CRT_SECURE_NO_DEPRECATE

#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "headers.h"
#include "swmm5.h"
#if defined(_OPENMP)                                                           //(5.1.013)
#include <omp.h>
#endif

//-----------------------------------------------------------------------------
//  Shared variables
//-----------------------------------------------------------------------------
#define MAX_STATS 5
static TSysStats       SysStats;                  // system-wide routing stats
static TMaxStats       MaxMassBalErrs[MAX_STATS]; // nodes with worst mass balance
static TMaxStats       MaxCourantCrit[MAX_STATS]; // most time step-critical objects
static TMaxStats       MaxFlowTurns[MAX_STATS];   // links with most flow reversals
static double          SysOutfallFlow;            // total outfall flow in current period

//-----------------------------------------------------------------------------
//  Exportable variables (shared with statsrpt.c)
//-----------------------------------------------------------------------------
TSubcatchStats* SubcatchStats;
TNodeStats*     NodeStats;
TLinkStats*     LinkStats;
TStorageStats*  StorageStats;
TOutfallStats*  OutfallStats;
TPumpStats*     PumpStats;
double          MaxOutfallFlow;
double          MaxRunoffFlow;

//-----------------------------------------------------------------------------
//  Imported variables
//-----------------------------------------------------------------------------
extern double*         NodeInflow;     // defined in massbal.c
extern double*         NodeOutflow;    // defined in massbal.c

//-----------------------------------------------------------------------------
//  External functions (declared in funcs.h)
//-----------------------------------------------------------------------------
//  stats_open                    (called from swmm_start in swmm5.c)
//  stats_close                   (called from swmm_end in swmm5.c)
//  stats_report                  (called from swmm_end in swmm5.c)
//  stats_updateSubcatchStats     (called from subcatch_getRunoff)
//  stats_updateGwaterStats       (called from gwater_getGroundwater)
//  stats_updateFlowStats         (called from routing_execute)
//  stats_updateCriticalTimeCount (called from getVariableStep in dynwave.c)
//  stats_updateMaxNodeDepth      (called from output_saveNodeResults)
//-----------------------------------------------------------------------------
//  Local functions
//-----------------------------------------------------------------------------
static void stats_updateNodeStats(int node, double tStep, DateTime aDate);
static void stats_updateLinkStats(int link, double tStep, DateTime aDate);
static void stats_findMaxStats(void);
static void stats_updateMaxStats(TMaxStats maxStats[], int i, int j, double x);

//=============================================================================

int  stats_open()
//
//  Input:   none
//  Output:  returns an error code
//  Purpose: opens the simulation statistics system.
//
{
    int j, k;
    double timeStepDelta;                                                     //(5.1.015)
    double logMaxTimeStep;                                                    //(5.1.015)
    double logMinTimeStep;                                                    //(5.1.015)

    // --- set all pointers to NULL
    NodeStats = NULL;
    LinkStats = NULL;
    StorageStats = NULL;
    OutfallStats = NULL;
    PumpStats = NULL;

    // --- allocate memory for & initialize subcatchment statistics
    SubcatchStats = NULL;
    if ( Nobjects[SUBCATCH] > 0 )
    {
        SubcatchStats = (TSubcatchStats *) calloc(Nobjects[SUBCATCH],
                                               sizeof(TSubcatchStats));
        if ( !SubcatchStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
        for (j=0; j<Nobjects[SUBCATCH]; j++)
        {
            SubcatchStats[j].precip  = 0.0;
            SubcatchStats[j].runon   = 0.0;
            SubcatchStats[j].evap    = 0.0;
            SubcatchStats[j].infil   = 0.0;
            SubcatchStats[j].runoff  = 0.0;
            SubcatchStats[j].maxFlow = 0.0;
            SubcatchStats[j].impervRunoff = 0.0;                              //(5.1.013)
            SubcatchStats[j].pervRunoff   = 0.0;                              //
        }

        // --- zero all groundwater statistics for subcatchments that have
        //     a groundwater compartment (5.1.015 fix)
        for (j=0; j<Nobjects[SUBCATCH]; j++)
        {
            if ( Subcatch[j].groundwater == NULL ) continue;
            Subcatch[j].groundwater->stats.avgUpperMoist = 0.0;
            Subcatch[j].groundwater->stats.avgWaterTable = 0.0;
            Subcatch[j].groundwater->stats.infil = 0.0;
            Subcatch[j].groundwater->stats.latFlow = 0.0;
            Subcatch[j].groundwater->stats.deepFlow = 0.0;
            Subcatch[j].groundwater->stats.evap = 0.0;
            Subcatch[j].groundwater->stats.maxFlow = 0.0;
            Subcatch[j].groundwater->stats.finalUpperMoist = 0.0;             //(5.1.015)
            Subcatch[j].groundwater->stats.finalWaterTable = 0.0;             //
        }
    }

    // --- allocate memory for node & link stats
    // NOTE(review): both arrays are only allocated when links exist, so
    // NodeStats stays NULL in a link-free model — downstream code guards on
    // NodeStats/LinkStats being non-NULL; confirm before relying on them.
    if ( Nobjects[LINK] > 0 )
    {
        NodeStats = (TNodeStats *) calloc(Nobjects[NODE], sizeof(TNodeStats));
        LinkStats = (TLinkStats *) calloc(Nobjects[LINK], sizeof(TLinkStats));
        if ( !NodeStats || !LinkStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
    }

    // --- initialize node stats
    if ( NodeStats ) for ( j = 0; j < Nobjects[NODE]; j++ )
    {
        NodeStats[j].avgDepth = 0.0;
        NodeStats[j].maxDepth = 0.0;
        NodeStats[j].maxDepthDate = StartDateTime;
        NodeStats[j].maxRptDepth = 0.0;
        NodeStats[j].volFlooded = 0.0;
        NodeStats[j].timeFlooded = 0.0;
        NodeStats[j].timeSurcharged = 0.0;
        NodeStats[j].timeCourantCritical = 0.0;
        NodeStats[j].totLatFlow = 0.0;
        NodeStats[j].maxLatFlow = 0.0;
        NodeStats[j].maxInflow = 0.0;
        NodeStats[j].maxOverflow = 0.0;
        NodeStats[j].maxPondedVol = 0.0;
        NodeStats[j].maxInflowDate = StartDateTime;
        NodeStats[j].maxOverflowDate = StartDateTime;
    }

    // --- initialize link stats
    if ( LinkStats ) for ( j = 0; j < Nobjects[LINK]; j++ )
    {
        LinkStats[j].maxFlow = 0.0;
        LinkStats[j].maxVeloc = 0.0;
        LinkStats[j].maxDepth = 0.0;
        LinkStats[j].timeSurcharged = 0.0;
        LinkStats[j].timeFullUpstream = 0.0;
        LinkStats[j].timeFullDnstream = 0.0;
        LinkStats[j].timeFullFlow = 0.0;
        LinkStats[j].timeCapacityLimited = 0.0;
        LinkStats[j].timeCourantCritical = 0.0;
        for (k=0; k<MAX_FLOW_CLASSES; k++)
            LinkStats[j].timeInFlowClass[k] = 0.0;
        LinkStats[j].flowTurns = 0;
        LinkStats[j].flowTurnSign = 0;
    }

    // --- allocate memory for & initialize storage unit statistics
    if ( Nnodes[STORAGE] > 0 )
    {
        StorageStats = (TStorageStats *) calloc(Nnodes[STORAGE],
                           sizeof(TStorageStats));
        if ( !StorageStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
        else for ( k = 0; k < Nobjects[NODE]; k++ )
        {
            if ( Node[k].type != STORAGE ) continue;
            j = Node[k].subIndex;
            StorageStats[j].initVol = Node[k].newVolume;
            StorageStats[j].avgVol = 0.0;
            StorageStats[j].maxVol = 0.0;
            StorageStats[j].maxFlow = 0.0;
            StorageStats[j].evapLosses = 0.0;
            StorageStats[j].exfilLosses = 0.0;
            StorageStats[j].maxVolDate = StartDateTime;
        }
    }

    // --- allocate memory for & initialize outfall statistics
    if ( Nnodes[OUTFALL] > 0 )
    {
        OutfallStats = (TOutfallStats *) calloc(Nnodes[OUTFALL],
                           sizeof(TOutfallStats));
        if ( !OutfallStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
        else for ( j = 0; j < Nnodes[OUTFALL]; j++ )
        {
            OutfallStats[j].avgFlow = 0.0;
            OutfallStats[j].maxFlow = 0.0;
            OutfallStats[j].totalPeriods = 0;
            // --- each outfall also accumulates a per-pollutant load total
            if ( Nobjects[POLLUT] > 0 )
            {
                OutfallStats[j].totalLoad =
                    (double *) calloc(Nobjects[POLLUT], sizeof(double));
                if ( !OutfallStats[j].totalLoad )
                {
                    report_writeErrorMsg(ERR_MEMORY, "");
                    return ErrorCode;
                }
                for (k=0; k<Nobjects[POLLUT]; k++)
                    OutfallStats[j].totalLoad[k] = 0.0;
            }
            else OutfallStats[j].totalLoad = NULL;
        }
    }

    // --- allocate memory & initialize pumping statistics
    if ( Nlinks[PUMP] > 0 )
    {
        PumpStats = (TPumpStats *) calloc(Nlinks[PUMP], sizeof(TPumpStats));
        if ( !PumpStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
        else for ( j = 0; j < Nlinks[PUMP]; j++ )
        {
            PumpStats[j].utilized = 0.0;
            PumpStats[j].minFlow  = 0.0;
            PumpStats[j].avgFlow  = 0.0;
            PumpStats[j].maxFlow  = 0.0;
            PumpStats[j].volume   = 0.0;
            PumpStats[j].energy   = 0.0;
            PumpStats[j].startUps = 0;
            PumpStats[j].offCurveLow = 0.0;
            PumpStats[j].offCurveHigh = 0.0;
        }
    }

    // --- initialize system stats
    MaxRunoffFlow = 0.0;
    MaxOutfallFlow = 0.0;
    SysStats.maxTimeStep = 0.0;
    SysStats.minTimeStep = RouteStep;
    SysStats.avgTimeStep = 0.0;
    SysStats.avgStepCount = 0.0;
    SysStats.steadyStateCount = 0.0;

    // --- divide range between min and max routing time steps into           //(5.1.015)
    //     equal intervals using a logarithmic scale                          //
    // NOTE: timeStepCounts[0] is not reset here; SysStats is file-static so
    // it starts out zeroed.
    logMaxTimeStep = log10(RouteStep);                                        //
    logMinTimeStep = log10(MinRouteStep);                                     //
    timeStepDelta = (logMaxTimeStep - logMinTimeStep) / (TIMELEVELS-1);       //
    SysStats.timeStepIntervals[0] = RouteStep;                                //
    for (j = 1; j < TIMELEVELS; j++)                                          //
    {                                                                         //
        SysStats.timeStepIntervals[j] =                                       //
            pow(10., logMaxTimeStep - j * timeStepDelta);                     //
        SysStats.timeStepCounts[j] = 0;                                       //
    }                                                                         //
    SysStats.timeStepIntervals[TIMELEVELS - 1] = MinRouteStep;                //
    return 0;
}

//=============================================================================

void  stats_close()
//
//  Input:   none
//  Output:
//  Purpose: closes the simulation statistics system.
//
{
    int j;

    FREE(SubcatchStats);
    FREE(NodeStats);
    FREE(LinkStats);
    FREE(StorageStats);
    // --- the per-outfall pollutant load arrays must be freed before the
    //     outfall stats array itself
    if ( OutfallStats )
    {
        for ( j=0; j<Nnodes[OUTFALL]; j++ )
            FREE(OutfallStats[j].totalLoad);
        FREE(OutfallStats);
    }
    FREE(PumpStats);
}

//=============================================================================

void  stats_report()
//
//  Input:   none
//  Output:  none
//  Purpose: reports simulation statistics.
//
{
    // --- report flow routing accuracy statistics
    if ( Nobjects[LINK] > 0 && RouteModel != NO_ROUTING )
    {
        stats_findMaxStats();
        report_writeMaxStats(MaxMassBalErrs, MaxCourantCrit, MAX_STATS);
        report_writeMaxFlowTurns(MaxFlowTurns, MAX_STATS);
        report_writeSysStats(&SysStats);
    }

    // --- report summary statistics
    statsrpt_writeReport();
}

//=============================================================================

void   stats_updateSubcatchStats(int j, double rainVol, double runonVol,
                                 double evapVol, double infilVol,
                                 double impervVol, double pervVol,
                                 double runoffVol, double runoff)
//
//  Input:   j = subcatchment index
//           rainVol   = rainfall + snowfall volume (ft3)
//           runonVol  = runon volume from other subcatchments (ft3)
//           evapVol   = evaporation volume (ft3)
//           infilVol  = infiltration volume (ft3)
//           impervVol = impervious runoff volume (ft3)
//           pervVol   = pervious runoff volume (ft3)
//           runoffVol = runoff volume (ft3)
//           runoff    = runoff rate (cfs)
//  Output:  none
//  Purpose: updates totals of runoff components for a specific subcatchment.
//
{
    SubcatchStats[j].precip += rainVol;
    SubcatchStats[j].runon  += runonVol;
    SubcatchStats[j].evap   += evapVol;
    SubcatchStats[j].infil  += infilVol;
    SubcatchStats[j].runoff += runoffVol;
    SubcatchStats[j].maxFlow = MAX(SubcatchStats[j].maxFlow, runoff);
    SubcatchStats[j].impervRunoff += impervVol;                               //(5.1.013)
    SubcatchStats[j].pervRunoff += pervVol;                                   //
}

//=============================================================================

//  Accumulates groundwater statistics for subcatchment j over one time step:
//  flow terms are integrated as rate * tStep, moisture/water-table values are
//  time-weighted for later averaging, and the "final" values are simply the
//  latest ones seen.  maxFlow keeps the lateral flow of largest magnitude
//  (signed value is stored).
void  stats_updateGwaterStats(int j, double infil, double evap, double latFlow,
                              double deepFlow, double theta, double waterTable,
                              double tStep)
{
    Subcatch[j].groundwater->stats.infil += infil * tStep;
    Subcatch[j].groundwater->stats.evap += evap * tStep;
    Subcatch[j].groundwater->stats.latFlow += latFlow * tStep;
    Subcatch[j].groundwater->stats.deepFlow += deepFlow * tStep;
    Subcatch[j].groundwater->stats.avgUpperMoist += theta * tStep;
    Subcatch[j].groundwater->stats.avgWaterTable += waterTable * tStep;
    Subcatch[j].groundwater->stats.finalUpperMoist = theta;
    Subcatch[j].groundwater->stats.finalWaterTable = waterTable;
    if ( fabs(latFlow) > fabs(Subcatch[j].groundwater->stats.maxFlow) )
    {
        Subcatch[j].groundwater->stats.maxFlow = latFlow;
    }
}

//=============================================================================

void  stats_updateMaxRunoff()
//
//  Input:   none
//  Output:  updates global variable MaxRunoffFlow
//  Purpose: updates value of maximum system runoff rate.
//
{
    int j;
    double sysRunoff = 0.0;

    for (j=0; j<Nobjects[SUBCATCH]; j++) sysRunoff += Subcatch[j].newRunoff;
    MaxRunoffFlow = MAX(MaxRunoffFlow, sysRunoff);
}

//=============================================================================

void   stats_updateMaxNodeDepth(int j, double depth)
//
//  Input:   j = node index
//           depth = water depth at node at current reporting time (ft)
//  Output:  none
//  Purpose: updates a node's maximum depth recorded at reporting times.
//
{
    if ( NodeStats != NULL )
        NodeStats[j].maxRptDepth = MAX(NodeStats[j].maxRptDepth, depth);
}

//=============================================================================

void   stats_updateFlowStats(double tStep, DateTime aDate, int stepCount,
                             int steadyState)
//
//  Input:   tStep = routing time step (sec)
//           aDate = current date/time
//           stepCount = # steps required to solve routing at current time period
//           steadyState = TRUE if steady flow conditions exist
//  Output:  none
//  Purpose: updates various flow routing statistics at current time period.
//
{
    int   j;

    // --- update stats only after reporting period begins
    if ( aDate < ReportStart ) return;
    SysOutfallFlow = 0.0;

    // --- update node & link stats
    //     (the omp-for loop variable j is implicitly private per thread)
#pragma omp parallel num_threads(NumThreads)
{
    #pragma omp for
    for ( j=0; j<Nobjects[NODE]; j++ )
        stats_updateNodeStats(j, tStep, aDate);
    #pragma omp for
    for ( j=0; j<Nobjects[LINK]; j++ )
        stats_updateLinkStats(j, tStep, aDate);
}

    // --- update count of times in steady state
    ReportStepCount++;
    SysStats.steadyStateCount += steadyState;

    // --- update time step stats if not in steady state
    if ( steadyState == FALSE )
    {
        // --- skip initial time step for min. value
        if ( OldRoutingTime > 0 )
        {
            SysStats.minTimeStep = MIN(SysStats.minTimeStep, tStep);

            // --- locate interval that logged time step falls in            //(5.1.015)
            //     and update its count                                      //
            for (j = 1; j < TIMELEVELS; j++)                                 //
                if (tStep >= SysStats.timeStepIntervals[j])                  //
                {                                                            //
                    SysStats.timeStepCounts[j]++;                            //
                    break;                                                   //
                }                                                            //
        }
        SysStats.avgTimeStep += tStep;
        SysStats.maxTimeStep = MAX(SysStats.maxTimeStep, tStep);

        // --- update iteration step count stats
        SysStats.avgStepCount += stepCount;
    }

    // --- update max. system outfall flow
    MaxOutfallFlow = MAX(MaxOutfallFlow, SysOutfallFlow);
}

//=============================================================================

void stats_updateCriticalTimeCount(int node, int link)
//
//  Input:   node = node index
//           link = link index
//  Output:  none
//  Purpose: updates count of times a node or link was time step-critical.
//
{
    if      ( node >= 0 ) NodeStats[node].timeCourantCritical += 1.0;
    else if ( link >= 0 ) LinkStats[link].timeCourantCritical += 1.0;
}

//=============================================================================

void stats_updateNodeStats(int j, double tStep, DateTime aDate)
//
//  Input:   j = node index
//           tStep = routing time step (sec)
//           aDate = current date/time
//  Output:  none
//  Purpose: updates flow statistics for a node.
//
{
    int    k, p;
    double newVolume = Node[j].newVolume;
    double newDepth = Node[j].newDepth;
    // NOTE(review): yCrown is computed but not referenced anywhere below
    // (the 5.1.013 surcharge test uses crownElev directly) — apparently a
    // leftover from the pre-5.1.013 surcharge logic.
    double yCrown = Node[j].crownElev - Node[j].invertElev;
    int    canPond = (AllowPonding && Node[j].pondedArea > 0.0);

    // --- update depth statistics
    NodeStats[j].avgDepth += newDepth;
    if ( newDepth > NodeStats[j].maxDepth )
    {
        NodeStats[j].maxDepth = newDepth;
        NodeStats[j].maxDepthDate = aDate;
    }

    // --- update flooding, ponding, and surcharge statistics
    if ( Node[j].type != OUTFALL )
    {
        if ( newVolume > Node[j].fullVolume || Node[j].overflow > 0.0 )
        {
            NodeStats[j].timeFlooded += tStep;
            NodeStats[j].volFlooded += Node[j].overflow * tStep;
            if ( canPond ) NodeStats[j].maxPondedVol =
                MAX(NodeStats[j].maxPondedVol,
                    (newVolume - Node[j].fullVolume));
        }

        // --- for dynamic wave routing, classify a node as                   //(5.1.013)
        //     surcharged if its water level exceeds its crown elev.
        if (RouteModel == DW)                                                 //(5.1.013)
        {
            if ((Node[j].type != STORAGE || Node[j].surDepth > 0.0) &&        //(5.1.013)
                newDepth + Node[j].invertElev + FUDGE >= Node[j].crownElev)
            {
                NodeStats[j].timeSurcharged += tStep;
            }
        }
    }

    // --- update storage statistics
    if ( Node[j].type == STORAGE )
    {
        k = Node[j].subIndex;
        StorageStats[k].avgVol += newVolume;
        StorageStats[k].evapLosses +=
            Storage[Node[j].subIndex].evapLoss;
        StorageStats[k].exfilLosses +=
            Storage[Node[j].subIndex].exfilLoss;

        // --- max. stored volume is capped at the unit's full volume
        newVolume = MIN(newVolume, Node[j].fullVolume);
        if ( newVolume > StorageStats[k].maxVol )
        {
            StorageStats[k].maxVol = newVolume;
            StorageStats[k].maxVolDate = aDate;
        }
        StorageStats[k].maxFlow = MAX(StorageStats[k].maxFlow, Node[j].outflow);
    }

    // --- update outfall statistics
    if ( Node[j].type == OUTFALL )
    {
        k = Node[j].subIndex;
        // --- only flows above the minimum runoff threshold count toward
        //     the average-flow statistic and its period count
        if ( Node[j].inflow >= MIN_RUNOFF_FLOW )
        {
            OutfallStats[k].avgFlow += Node[j].inflow;
            OutfallStats[k].maxFlow = MAX(OutfallStats[k].maxFlow, Node[j].inflow);
            OutfallStats[k].totalPeriods++;
        }
        for (p=0; p<Nobjects[POLLUT]; p++)
        {
            OutfallStats[k].totalLoad[p] += Node[j].inflow *
                Node[j].newQual[p] * tStep;
        }
        SysOutfallFlow += Node[j].inflow;
    }

    // --- update inflow statistics
    //     (lateral inflow integrated with the trapezoidal rule)
    NodeStats[j].totLatFlow += ( (Node[j].oldLatFlow + Node[j].newLatFlow) *
                                 0.5 * tStep );
    if ( fabs(Node[j].newLatFlow) > fabs(NodeStats[j].maxLatFlow) )
        NodeStats[j].maxLatFlow = Node[j].newLatFlow;
    if ( Node[j].inflow > NodeStats[j].maxInflow )
    {
        NodeStats[j].maxInflow = Node[j].inflow;
        NodeStats[j].maxInflowDate = aDate;
    }

    // --- update overflow statistics
    if ( Node[j].overflow > NodeStats[j].maxOverflow )
    {
        NodeStats[j].maxOverflow = Node[j].overflow;
        NodeStats[j].maxOverflowDate = aDate;
    }
}

//=============================================================================

void  stats_updateLinkStats(int j, double tStep, DateTime aDate)
//
//  Input:   j = link index
//           tStep = routing time step (sec)
//           aDate = current date/time
//  Output:  none
//  Purpose: updates flow statistics for a link.
// { int k; double q, v; double dq; // --- update max. flow dq = Link[j].newFlow - Link[j].oldFlow; q = fabs(Link[j].newFlow); if ( q > LinkStats[j].maxFlow ) { LinkStats[j].maxFlow = q; LinkStats[j].maxFlowDate = aDate; } // --- update max. velocity v = link_getVelocity(j, q, Link[j].newDepth); if ( v > LinkStats[j].maxVeloc ) { LinkStats[j].maxVeloc = v; } // --- update max. depth if ( Link[j].newDepth > LinkStats[j].maxDepth ) { LinkStats[j].maxDepth = Link[j].newDepth; } if ( Link[j].type == PUMP ) { if ( q >= Link[j].qFull ) LinkStats[j].timeFullFlow += tStep; if ( q > MIN_RUNOFF_FLOW ) { k = Link[j].subIndex; PumpStats[k].minFlow = MIN(PumpStats[k].minFlow, q); PumpStats[k].maxFlow = LinkStats[j].maxFlow; PumpStats[k].avgFlow += q; PumpStats[k].volume += q*tStep; PumpStats[k].utilized += tStep; PumpStats[k].energy += link_getPower(j)*tStep/3600.0; if ( Link[j].flowClass == DN_DRY ) PumpStats[k].offCurveLow += tStep; if ( Link[j].flowClass == UP_DRY ) PumpStats[k].offCurveHigh += tStep; if ( Link[j].oldFlow < MIN_RUNOFF_FLOW ) PumpStats[k].startUps++; PumpStats[k].totalPeriods++; LinkStats[j].timeSurcharged += tStep; LinkStats[j].timeFullUpstream += tStep; LinkStats[j].timeFullDnstream += tStep; } } else if ( Link[j].type == CONDUIT ) { // --- update time under normal flow & inlet control if ( Link[j].normalFlow ) LinkStats[j].timeNormalFlow += tStep; if ( Link[j].inletControl ) LinkStats[j].timeInletControl += tStep; // --- update flow classification distribution k = Link[j].flowClass; if ( k >= 0 && k < MAX_FLOW_CLASSES ) { ++LinkStats[j].timeInFlowClass[k]; } // --- update time conduit is full k = Link[j].subIndex; if ( q >= Link[j].qFull * (double)Conduit[k].barrels ) LinkStats[j].timeFullFlow += tStep; if ( Conduit[k].capacityLimited ) LinkStats[j].timeCapacityLimited += tStep; switch (Conduit[k].fullState) { case ALL_FULL: LinkStats[j].timeSurcharged += tStep; LinkStats[j].timeFullUpstream += tStep; LinkStats[j].timeFullDnstream += tStep; break; case 
UP_FULL: LinkStats[j].timeFullUpstream += tStep; break; case DN_FULL: LinkStats[j].timeFullDnstream += tStep; } } // --- update flow turn count k = LinkStats[j].flowTurnSign; LinkStats[j].flowTurnSign = SGN(dq); if ( fabs(dq) > 0.001 && k * LinkStats[j].flowTurnSign < 0 ) LinkStats[j].flowTurns++; } //============================================================================= void stats_findMaxStats() // // Input: none // Output: none // Purpose: finds nodes & links with highest mass balance errors // & highest times Courant time-step critical. // { int j; double x; double stepCount = ReportStepCount - SysStats.steadyStateCount; //(5.1.015) // --- initialize max. stats arrays for (j=0; j<MAX_STATS; j++) { MaxMassBalErrs[j].objType = NODE; MaxMassBalErrs[j].index = -1; MaxMassBalErrs[j].value = -1.0; MaxCourantCrit[j].index = -1; MaxCourantCrit[j].value = -1.0; MaxFlowTurns[j].index = -1; MaxFlowTurns[j].value = -1.0; } // --- find links with most flow turns if ( stepCount > 2 ) //(5.1.015) { for (j=0; j<Nobjects[LINK]; j++) { x = 100.0 * LinkStats[j].flowTurns / (2./3.*(stepCount-2)); //(5.1.015) stats_updateMaxStats(MaxFlowTurns, LINK, j, x); } } // --- find nodes with largest mass balance errors for (j=0; j<Nobjects[NODE]; j++) { // --- skip terminal nodes and nodes with negligible inflow if ( Node[j].degree <= 0 ) continue; if ( NodeInflow[j] <= 0.1 ) continue; // --- evaluate mass balance error // (Note: NodeInflow & NodeOutflow include any initial and final // stored volumes, respectively). 
if ( NodeInflow[j] > 0.0 ) x = 1.0 - NodeOutflow[j] / NodeInflow[j]; else if ( NodeOutflow[j] > 0.0 ) x = -1.0; else x = 0.0; stats_updateMaxStats(MaxMassBalErrs, NODE, j, 100.0*x); } // --- stop if not using a variable time step if ( RouteModel != DW || CourantFactor == 0.0 ) return; // --- find nodes most frequently Courant critical if ( stepCount == 0 ) return; //(5.1.015) for (j=0; j<Nobjects[NODE]; j++) { x = NodeStats[j].timeCourantCritical / stepCount; //(5.1.015) stats_updateMaxStats(MaxCourantCrit, NODE, j, 100.0*x); } // --- find links most frequently Courant critical for (j=0; j<Nobjects[LINK]; j++) { x = LinkStats[j].timeCourantCritical / stepCount; //(5.1.015) stats_updateMaxStats(MaxCourantCrit, LINK, j, 100.0*x); } } //============================================================================= void stats_updateMaxStats(TMaxStats maxStats[], int i, int j, double x) // // Input: maxStats[] = array of critical statistics values // i = object category (NODE or LINK) // j = object index // x = value of statistic for the object // Output: none // Purpose: updates the collection of most critical statistics // { int k; TMaxStats maxStats1, maxStats2; maxStats1.objType = i; maxStats1.index = j; maxStats1.value = x; for (k=0; k<MAX_STATS; k++) { if ( fabs(maxStats1.value) > fabs(maxStats[k].value) ) { maxStats2 = maxStats[k]; maxStats[k] = maxStats1; maxStats1 = maxStats2; } } }
/* ===== file: GB_binop__bxnor_int16.c ===== */
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxnor_int16) // A.*B function (eWiseMult): GB (_AemultB_08__bxnor_int16) // A.*B function (eWiseMult): GB (_AemultB_02__bxnor_int16) // A.*B function (eWiseMult): GB (_AemultB_04__bxnor_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_int16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bxnor_int16) // C+=b function (dense accum): GB (_Cdense_accumb__bxnor_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_int16) // C=scalar+B GB (_bind1st__bxnor_int16) // C=scalar+B' GB (_bind1st_tran__bxnor_int16) // C=A+scalar GB (_bind2nd__bxnor_int16) // C=A'+scalar GB (_bind2nd_tran__bxnor_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 
0 // BinaryOp: cij = ~((aij) ^ (bij)) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ~((x) ^ (y)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXNOR || GxB_NO_INT16 || GxB_NO_BXNOR_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bxnor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bxnor_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bxnor_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t 
*restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bxnor_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bxnor_int16) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bxnor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bxnor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bxnor_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bxnor_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = ~((x) ^ (bij)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bxnor_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = ~((aij) ^ (y)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ~((x) ^ (aij)) ; \ } GrB_Info GB (_bind1st_tran__bxnor_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ~((aij) ^ (y)) ; \ } GrB_Info GB (_bind2nd_tran__bxnor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
/* ===== file: 3d25pt_var.lbpar.c ===== */
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4)); ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(0,ceild(t1-2,3)),ceild(2*t1-2*t2-1,3)),ceild(16*t2-Nz-11,24));t3<=min(min(min(floord(4*Nt+Ny-9,24),floord(8*t1+Ny+7,24)),floord(16*t2+Ny+3,24)),floord(16*t1-16*t2+Nz+Ny+5,24));t3++) { for (t4=max(max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-19,32)),ceild(24*t3-Ny-19,32));t4<=min(min(min(min(floord(4*Nt+Nx-9,32),floord(8*t1+Nx+7,32)),floord(16*t2+Nx+3,32)),floord(24*t3+Nx+11,32)),floord(16*t1-16*t2+Nz+Nx+5,32));t4++) { for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),6*t3+4),8*t4+6);t5++) { for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) { lbv=max(32*t4,4*t5+4); ubv=min(32*t4+31,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) 
+ (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); 
free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
/* ===== file: convolution_3x3_pack4.h ===== */
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd63_transform_kernel_pack4_sse(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * 
ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = pb-pa-inch/pa-64-outch/pb kernel_tm_pack4.create(inch / 4, 64, outch / 4, (size_t)4u * 4 * 4, 4 * 4); for (int q = 0; q + (4 - 1) < outch; q += 4) { Mat g0 = kernel_tm_pack4.channel(q / 4); for (int k = 0; k < 64; k++) { float* g00 = g0.row<float>(k); for (int p = 0; p + (4 - 1) < inch; p += 4) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { const float* k00 = kernel_tm.channel(q + j).row(p + i); g00[0] = (float)k00[k]; g00++; } } } } } } static void conv3x3s1_winograd63_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tiles = outw / 6; int h_tiles = outh / 6; int tiles = w_tiles * h_tiles; bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); conv3x3s1_winograd63_transform_input_pack4_sse(bottom_blob_bordered, bottom_blob_tm, opt); } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) 
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { float* tmpptr = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x12 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); __m128 _r4 = _mm_load_ps(r0 + 4 * 4); __m128 _r5 = _mm_load_ps(r0 + 4 * 5); __m128 _r6 = _mm_load_ps(r0 + 4 * 6); __m128 _r7 = _mm_load_ps(r0 + 4 * 7); __m128 _r8 = _mm_load_ps(r0 + 4 * 8); __m128 _r9 = _mm_load_ps(r0 + 4 * 9); __m128 _ra = _mm_load_ps(r0 + 4 * 10); __m128 _rb = _mm_load_ps(r0 + 4 * 11); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7); _MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r4); _mm_store_ps(tmpptr + 4 * 2, _r8); _mm_store_ps(tmpptr + 4 * 3, _r1); _mm_store_ps(tmpptr + 4 * 4, _r5); _mm_store_ps(tmpptr + 4 * 5, _r9); _mm_store_ps(tmpptr + 4 * 6, _r2); _mm_store_ps(tmpptr + 4 * 7, _r6); _mm_store_ps(tmpptr + 4 * 8, _ra); _mm_store_ps(tmpptr + 4 * 9, _r3); _mm_store_ps(tmpptr + 4 * 10, _r7); _mm_store_ps(tmpptr + 4 * 11, _rb); r0 += bottom_blob_tm.cstep * 4; tmpptr += 48; } } for (; i + 7 < tiles; i += 8) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8); const float* r0 = 
bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x8 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); __m128 _r4 = _mm_load_ps(r0 + 4 * 4); __m128 _r5 = _mm_load_ps(r0 + 4 * 5); __m128 _r6 = _mm_load_ps(r0 + 4 * 6); __m128 _r7 = _mm_load_ps(r0 + 4 * 7); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r4); _mm_store_ps(tmpptr + 4 * 2, _r1); _mm_store_ps(tmpptr + 4 * 3, _r5); _mm_store_ps(tmpptr + 4 * 4, _r2); _mm_store_ps(tmpptr + 4 * 5, _r6); _mm_store_ps(tmpptr + 4 * 6, _r3); _mm_store_ps(tmpptr + 4 * 7, _r7); r0 += bottom_blob_tm.cstep * 4; tmpptr += 32; } } for (; i + 3 < tiles; i += 4) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x4 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r1); _mm_store_ps(tmpptr + 4 * 2, _r2); _mm_store_ps(tmpptr + 4 * 3, _r3); r0 += bottom_blob_tm.cstep * 4; tmpptr += 16; } } for (; i + 1 < tiles; i += 2) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x2 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r01_0 = _mm_unpacklo_ps(_r0, _r1); __m128 _r01_1 = _mm_unpackhi_ps(_r0, _r1); _mm_store_ps(tmpptr, _r01_0); _mm_store_ps(tmpptr + 4, _r01_1); r0 += bottom_blob_tm.cstep * 4; tmpptr += 8; } } for (; i < tiles; i++) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* r0 = bottom_blob_tm; 
r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { __m128 _val = _mm_load_ps(r0); _mm_store_ps(tmpptr, _val); r0 += bottom_blob_tm.cstep * 4; tmpptr += 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); __m128 _sum4 = _mm_setzero_ps(); __m128 _sum5 = _mm_setzero_ps(); __m128 _sum6 = _mm_setzero_ps(); __m128 _sum7 = _mm_setzero_ps(); __m128 _sum8 = _mm_setzero_ps(); __m128 _sum9 = _mm_setzero_ps(); __m128 _suma = _mm_setzero_ps(); __m128 _sumb = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); __m128 _val4 = _mm_load1_ps(r0 + 4); __m128 _val5 = _mm_load1_ps(r0 + 5); __m128 _val6 = _mm_load1_ps(r0 + 6); __m128 _val7 = _mm_load1_ps(r0 + 7); __m128 _val8 = _mm_load1_ps(r0 + 8); __m128 _val9 = _mm_load1_ps(r0 + 9); __m128 _vala = _mm_load1_ps(r0 + 10); __m128 _valb = _mm_load1_ps(r0 + 11); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5); _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7); _sum8 = 
_mm_comp_fmadd_ps(_val8, _w0, _sum8); _sum9 = _mm_comp_fmadd_ps(_val9, _w0, _sum9); _suma = _mm_comp_fmadd_ps(_vala, _w0, _suma); _sumb = _mm_comp_fmadd_ps(_valb, _w0, _sumb); r0 += 12; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm + 4 * 3, _sum3); _mm_store_ps(output0_tm + 4 * 4, _sum4); _mm_store_ps(output0_tm + 4 * 5, _sum5); _mm_store_ps(output0_tm + 4 * 6, _sum6); _mm_store_ps(output0_tm + 4 * 7, _sum7); _mm_store_ps(output0_tm + 4 * 8, _sum8); _mm_store_ps(output0_tm + 4 * 9, _sum9); _mm_store_ps(output0_tm + 4 * 10, _suma); _mm_store_ps(output0_tm + 4 * 11, _sumb); output0_tm += 4 * 12; } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); __m128 _sum4 = _mm_setzero_ps(); __m128 _sum5 = _mm_setzero_ps(); __m128 _sum6 = _mm_setzero_ps(); __m128 _sum7 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); __m128 _val4 = _mm_load1_ps(r0 + 4); __m128 _val5 = _mm_load1_ps(r0 + 5); __m128 _val6 = _mm_load1_ps(r0 + 6); __m128 _val7 = _mm_load1_ps(r0 + 7); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5); _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7); r0 += 8; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm 
+ 4 * 3, _sum3); _mm_store_ps(output0_tm + 4 * 4, _sum4); _mm_store_ps(output0_tm + 4 * 5, _sum5); _mm_store_ps(output0_tm + 4 * 6, _sum6); _mm_store_ps(output0_tm + 4 * 7, _sum7); output0_tm += 4 * 8; } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); r0 += 4; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm + 4 * 3, _sum3); output0_tm += 4 * 4; } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); r0 += 2; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); output0_tm += 4 * 2; } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 
= _mm_load1_ps(r0); _sum = _mm_comp_fmadd_ps(_val0, _w0, _sum); r0 += 1; k0 += 4; } _mm_store_ps(output0_tm, _sum); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { conv3x3s1_winograd63_transform_output_pack4_sse(top_blob_tm, top_blob_bordered, bias, opt); } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_winograd43_transform_kernel_pack4_sse(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt) { // winograd43 transform kernel Mat kernel_tm(6 * 6, inch, outch); const float ktm[6][3] = { {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6}, {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[6][3]; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 36-inch-outch // dst = pb-pa-inch/pa-36-outch/pb kernel_tm_pack4.create(inch / 4, 36, 
outch / 4, (size_t)4u * 4 * 4, 4 * 4); for (int q = 0; q + (4 - 1) < outch; q += 4) { Mat g0 = kernel_tm_pack4.channel(q / 4); for (int k = 0; k < 36; k++) { float* g00 = g0.row<float>(k); for (int p = 0; p + (4 - 1) < inch; p += 4) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { const float* k00 = kernel_tm.channel(q + j).row(p + i); g00[0] = (float)k00[k]; g00++; } } } } } } static void conv3x3s1_winograd43_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 4n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tiles = outw / 4; int h_tiles = outh / 4; int tiles = w_tiles * h_tiles; bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); conv3x3s1_winograd43_transform_input_pack4_sse(bottom_blob_bordered, bottom_blob_tm, opt); } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 
4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { float* tmpptr = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x12 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); __m128 _r4 = _mm_load_ps(r0 + 4 * 4); __m128 _r5 = _mm_load_ps(r0 + 4 * 5); __m128 _r6 = _mm_load_ps(r0 + 4 * 6); __m128 _r7 = _mm_load_ps(r0 + 4 * 7); __m128 _r8 = _mm_load_ps(r0 + 4 * 8); __m128 _r9 = _mm_load_ps(r0 + 4 * 9); __m128 _ra = _mm_load_ps(r0 + 4 * 10); __m128 _rb = _mm_load_ps(r0 + 4 * 11); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7); _MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r4); _mm_store_ps(tmpptr + 4 * 2, _r8); _mm_store_ps(tmpptr + 4 * 3, _r1); _mm_store_ps(tmpptr + 4 * 4, _r5); _mm_store_ps(tmpptr + 4 * 5, _r9); _mm_store_ps(tmpptr + 4 * 6, _r2); _mm_store_ps(tmpptr + 4 * 7, _r6); _mm_store_ps(tmpptr + 4 * 8, _ra); _mm_store_ps(tmpptr + 4 * 9, _r3); _mm_store_ps(tmpptr + 4 * 10, _r7); _mm_store_ps(tmpptr + 4 * 11, _rb); r0 += bottom_blob_tm.cstep * 4; tmpptr += 48; } } for (; i + 7 < tiles; i += 8) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x8 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = 
_mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); __m128 _r4 = _mm_load_ps(r0 + 4 * 4); __m128 _r5 = _mm_load_ps(r0 + 4 * 5); __m128 _r6 = _mm_load_ps(r0 + 4 * 6); __m128 _r7 = _mm_load_ps(r0 + 4 * 7); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r4); _mm_store_ps(tmpptr + 4 * 2, _r1); _mm_store_ps(tmpptr + 4 * 3, _r5); _mm_store_ps(tmpptr + 4 * 4, _r2); _mm_store_ps(tmpptr + 4 * 5, _r6); _mm_store_ps(tmpptr + 4 * 6, _r3); _mm_store_ps(tmpptr + 4 * 7, _r7); r0 += bottom_blob_tm.cstep * 4; tmpptr += 32; } } for (; i + 3 < tiles; i += 4) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x4 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r1); _mm_store_ps(tmpptr + 4 * 2, _r2); _mm_store_ps(tmpptr + 4 * 3, _r3); r0 += bottom_blob_tm.cstep * 4; tmpptr += 16; } } for (; i + 1 < tiles; i += 2) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x2 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r01_0 = _mm_unpacklo_ps(_r0, _r1); __m128 _r01_1 = _mm_unpackhi_ps(_r0, _r1); _mm_store_ps(tmpptr, _r01_0); _mm_store_ps(tmpptr + 4, _r01_1); r0 += bottom_blob_tm.cstep * 4; tmpptr += 8; } } for (; i < tiles; i++) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { __m128 _val = _mm_load_ps(r0); _mm_store_ps(tmpptr, _val); r0 += bottom_blob_tm.cstep * 4; tmpptr += 4; } } } 
bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); __m128 _sum4 = _mm_setzero_ps(); __m128 _sum5 = _mm_setzero_ps(); __m128 _sum6 = _mm_setzero_ps(); __m128 _sum7 = _mm_setzero_ps(); __m128 _sum8 = _mm_setzero_ps(); __m128 _sum9 = _mm_setzero_ps(); __m128 _suma = _mm_setzero_ps(); __m128 _sumb = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); __m128 _val4 = _mm_load1_ps(r0 + 4); __m128 _val5 = _mm_load1_ps(r0 + 5); __m128 _val6 = _mm_load1_ps(r0 + 6); __m128 _val7 = _mm_load1_ps(r0 + 7); __m128 _val8 = _mm_load1_ps(r0 + 8); __m128 _val9 = _mm_load1_ps(r0 + 9); __m128 _vala = _mm_load1_ps(r0 + 10); __m128 _valb = _mm_load1_ps(r0 + 11); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5); _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7); _sum8 = _mm_comp_fmadd_ps(_val8, _w0, _sum8); _sum9 = _mm_comp_fmadd_ps(_val9, _w0, _sum9); _suma = _mm_comp_fmadd_ps(_vala, _w0, _suma); _sumb = _mm_comp_fmadd_ps(_valb, _w0, _sumb); r0 += 12; k0 
+= 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm + 4 * 3, _sum3); _mm_store_ps(output0_tm + 4 * 4, _sum4); _mm_store_ps(output0_tm + 4 * 5, _sum5); _mm_store_ps(output0_tm + 4 * 6, _sum6); _mm_store_ps(output0_tm + 4 * 7, _sum7); _mm_store_ps(output0_tm + 4 * 8, _sum8); _mm_store_ps(output0_tm + 4 * 9, _sum9); _mm_store_ps(output0_tm + 4 * 10, _suma); _mm_store_ps(output0_tm + 4 * 11, _sumb); output0_tm += 4 * 12; } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); __m128 _sum4 = _mm_setzero_ps(); __m128 _sum5 = _mm_setzero_ps(); __m128 _sum6 = _mm_setzero_ps(); __m128 _sum7 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); __m128 _val4 = _mm_load1_ps(r0 + 4); __m128 _val5 = _mm_load1_ps(r0 + 5); __m128 _val6 = _mm_load1_ps(r0 + 6); __m128 _val7 = _mm_load1_ps(r0 + 7); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5); _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7); r0 += 8; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm + 4 * 3, _sum3); _mm_store_ps(output0_tm + 4 * 4, _sum4); _mm_store_ps(output0_tm + 4 * 5, _sum5); _mm_store_ps(output0_tm + 4 * 6, _sum6); _mm_store_ps(output0_tm + 4 * 7, _sum7); 
output0_tm += 4 * 8; } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); r0 += 4; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm + 4 * 3, _sum3); output0_tm += 4 * 4; } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); r0 += 2; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); output0_tm += 4 * 2; } for (; i < tiles; i++) { const float* r0 = bb2.row<const float>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k0 = kernel0_tm.row<const float>(r); int nn = inch * 4; // inch always > 0 __m128 _sum = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); _sum = _mm_comp_fmadd_ps(_val0, _w0, _sum); r0 += 1; k0 += 4; } _mm_store_ps(output0_tm, _sum); output0_tm += 4; } } } } 
bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { conv3x3s1_winograd43_transform_output_pack4_sse(top_blob_tm, top_blob_bordered, bias, opt); } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
// ---- GB_binop__div_fc32.c ----
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__div_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_08__div_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_02__div_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_04__div_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__div_fc32)
// A*D function (colscale):         GB (_AxD__div_fc32)
// D*A function (rowscale):         GB (_DxB__div_fc32)
// C+=B function (dense accum):     GB (_Cdense_accumB__div_fc32)
// C+=b function (dense accum):     GB (_Cdense_accumb__div_fc32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__div_fc32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__div_fc32)
// C=scalar+B                       GB (_bind1st__div_fc32)
// C=scalar+B'                      GB (_bind1st_tran__div_fc32)
// C=A+scalar                       GB (_bind2nd__div_fc32)
// C=A'+scalar                      GB (_bind2nd_tran__div_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// A pattern? 0
// B type:   GxB_FC32_t
// B pattern? 0

// BinaryOp: cij = GB_FC32_div (aij, bij)

// Operand and result types for this operator instance.  All three are the
// single-precision complex type GxB_FC32_t (z = x / y on complex floats).

#define GB_ATYPE \
    GxB_FC32_t

#define GB_BTYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// GBX is the value accessor used by the templates; presumably it handles the
// iso-valued case via the A_iso flag (defined in GB.h -- confirm there).
#define GB_GETA(aij,Ax,pA,A_iso) \
    GxB_FC32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// NOTE: the trailing backslash continues the macro onto the following blank
// line; this is how the generator emits it, and the macro still expands to 0.
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    GxB_FC32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
// (same trailing-backslash continuation as GB_A_IS_PATTERN above)
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

// access entry p of the C value array
#define GB_CX(p) Cx [p]

// binary operator: complex single-precision division, z = x / y
#define GB_BINOP(z,x,y,i,j) \
    z = GB_FC32_div (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* symbols are compile-time "reduce binary size" switches)
#define GB_DISABLE \
    (GxB_NO_DIV || GxB_NO_FC32 || GxB_NO_DIV_FC32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense.  The numeric kernel is supplied
// by the shared template below, specialized by the GB_* macros defined
// earlier in this file (GB_CTYPE, GB_BINOP, ...).  Note: no GB_DISABLE guard
// and a void return; this variant is always compiled in.
void GB (_Cdense_ewise3_accum__div_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__div_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE, defined
// earlier in this file), which tells the caller to fall back to the generic
// implementation.
GrB_Info GB (_Cdense_accumB__div_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__div_fc32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // not reached: the block above always returns (kept by the generator)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__div_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__div_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// When is_eWiseUnion is true, alpha/beta supply the "missing entry" values
// for A and B respectively (GxB_eWiseUnion); otherwise the scalars are left
// unread by the template.
GrB_Info GB (_AaddB__div_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    GxB_FC32_t alpha_scalar ;
    GxB_FC32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((GxB_FC32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__div_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// flipxy is only meaningful when GB_BINOP_FLIP is 1 (non-commutative op with
// no flipped variant); for this operator GB_BINOP_FLIP is 0, so the #else
// branch below is compiled and flipxy is ignored.
GrB_Info GB (_AemultB_02__div_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__div_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__div_fc32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = div (x, Bx [p]) for every entry present in the bitmap Bb
// (GBB (Bb, p) is true when entry p exists; Bb == NULL means all present).
GrB_Info GB (_bind1st__div_fc32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t   x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        GxB_FC32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_FC32_div (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Mirror of bind1st: Cx [p] = div (Ax [p], y) for every entry present in Ab.
GrB_Info GB (_bind2nd__div_fc32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t   y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        GxB_FC32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_FC32_div (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC32_div (x, aij) ;            \
}

GrB_Info GB (_bind1st_tran__div_fc32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (same type here, since A and C share a type)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC32_div (aij, y) ;            \
}

GrB_Info GB (_bind2nd_tran__div_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

// NOTE(review): presumably closes the #ifndef GBCOMPACT opened at the top of
// this file (outside this view; cf. the sibling uint64 kernel) — confirm.
#endif
beta_projectors.h
// Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file beta_projectors.h * * \brief Contains declaration and implementation of sirius::Beta_projectors class. */ #ifndef __BETA_PROJECTORS_H__ #define __BETA_PROJECTORS_H__ #include "communicator.hpp" #include "Unit_cell/unit_cell.h" #include "wave_functions.hpp" #include "simulation_context.h" #include "beta_projectors_base.h" namespace sirius { /// Stores <G+k | beta> expansion class Beta_projectors: public Beta_projectors_base<1> { protected: /// Generate plane-wave coefficients for beta-projectors of atom types. 
void generate_pw_coefs_t(std::vector<int>& igk__) { PROFILE("sirius::Beta_projectors::generate_pw_coefs_t"); if (!num_beta_t()) { return; } auto& comm = gkvec_.comm(); auto& beta_radial_integrals = ctx_.beta_ri(); std::vector<double_complex> z(ctx_.unit_cell().lmax() + 1); for (int l = 0; l <= ctx_.unit_cell().lmax(); l++) { z[l] = std::pow(double_complex(0, -1), l) * fourpi / std::sqrt(ctx_.unit_cell().omega()); } /* compute <G+k|beta> */ #pragma omp parallel for for (int igkloc = 0; igkloc < num_gkvec_loc(); igkloc++) { int igk = igk__[igkloc]; /* vs = {r, theta, phi} */ auto vs = SHT::spherical_coordinates(gkvec_.gkvec_cart(igk)); /* compute real spherical harmonics for G+k vector */ std::vector<double> gkvec_rlm(Utils::lmmax(ctx_.unit_cell().lmax())); SHT::spherical_harmonics(ctx_.unit_cell().lmax(), vs[1], vs[2], &gkvec_rlm[0]); for (int iat = 0; iat < ctx_.unit_cell().num_atom_types(); iat++) { auto& atom_type = ctx_.unit_cell().atom_type(iat); /* get all values of radial integrals */ auto ri_val = beta_radial_integrals.values(iat, vs[0]); for (int xi = 0; xi < atom_type.mt_basis_size(); xi++) { int l = atom_type.indexb(xi).l; int lm = atom_type.indexb(xi).lm; int idxrf = atom_type.indexb(xi).idxrf; pw_coeffs_t_[0](igkloc, atom_type.offset_lo() + xi) = z[l] * gkvec_rlm[lm] * ri_val(idxrf); } } } if (ctx_.control().print_checksum_) { auto c1 = pw_coeffs_t_[0].checksum(); comm.allreduce(&c1, 1); if (comm.rank() == 0) { print_checksum("beta_pw_coeffs_t", c1); } } if (ctx_.processing_unit() == GPU) { /* beta projectors for atom types will be stored on GPU for the entire run */ reallocate_pw_coeffs_t_on_gpu_ = false; pw_coeffs_t_[0].allocate(memory_t::device); pw_coeffs_t_[0].copy<memory_t::host, memory_t::device>(); } } public: Beta_projectors(Simulation_context& ctx__, Gvec const& gkvec__, std::vector<int>& igk__) : Beta_projectors_base<1>(ctx__, gkvec__, igk__) { PROFILE("sirius::Beta_projectors::Beta_projectors"); generate_pw_coefs_t(igk__); } void 
generate(int chunk__) { Beta_projectors_base<1>::generate(chunk__, 0); } }; } // namespace #endif
GB_binop__div_uint64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__div_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_01__div_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_02__div_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_03__div_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__div_uint64)
// A*D function (colscale):         GB (_AxD__div_uint64)
// D*A function (rowscale):         GB (_DxB__div_uint64)
// C+=B function (dense accum):     GB (_Cdense_accumB__div_uint64)
// C+=b function (dense accum):     GB (_Cdense_accumb__div_uint64)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__div_uint64)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__div_uint64)
// C=scalar+B                       GB (_bind1st__div_uint64)
// C=scalar+B'                      GB (_bind1st_tran__div_uint64)
// C=A+scalar                       GB (_bind2nd__div_uint64)
// C=A'+scalar                      GB (_bind2nd_tran__div_uint64)

// C type:   uint64_t
// A type:   uint64_t
// B,b type: uint64_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 64)

// The GB_* macros below parameterize the shared kernel templates that this
// file #include's: types of C, A, and B, how entries are read and written,
// and the scalar binary operator itself.

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// NOTE(review): GBX presumably returns the single iso value when A_iso is
// true, and Ax [pA] otherwise — defined in GB.h, confirm there.
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint64_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint64_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator (unsigned integer division; GB_IDIV_UNSIGNED handles the
// divide-by-zero convention for GraphBLAS)
#define GB_BINOP(z,x,y,i,j) \
    z = GB_IDIV_UNSIGNED (x, y, 64) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (set by GB_control.h; when true, each kernel below returns GrB_NO_VALUE)
#define GB_DISABLE \
    (GxB_NO_DIV || GxB_NO_UINT64 || GxB_NO_DIV_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense.  No GB_DISABLE guard and a void
// return: this variant is always compiled in.
void GB (_Cdense_ewise3_accum__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE, defined
// above), which tells the caller to fall back to the generic implementation.
GrB_Info GB (_Cdense_ewise3_noaccum__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__div_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // not reached: the block above always returns (kept by the generator)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    bool A_is_pattern,
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    bool D_is_pattern,
    const GrB_Matrix B,
    bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__div_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__div_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// flipxy is only meaningful when GB_BINOP_FLIP is 1; for this operator
// GB_BINOP_FLIP is 0 (the flip was handled as div -> rdiv), so the #else
// branch below is compiled and flipxy is ignored.
GrB_Info GB (_AemultB_02__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__div_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = div (x, Bx [p]) for every entry present in the bitmap Bb
// (GBB (Bb, p) is true when entry p exists; Bb == NULL means all present).
GrB_Info GB (_bind1st__div_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t   x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IDIV_UNSIGNED (x, bij, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Mirror of bind1st: Cx [p] = div (Ax [p], y) for every entry present in Ab.
GrB_Info GB (_bind2nd__div_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t   y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IDIV_UNSIGNED (aij, y, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 64) ;   \
}

GrB_Info GB (_bind1st_tran__div_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (same type here, since A and C share a type)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 64) ;   \
}

GrB_Info GB (_bind2nd_tran__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

// closes the #ifndef GBCOMPACT at the top of this file
#endif
rose_v1_complexCondition.c
// Contributed by Jeff Keasler // 5/24/2010 #include <omp.h> void goo(int numAB) { double *c; double *bufLoc; int k_nom_22; #if 0 #else #pragma omp parallel for private (k_nom_22) firstprivate (numAB) for (k_nom_22 = 0; k_nom_22 <= numAB * numAB * 3 - 1; k_nom_22 += 1) { #endif bufLoc[k_nom_22] = c[k_nom_22]; } return ; }
BenchUtils.h
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
#pragma once
#include <chrono>
#include <functional>
#include <vector>

#include <immintrin.h>

#ifdef USE_BLAS
#if __APPLE__
// not sure whether need to differentiate TARGET_OS_MAC or TARGET_OS_IPHONE,
// etc.
#include <Accelerate/Accelerate.h>
#else
#include <cblas.h>
#endif
#endif

#ifdef _OPENMP
#include <omp.h>
#endif

#ifdef USE_MKL
#include <mkl.h>
#endif

#include "./AlignedVec.h"
#include "fbgemm/FbgemmBuild.h"
#include "fbgemm/FbgemmPackMatrixB.h"
#include "src/RefImplementations.h"

namespace fbgemm {

// Fill vec with uniformly random values in [low, high] (defined elsewhere).
template <typename T>
void randFill(aligned_vector<T>& vec, T low, T high);

// Touch a buffer large enough to evict the last-level cache (defined elsewhere).
void llc_flush(std::vector<char>& llc);

// Same as omp_get_max_threads() when OpenMP is available, otherwise 1
int fbgemm_get_max_threads();
// Same as omp_get_num_threads() when OpenMP is available, otherwise 1
int fbgemm_get_num_threads();
// Same as omp_get_thread_num() when OpenMP is available, otherwise 0
int fbgemm_get_thread_num();

// Flush every cache line of vec's backing storage with clflush, so a
// following benchmark run starts from cold caches.  Returns a dummy float
// accumulated from the data; callers should consume it so the reads are not
// optimized away.  NOINLINE (fbgemm macro) keeps the flush loop out of the
// caller's measured region.
template <typename T>
NOINLINE float cache_evict(const T& vec) {
  auto const size = vec.size();
  auto const elemSize = sizeof(typename T::value_type);
  auto const dataSize = size * elemSize;

  const char* data = reinterpret_cast<const char*>(vec.data());
  // NOTE(review): assumes 64-byte cache lines (true on current x86) — the
  // _mm_clflush/_mm_mfence below are x86-only anyway.
  constexpr int CACHE_LINE_SIZE = 64;

  // Not having this dummy computation significantly slows down the computation
  // that follows.
  float dummy = 0.0f;
  for (std::size_t i = 0; i < dataSize; i += CACHE_LINE_SIZE) {
    dummy += data[i] * 1.0f;
    _mm_mfence();
#ifndef _MSC_VER
    // compiler barrier: keep the read, fence, and flush ordered
    asm volatile("" ::: "memory");
#endif
    _mm_clflush(&data[i]);
  }

  return dummy;
}

/**
 * Parse application command line arguments
 *
 */
int parseArgumentInt(
    int argc,
    const char* argv[],
    const char* arg,
    int non_exist_val,
    int def_val);
bool parseArgumentBool(
    int argc,
    const char* argv[],
    const char* arg,
    bool def_val);

namespace {
// Default no-op "eviction" functor for measureWithWarmup.
struct empty_flush {
  void operator()() const {}
};
} // namespace

/**
 * Time fn averaged over measuredIterations runs, after warmupIterations
 * unmeasured runs.  Returns seconds per iteration.
 *
 * @param Fn functor to execute
 * @param Fe data eviction functor, run before each timed iteration
 *
 * When useOpenMP is true the whole measurement runs inside an OpenMP
 * parallel region: thread 0 does the eviction and accumulates the time,
 * and barriers bracket fn() so all threads enter/leave it together.
 */
template <class Fn, class Fe = std::function<void()>>
double measureWithWarmup(
    Fn&& fn,
    int warmupIterations,
    int measuredIterations,
    const Fe& fe = empty_flush(),
    bool useOpenMP = false) {
  for (int i = 0; i < warmupIterations; ++i) {
    // Evict data first
    fe();
    fn();
  }

  double ttot = 0.0;

#ifdef _OPENMP
#pragma omp parallel if (useOpenMP)
  {
#endif
    for (int i = 0; i < measuredIterations; ++i) {
      std::chrono::time_point<std::chrono::high_resolution_clock> start, end;
      const auto thread_id = useOpenMP ? fbgemm_get_thread_num() : 0;

      // only one thread evicts the data, before anyone starts timing
      if (thread_id == 0) {
        fe();
      }

#ifdef _OPENMP
      if (useOpenMP) {
#pragma omp barrier
      }
#endif
      start = std::chrono::high_resolution_clock::now();

      fn();

#ifdef _OPENMP
      if (useOpenMP) {
#pragma omp barrier
      }
#endif
      end = std::chrono::high_resolution_clock::now();

      auto dur =
          std::chrono::duration_cast<std::chrono::nanoseconds>(end - start);

      if (thread_id == 0) {
        // TODO: measure load imbalance
        ttot += dur.count();
      }
    }

#ifdef _OPENMP
  }
#endif

  // nanoseconds -> seconds, averaged over the measured iterations
  return ttot / 1e9 / measuredIterations;
}

/*
 * @brief Out-of-place transposition for M*N matrix ref.
* @param M number of rows in input * @param K number of columns in input */ template <typename T> void transpose_matrix( int M, int N, const T* src, int ld_src, T* dst, int ld_dst) { for (int i = 0; i < N; ++i) { for (int j = 0; j < M; ++j) { dst[i * ld_dst + j] = src[i + j * ld_src]; } } // for each output row } /* * @brief In-place transposition for nxk matrix ref. * @param n number of rows in input (number of columns in output) * @param k number of columns in input (number of rows in output) */ template <typename T> void transpose_matrix(T* ref, int n, int k) { std::vector<T> local(n * k); transpose_matrix(n, k, ref, k, local.data(), n); memcpy(ref, local.data(), n * k * sizeof(T)); } #ifdef USE_MKL void test_xerbla(char* srname, const int* info, int); #endif #define dataset 1 template <typename btype> void performance_test( int num_instances, bool flush, int repetitions, bool is_mkl) { #ifdef USE_MKL mkl_set_xerbla((XerblaEntry)test_xerbla); #endif (void)is_mkl; // Suppress unused variable warning float alpha = 1.f, beta = 1.f; matrix_op_t btran = matrix_op_t::Transpose; #if dataset == 1 const int NITER = (flush) ? 10 : 100; std::vector<std::vector<int>> shapes; for (auto m = 1; m < 120; m++) { // shapes.push_back({m, 128, 512}); shapes.push_back({m, 512, 512}); } #elif dataset == 2 const int NITER = (flush) ? 
10 : 100; #include "shapes_dataset.h" #else flush = false; constexpr int NITER = 1; std::vector<std::vector<int>> shapes; std::random_device r; std::default_random_engine generator(r()); std::uniform_int_distribution<int> dm(1, 100); std::uniform_int_distribution<int> dnk(1, 1024); for (int i = 0; i < 1000; i++) { int m = dm(generator); int n = dnk(generator); int k = dnk(generator); shapes.push_back({m, n, k}); } #endif std::string type; double gflops, gbs, ttot; for (auto s : shapes) { int m = s[0]; int n = s[1]; int k = s[2]; // initialize with small numbers aligned_vector<int> Aint(m * k); randFill(Aint, 0, 4); std::vector<aligned_vector<float>> A; for (int i = 0; i < num_instances; ++i) { A.push_back(aligned_vector<float>(Aint.begin(), Aint.end())); } aligned_vector<int> Bint(k * n); randFill(Bint, 0, 4); aligned_vector<float> B(Bint.begin(), Bint.end()); std::vector<std::unique_ptr<PackedGemmMatrixB<btype>>> Bp; for (int i = 0; i < num_instances; ++i) { Bp.emplace_back(std::unique_ptr<PackedGemmMatrixB<btype>>( new PackedGemmMatrixB<btype>(btran, k, n, alpha, B.data()))); } auto kAligned = ((k * sizeof(float) + 64) & ~63) / sizeof(float); auto nAligned = ((n * sizeof(float) + 64) & ~63) / sizeof(float); std::vector<aligned_vector<float>> Bt(num_instances); auto& Bt_ref = Bt[0]; if (btran == matrix_op_t::Transpose) { Bt_ref.resize(k * nAligned); for (auto row = 0; row < k; ++row) { for (auto col = 0; col < n; ++col) { Bt_ref[row * nAligned + col] = alpha * B[col * k + row]; } } } else { Bt_ref.resize(kAligned * n); for (auto row = 0; row < k; ++row) { for (auto col = 0; col < n; ++col) { Bt_ref[col * kAligned + row] = alpha * B[col * k + row]; } } } for (auto i = 1; i < num_instances; ++i) { Bt[i] = Bt_ref; } std::vector<aligned_vector<float>> C_ref; std::vector<aligned_vector<float>> C_fb; if (beta != 0.0f) { aligned_vector<int> Cint(m * n); randFill(Cint, 0, 4); for (int i = 0; i < num_instances; ++i) { C_ref.push_back(aligned_vector<float>(Cint.begin(), 
Cint.end())); C_fb.push_back(aligned_vector<float>(Cint.begin(), Cint.end())); } } else { for (int i = 0; i < num_instances; ++i) { C_ref.push_back(aligned_vector<float>(m * n, 1.f)); C_fb.push_back(aligned_vector<float>(m * n, NAN)); } } double nflops = 2.0 * m * n * k; double nbytes = 4.0 * m * k + sizeof(btype) * 1.0 * k * n + 4.0 * m * n; // warm up MKL and fbgemm // check correctness at the same time for (auto w = 0; w < 3; w++) { #if defined(USE_MKL) || defined(USE_BLAS) cblas_sgemm( CblasRowMajor, CblasNoTrans, CblasNoTrans, // B is pretransposed, if required by operation m, n, k, 1.0, // Mutliplication by Alpha is done during transpose of B A[0].data(), k, Bt[0].data(), btran == matrix_op_t::NoTranspose ? kAligned : nAligned, beta, C_ref[0].data(), n); #else cblas_sgemm_ref( matrix_op_t::NoTranspose, matrix_op_t::NoTranspose, m, n, k, 1.0, A[0].data(), k, Bt[0].data(), (btran == matrix_op_t::NoTranspose) ? kAligned : nAligned, beta, C_ref[0].data(), n); #endif #ifdef _OPENMP #pragma omp parallel if (num_instances == 1) #endif { int num_threads = num_instances == 1 ? fbgemm_get_num_threads() : 1; int tid = num_instances == 1 ? fbgemm_get_thread_num() : 0; cblas_gemm_compute( matrix_op_t::NoTranspose, m, A[0].data(), *Bp[0], beta, C_fb[0].data(), tid, num_threads); } #if defined(USE_MKL) || defined(USE_BLAS) // Compare results for (size_t i = 0; i < C_ref[0].size(); i++) { if (std::abs(C_ref[0][i] - C_fb[0][i]) > 1e-3) { fprintf( stderr, "Error: too high diff between fp32 ref %f and fp16 %f at %ld\n", C_ref[0][i], C_fb[0][i], i); return; } } #endif } #ifdef USE_MKL if (is_mkl) { // Gold via MKL sgemm type = "MKL_FP32"; #elif defined(USE_BLAS) type = "BLAS_FP32"; #else type = "REF_FP32"; #endif ttot = measureWithWarmup( [&]() { int copy = num_instances == 1 ? 
0 : fbgemm_get_thread_num(); for (int i = 0; i < repetitions; ++i) { #if defined(USE_MKL) || defined(USE_BLAS) cblas_sgemm( CblasRowMajor, CblasNoTrans, CblasNoTrans, m, n, k, 1.0, A[copy].data(), k, Bt[copy].data(), btran == matrix_op_t::NoTranspose ? kAligned : nAligned, beta, C_ref[copy].data(), n); #else cblas_sgemm_ref( matrix_op_t::NoTranspose, matrix_op_t::NoTranspose, m, n, k, 1.0, A[copy].data(), k, Bt[copy].data(), (btran == matrix_op_t::NoTranspose) ? kAligned : nAligned, beta, C_ref[copy].data(), n); #endif } }, 3, NITER, [&]() { if (flush) { int copy = num_instances == 1 ? 0 : fbgemm_get_thread_num(); cache_evict(A[copy]); cache_evict(Bt[copy]); cache_evict(C_ref[copy]); } }, // Use OpenMP if num instances > 1 num_instances > 1); gflops = nflops / ttot / 1e9; gbs = nbytes / ttot / 1e9; printf( "\n%30s m = %5d n = %5d k = %5d Gflops = %8.4lf GBytes = %8.4lf\n", type.c_str(), m, n, k, gflops * repetitions, gbs * repetitions); #ifdef USE_MKL } #endif type = "FBP_" + std::string(typeid(btype).name()); ttot = measureWithWarmup( [&]() { // When executing in data decomposition (single-instance) mode // Different threads will access different regions of the same // matrices. Thus, copy to be used is always 0. The numbers of // threads would be the as number of threads in the parallel // region. // When running in functional decomposition (multi-instance) mode // different matrices are used. The copy to be used selected by // thread_id (thread_num), and the number of threads performance // the compute of the same instance is 1. int copy = num_instances == 1 ? 0 : fbgemm_get_thread_num(); int num_threads = num_instances == 1 ? fbgemm_get_num_threads() : 1; int tid = num_instances == 1 ? fbgemm_get_thread_num() : 0; for (int i = 0; i < repetitions; ++i) { cblas_gemm_compute( matrix_op_t::NoTranspose, m, A[copy].data(), *Bp[copy], beta, C_fb[copy].data(), tid, num_threads); } }, 3, NITER, [&]() { if (flush) { int copy = num_instances == 1 ? 
0 : fbgemm_get_thread_num(); cache_evict(A[copy]); cache_evict(*Bp[copy]); cache_evict(C_fb[copy]); } }, true /*useOpenMP*/); gflops = nflops / ttot / 1e9; gbs = nbytes / ttot / 1e9; printf( "%30s m = %5d n = %5d k = %5d Gflops = %8.4lf GBytes = %8.4lf\n", type.c_str(), m, n, k, gflops * repetitions, gbs * repetitions); } } aligned_vector<float> getRandomSparseVector( unsigned size, float fractionNonZeros = 1.0); template <typename T> aligned_vector<T> getRandomBlockSparseMatrix( int Rows, int Cols, float fractionNonZerosBlocks = 1.0, int RowBlockSize = 4, int ColBlockSize = 1, T low = 0, T high = 9); } // namespace fbgemm
GB_binop__islt_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__islt_uint32) // A.*B function (eWiseMult): GB (_AemultB_01__islt_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__islt_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__islt_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_uint32) // A*D function (colscale): GB (_AxD__islt_uint32) // D*A function (rowscale): GB (_DxB__islt_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__islt_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__islt_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_uint32) // C=scalar+B GB (_bind1st__islt_uint32) // C=scalar+B' GB (_bind1st_tran__islt_uint32) // C=A+scalar GB (_bind2nd__islt_uint32) // C=A'+scalar GB (_bind2nd_tran__islt_uint32) // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of 
C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLT || GxB_NO_UINT32 || GxB_NO_ISLT_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__islt_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__islt_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__islt_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__islt_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const 
int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__islt_uint32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__islt_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__islt_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const 
int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__islt_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__islt_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__islt_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__islt_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__islt_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__islt_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__islt_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
rg_filter.c
////////////////////////////////////// // Cunren Liang, NASA JPL/Caltech // Copyright 2015-2018... ////////////////////////////////////// #include "resamp.h" #include <fftw3.h> #include <omp.h> #define SWAP4(a) (*(unsigned int *)&(a) = (((*(unsigned int *)&(a) & 0x000000ff) << 24) | ((*(unsigned int *)&(a) & 0x0000ff00) << 8) | ((*(unsigned int *)&(a) >> 8) & 0x0000ff00) | ((*(unsigned int *)&(a) >> 24) & 0x000000ff))) int rg_filter(char *inputfile, int nrg, int naz, int nout, char **outputfile, float *bw, float *bc, int nfilter, int nfft, float beta, int zero_cf, float offset, int byteorder, long imageoffset, long lineoffset){ /* inputfile: input file nrg file width nout: number of output files outputfile: (value_of_out_1, value_of_out_2, value_of_out_3...) output files bw: (value_of_out_1, value_of_out_2, value_of_out_3...) filter bandwidth divided by sampling frequency [0, 1] bc: (value_of_out_1, value_of_out_2, value_of_out_3...) filter center frequency divided by sampling frequency nfilter: number samples of the filter (odd). Reference Value: 65 nfft: number of samples of the FFT. Reference Value: 1024 beta: kaiser window beta. Reference Value: 1.0 zero_cf: if bc != 0.0, move center frequency to zero? 0: Yes (Reference Value). 1: No. offset: offset (in samples) of linear phase for moving center frequency. 
Reference Value: 0.0 byteorder: (0) LSB, little endian; (1) MSB, big endian of intput file imageoffset: offset from start of the image of input file lineoffset: length of each line of input file */ /////////////////////////////// // int k; // printf("input parameters:"); // printf("%s\n", inputfile); // printf("%d\n", nrg); // printf("%d\n", nout); // for(k =0; k<nout;k++){ // printf("%s\n", outputfile[k]); // printf("%f\n", bw[k]); // printf("%f\n", bc[k]); // } // printf("%d\n", nfilter); // printf("%d\n", nfft); // printf("%f\n", beta); // printf("%d\n", zero_cf); // printf("%f\n", offset); /////////////////////////////// FILE *infp; //slave image to be resampled FILE **outfp; //resampled slave image fcomplex **filter; fcomplex *in; fcomplex **out; fcomplex *tmp; fcomplex *tmp2; fcomplex *tmpf; int *zeroflag; fftwf_plan p_forward; fftwf_plan p_backward; fftwf_plan p_forward_filter; //fftwf_plan p_backward_filter; //int nout; //number of output files //int nrg; //file width //int naz; //file length //int nfft; //fft length //int nfilter; //filter length int hnfilter; //float *bw; //float *bc; //float beta; //kaiser window beta //int zero_cf; //float offset; int argc_mand; int nthreads; float sc; //constant to scale the data read in to avoid large values //during fft and ifft float cf_pha; float t; fcomplex cf; int nblock_in; int nblock_out; int num_block; int i_block; int nblock_in_last; int nblock_out_last; int i, j, i_out; /*****************************************************************************/ //nfilter = 65; //nfft = 1024; //beta = 1.0; //zero_cf = 0; //offset = 0.0; sc = 10000.0; /*****************************************************************************/ infp = openfile(inputfile, "rb"); //naz = file_length(infp, nrg, sizeof(fcomplex)); //fseeko(infp,0L,SEEK_END); //naz = (ftello(infp) - imageoffset) / (lineoffset + nrg*sizeof(fcomplex)); //rewind(infp); printf("file width: %d, file length: %d\n\n", nrg, naz); if(nout < 1){ fprintf(stderr, "there 
should be at least one output file!\n"); exit(1); } outfp = array1d_FILE(nout); for(i = 0; i < nout; i++){ outfp[i] = openfile(outputfile[i], "wb"); } //check filter length if(nfilter < 3){ fprintf(stderr, "filter length: %d too small!\n", nfilter); exit(1); } if(nfilter % 2 != 1){ fprintf(stderr, "filter length must be odd!\n"); exit(1); } if(byteorder == 0){ printf("inputfile byte order: little endian\n"); } else{ printf("inputfile byte order: big endian\n"); } printf("input file image offset [byte]: %ld\n", imageoffset); printf("input file line offset [byte]: %ld\n", lineoffset); if(imageoffset < 0){ fprintf(stderr, "image offset must be >= 0\n"); exit(1); } if(lineoffset < 0){ fprintf(stderr, "lineoffset offset must be >= 0\n"); exit(1); } //compute block processing parameters hnfilter = (nfilter - 1) / 2; nblock_in = nfft - nfilter + 1; nblock_in += hnfilter; if (nblock_in <= 0){ fprintf(stderr, "fft length too small compared with filter length!\n"); exit(1); } nblock_out = nblock_in - 2 * hnfilter; num_block = (nrg - 2 * hnfilter) / nblock_out; if((nrg - num_block * nblock_out - 2 * hnfilter) != 0){ num_block += 1; } if((nrg - 2 * hnfilter) <= 0){ num_block = 1; } if(num_block == 1){ nblock_out_last = 0; nblock_in_last = nrg; } else{ nblock_out_last = nrg - (num_block - 1) * nblock_out - 2 * hnfilter; nblock_in_last = nblock_out_last + 2 * hnfilter; } //allocate memory filter = array2d_fcomplex(nout, nfft); in = array1d_fcomplex(nrg); out = array2d_fcomplex(nout, nrg); tmp = array1d_fcomplex(nfft); tmp2 = array1d_fcomplex(nfft); tmpf = array1d_fcomplex(nfft); zeroflag = array1d_int(nrg); //as said in the FFTW document, //Typically, the problem will have to involve at least a few thousand data points before threads become beneficial. //so I choose not to use Multi-threaded FFTW, as our FFT size is mostly small. 
if(0){ ////////////////////////////////////////////////////////////////////////////////////////////////// //Multi-threaded FFTW nthreads = fftwf_init_threads(); if(nthreads == 0){ fprintf(stderr, "WARNING: there is some error in using multi-threaded FFTW.\n"); fprintf(stderr, " therefore it is not used, and computation performance is reduced.\n"); nthreads = 1; } else{ //int this_thread = omp_get_thread_num(), num_threads = omp_get_num_threads(); //nthreads = omp_get_num_threads(); nthreads = omp_get_max_threads(); } printf("FFTW is using %d threads\n", nthreads); //this works for all the following plans if(nthreads != 1) //actually it is OK to pass nthreads=1, in this case, threads are disabled. fftwf_plan_with_nthreads(nthreads); ////////////////////////////////////////////////////////////////////////////////////////////////// } //create plans before initializing data, because FFTW_MEASURE overwrites the in/out arrays. p_forward = fftwf_plan_dft_1d(nfft, (fftwf_complex*)tmp, (fftwf_complex*)tmp, FFTW_FORWARD, FFTW_MEASURE); p_backward = fftwf_plan_dft_1d(nfft, (fftwf_complex*)tmp2, (fftwf_complex*)tmp2, FFTW_BACKWARD, FFTW_MEASURE); p_forward_filter = fftwf_plan_dft_1d(nfft, (fftwf_complex*)tmpf, (fftwf_complex*)tmpf, FFTW_FORWARD, FFTW_ESTIMATE); //computing filters for(i = 0; i < nout; i++){ bandpass_filter(bw[i], bc[i], nfilter, nfft, (nfilter-1)/2, beta, tmpf); //relationship of nr and matlab fft //nr fft matlab fft // 1 <==> ifft()*nfft // -1 <==> fft() //four1((float *)filter - 1, nfft, -1); fftwf_execute(p_forward_filter); for(j = 0; j < nfft; j++){ filter[i][j].re = tmpf[j].re; filter[i][j].im = tmpf[j].im; } } fftwf_destroy_plan(p_forward_filter); //skip image header if(imageoffset != 0) fseek(infp, imageoffset, SEEK_SET); //process data for(i = 0; i < naz; i++){ //progress report if((i + 1) % 1000 == 0 || (i + 1) == naz) fprintf(stderr,"processing line: %6d of %6d\r", i+1, naz); if((i + 1) == naz) fprintf(stderr,"\n\n"); //read data if(i != 0) 
fseek(infp, lineoffset-(size_t)nrg * sizeof(fcomplex), SEEK_CUR); readdata((fcomplex *)in, (size_t)nrg * sizeof(fcomplex), infp); //swap bytes if(byteorder!=0){ for(j = 0; j < nrg; j++){ SWAP4(in[j].re); SWAP4(in[j].im); } } #pragma omp parallel for private(j) shared(nrg,in, zeroflag, sc) for(j = 0; j < nrg; j++){ if(in[j].re != 0.0 || in[j].im != 0.0){ zeroflag[j] = 1; in[j].re *= 1.0 / sc; in[j].im *= 1.0 / sc; } else{ zeroflag[j] = 0; } } //process each block for(i_block = 0; i_block < num_block; i_block++){ //zero out //for(j = 0; j < nfft; j++){ // tmp[j].re = 0.0; // tmp[j].im = 0.0; //} memset((void *)tmp, 0, (size_t)nfft*sizeof(fcomplex)); //get data if(num_block == 1){ for(j = 0; j < nrg; j++){ tmp[j] = in[j]; } } else{ if(i_block == num_block - 1){ for(j = 0; j < nblock_in_last; j++){ tmp[j] = in[j+nblock_out*i_block]; } } else{ for(j = 0; j < nblock_in; j++){ tmp[j] = in[j+nblock_out*i_block]; } } } //four1((float *)tmp - 1, nfft, -1); //tested, the same as above fftwf_execute(p_forward); //process each output file for(i_out = 0; i_out < nout; i_out++){ //looks like this makes it slower, so comment out //#pragma omp parallel for private(j) shared(nfft, tmp2, filter, i_out, tmp) for(j = 0; j < nfft; j++) tmp2[j] = cmul(filter[i_out][j], tmp[j]); //four1((float *)tmp2 - 1, nfft, 1); //tested, the same as above fftwf_execute(p_backward); //get data if(num_block == 1){ for(j = 0; j < nrg; j++){ out[i_out][j] = tmp2[j]; } } else{ if(i_block == 0){ for(j = 0; j < hnfilter + nblock_out; j++){ out[i_out][j] = tmp2[j]; } } else if(i_block == num_block - 1){ for(j = 0; j < hnfilter + nblock_out_last; j++){ out[i_out][nrg - 1 - j] = tmp2[nblock_in_last - 1 - j]; } } else{ for(j = 0; j < nblock_out; j++){ out[i_out][j + hnfilter + i_block * nblock_out] = tmp2[j + hnfilter]; } } }//end of getting data }//end of processing each output file }//end of processing each block //move center frequency if(zero_cf == 0){ //process each output file //looks like this makes it 
slower, so comment out //#pragma omp parallel for private(i_out, j, t, cf_pha, cf) shared(nout, bc, nrg, offset, out) for(i_out = 0; i_out < nout; i_out++){ if(bc[i_out] != 0){ #pragma omp parallel for private(j, t, cf_pha, cf) shared(nrg, offset, bc, i_out, out) for(j = 0; j < nrg; j++){ //t = j - (nrg - 1.0) / 2.0; //make 0 index exactly at range center t = j + offset; //make 0 index exactly at range center cf_pha = 2.0 * PI * (-bc[i_out]) * t; cf.re = cos(cf_pha); cf.im = sin(cf_pha); out[i_out][j] = cmul(out[i_out][j], cf); } } } } //scale back and write data //process each output file for(i_out = 0; i_out < nout; i_out++){ //scale back #pragma omp parallel for private(j) shared(nrg, zeroflag, out, i_out, sc, nfft) for(j = 0; j < nrg; j++){ if(zeroflag[j] == 0){ out[i_out][j].re = 0.0; out[i_out][j].im = 0.0; } else{ out[i_out][j].re *= sc / nfft; out[i_out][j].im *= sc / nfft; } } //write data writedata((fcomplex *)out[i_out], nrg * sizeof(fcomplex), outfp[i_out]); } }//end of processing data fftwf_destroy_plan(p_forward); fftwf_destroy_plan(p_backward); free_array2d_fcomplex(filter); free_array1d_fcomplex(in); free_array2d_fcomplex(out); free_array1d_fcomplex(tmp); free_array1d_fcomplex(tmp2); free_array1d_fcomplex(tmpf); free_array1d_int(zeroflag); //free_array1d_float(bw); //free_array1d_float(bc); fclose(infp); for(i_out = 0; i_out < nout; i_out++) fclose(outfp[i_out]); //free_array1d_FILE(outfp); return 0; }//end main()
DRB058-jacobikernel-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* Two parallel for loops within one single parallel region, combined with private() and reduction(). */

/*
 * Jacobi iteration benchmark (DataRaceBench DRB058; per the file-name
 * suffix "-no" this variant is the one intended to be free of data races).
 *
 * NOTE(review): the code is kept verbatim — the exact pragma placement and
 * statement order are what this benchmark exercises, so only comments have
 * been added.
 *
 * Globals:
 *   u      current solution grid (MSIZE x MSIZE)
 *   f      right-hand side
 *   uold   previous-iteration copy of u
 *   dx,dy  grid spacing, recomputed from n and m
 */
#include <stdio.h>
#include <math.h>
#define MSIZE 200
#include <omp.h>
int n = 200;
int m = 200;
int mits = 1000;             /* number of Jacobi iterations actually run */
double tol = 0.0000000001;   /* tolerance; note the loop below runs a fixed
                                mits iterations and never compares against
                                tol */
double relax = 1.0;          /* relaxation factor omega */
double alpha = 0.0543;
double u[200][200];
double f[200][200];
double uold[200][200];
double dx;
double dy;

/*
 * initialize: zero the solution grid u and fill the RHS f.
 *
 * xx and yy are truncated to int before being squared, so f is evaluated on
 * integer coordinates (this matches the original benchmark source).  The
 * inner "#pragma omp parallel for" sits inside the outer parallel loop,
 * i.e. it requests a nested parallel region.
 */
void initialize()
{
  int i;
  int j;
  int xx;
  int yy;
  dx = 2.0 / (n - 1);
  dy = 2.0 / (m - 1);
/* Initialize initial condition and RHS */
//#pragma omp parallel for private(i,j,xx,yy)
#pragma omp parallel for private (xx,yy,i,j) firstprivate (n,m)
  for (i = 0; i <= n - 1; i += 1) {
#pragma omp parallel for private (xx,yy,j) firstprivate (alpha,dx,dy)
    for (j = 0; j <= m - 1; j += 1) {
/* -1 < x < 1 */
      xx = ((int )(- 1.0 + dx * (i - 1)));
/* -1 < y < 1 */
      yy = ((int )(- 1.0 + dy * (j - 1)));
      u[i][j] = 0.0;
      f[i][j] = - 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy));
    }
  }
}

/*
 * jacobi: run mits sweeps of the Jacobi / SOR update on u.
 *
 * Each sweep copies u into uold, then updates every interior point from
 * its four uold neighbours; "error" accumulates the squared residual via
 * an OpenMP '+' reduction (on both the outer and the nested inner loop).
 * The residual reported at the end is sqrt(error) / (n * m) of the final
 * sweep; k exits the loop holding mits + 1.
 */
void jacobi()
{
  double omega;
  int i;
  int j;
  int k;
  double error;
  double resid;
  double ax;
  double ay;
  double b;
  omega = relax;
/* Initialize coefficients */
  dx = 2.0 / (n - 1);
  dy = 2.0 / (m - 1);
/* X-direction coef */
  ax = 1.0 / (dx * dx);
/* Y-direction coef */
  ay = 1.0 / (dy * dy);
/* Central coeff */
  b = - 2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha;
  error = 10.0 * tol;
  k = 1;
  while(k <= mits){
    error = 0.0;
/* Copy new solution into old */
#pragma omp parallel for private (i,j)
    for (i = 0; i <= n - 1; i += 1) {
#pragma omp parallel for private (j)
      for (j = 0; j <= m - 1; j += 1) {
        uold[i][j] = u[i][j];
      }
    }
/* Interior-point update; error is a reduction variable on both levels. */
#pragma omp parallel for private (resid,i,j) reduction (+:error)
    for (i = 1; i <= n - 1 - 1; i += 1) {
#pragma omp parallel for private (resid,j) reduction (+:error) firstprivate (omega,ax,ay,b)
      for (j = 1; j <= m - 1 - 1; j += 1) {
        resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) + ay * (uold[i][j - 1] + uold[i][j + 1]) + b * uold[i][j] - f[i][j]) / b;
        u[i][j] = uold[i][j] - omega * resid;
        error = error + resid * resid;
      }
    }
/* omp end parallel */
/* Error check */
    k = k + 1;
    error = sqrt(error) / (n * m);
/* End iteration loop */
  }
  printf("Total Number of Iterations:%d\n",k);
  printf("Residual:%E\n",error);
}

/* Driver: build the problem, run the solver. */
int main()
{
  initialize();
  jacobi();
  return 0;
}
omp_calloc_def_fb.c
// RUN: %libomp-compile-and-run
#include <stdio.h>
#include <omp.h>

/*
 * Smoke test for omp_calloc with a fallback trait: create an allocator on
 * omp_large_cap_mem_space with a 2 MB pool and omp_atv_default_mem_fb,
 * let two threads each allocate 1024x1024 bytes through it, and verify
 * that both allocations yielded non-NULL pointers.
 *
 * Returns 0 (and prints "passed") on success, 1 otherwise.
 */
int main() {
  omp_alloctrait_t at[2];
  omp_allocator_handle_t a;
  void *p[2];

  /* Trait 0: limit the pool to 2 MB.  Trait 1: fall back to the default
     memory space when the large-capacity space cannot serve a request. */
  at[0].key = omp_atk_pool_size;
  at[0].value = 2 * 1024 * 1024;
  at[1].key = omp_atk_fallback;
  at[1].value = omp_atv_default_mem_fb;
  a = omp_init_allocator(omp_large_cap_mem_space, 2, at);
  printf("allocator large created: %p\n", (void *)a);

#pragma omp parallel num_threads(2)
  {
    int tid = omp_get_thread_num();
    p[tid] = omp_calloc(1024, 1024, a);
    /* Barrier so both slots of p[] are filled before either is printed. */
#pragma omp barrier
    printf("th %d, ptr %p\n", tid, p[tid]);
    omp_free(p[tid], a);
  }

  // Both pointers should be non-NULL
  if (p[0] == NULL || p[1] == NULL) {
    printf("failed: pointers %p %p\n", p[0], p[1]);
    return 1;
  }
  printf("passed\n");
  return 0;
}
GB_unaryop__ainv_uint8_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): code left byte-identical for that reason; comments only.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__ainv_uint8_uint64
// op(A') function: GB_tran__ainv_uint8_uint64

// C type: uint8_t
// A type: uint64_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = -aij

// type of the A matrix entries
#define GB_ATYPE \
    uint64_t

// type of the C matrix entries
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij)): the cast to uint8_t happens BEFORE the negation,
// so the stored result is -((uint8_t) Ax [pA]) wrapped to uint8_t.
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint8_uint64   // Cx [p] = -((uint8_t) Ax [p])
(
    uint8_t *restrict Cx,             // output array, anz entries
    const uint64_t *restrict Ax,      // input array, anz entries
    int64_t anz,                      // number of entries to process
    int nthreads                      // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel: each iteration touches only Cx [p] / Ax [p]
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint8_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body is supplied by the shared transpose template, which
    // expands using the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
main.c
/*
 * Sums the first N elements of A and, in the same pass, overwrites each
 * element A[i] with its own index i.
 *
 * The traversal is an OpenMP worksharing loop inside a parallel region,
 * with a '+' reduction on the running sum, so every iteration is
 * independent of the others.
 *
 * Returns the sum of the ORIGINAL contents A[0..N-1] (0 when N <= 0).
 */
double foo(int N, double *restrict A) {
  double sum = 0;
#pragma omp parallel default(shared)
  {
#pragma omp for reduction(+ : sum)
    for (int idx = 0; idx < N; ++idx) {
      sum += A[idx];
      A[idx] = idx;
    }
  }
  return sum;
}
two.h
#ifndef _TWO_H
#define _TWO_H

/*
 * Declares two(), wrapped in an OpenMP 'declare target' region so that a
 * device version of the function is also compiled and the symbol can be
 * referenced from 'omp target' regions as well as from host code.
 */
#pragma omp declare target
int two(void);
#pragma omp end declare target

#endif
himeno_multi.c
/* * Original Source : /tmp/tmp.rutwVzhqmN/1.c * Language : C * Compiled Time : 2017-12-06 13:11:03 * Compiler Info : XcodeML/C-FrontEnd * Compiler Version : 1.0.3 */ # 1 "/tmp/tmp.rutwVzhqmN/1.c" typedef void * omp_lock_t; typedef void * omp_nest_lock_t; enum anon_type_1_acc_device_t { acc_device_none = 0, acc_device_default = 1, acc_device_host = 2, acc_device_not_host = 3, acc_device_nvidia = 4, acc_device_radeon = 5, acc_device_xeonphi = 6, acc_device_pgi_opencl = 7, acc_device_nvidia_opencl = 8, acc_device_opencl = 9 }; typedef enum anon_type_1_acc_device_t acc_device_t; typedef unsigned long size_t; typedef unsigned char __u_char; typedef unsigned short __u_short; typedef unsigned int __u_int; typedef unsigned long __u_long; typedef char __int8_t; typedef unsigned char __uint8_t; typedef short __int16_t; typedef unsigned short __uint16_t; typedef int __int32_t; typedef unsigned int __uint32_t; typedef long __int64_t; typedef unsigned long __uint64_t; typedef long __quad_t; typedef unsigned long __u_quad_t; typedef unsigned long __dev_t; typedef unsigned int __uid_t; typedef unsigned int __gid_t; typedef unsigned long __ino_t; typedef unsigned long __ino64_t; typedef unsigned int __mode_t; typedef unsigned long __nlink_t; typedef long __off_t; typedef long __off64_t; typedef int __pid_t; struct anon_type_2___fsid_t { int __val[2]; }; typedef struct anon_type_2___fsid_t __fsid_t; typedef long __clock_t; typedef unsigned long __rlim_t; typedef unsigned long __rlim64_t; typedef unsigned int __id_t; typedef long __time_t; typedef unsigned int __useconds_t; typedef long __suseconds_t; typedef int __daddr_t; typedef int __key_t; typedef int __clockid_t; typedef void * __timer_t; typedef long __blksize_t; typedef long __blkcnt_t; typedef long __blkcnt64_t; typedef unsigned long __fsblkcnt_t; typedef unsigned long __fsblkcnt64_t; typedef unsigned long __fsfilcnt_t; typedef unsigned long __fsfilcnt64_t; typedef long __fsword_t; typedef long __ssize_t; typedef long 
__syscall_slong_t; typedef unsigned long __syscall_ulong_t; typedef long __loff_t; typedef long * __qaddr_t; typedef char * __caddr_t; typedef long __intptr_t; typedef unsigned int __socklen_t; typedef unsigned int wint_t; union anon_type_4___value { unsigned int __wch; char __wchb[4]; }; typedef struct anon_type_3___mbstate_t __mbstate_t; struct __pgi_tag { unsigned int gp_offset; unsigned int fp_offset; char * overflow_arg_area; char * reg_save_area; }; typedef struct __pgi_tag __pgi_va_list[1]; typedef struct __pgi_tag va_list[1]; typedef struct __pgi_tag __gnuc_va_list[1]; struct _IO_jump_t { }; typedef void _IO_lock_t; enum __codecvt_result { __codecvt_ok = 0, __codecvt_partial = 1, __codecvt_error = 2, __codecvt_noconv = 3 }; struct _IO_FILE_plus { }; typedef long __io_read_fn(void * __cookie, char * __buf, unsigned long __nbytes); typedef long __io_write_fn(void * __cookie, char const * __buf, unsigned long __n); typedef int __io_seek_fn(void * __cookie, long * __pos, int __w); typedef int __io_close_fn(void * __cookie); typedef long off_t; typedef long ssize_t; typedef int wchar_t; enum anon_type_7_idtype_t { P_ALL = 0, P_PID = 1, P_PGID = 2 }; typedef enum anon_type_7_idtype_t idtype_t; struct anon_type_8___wait_terminated { unsigned int __w_termsig:7; unsigned int __w_coredump:1; unsigned int __w_retcode:8; unsigned int anon_mem_1:16; }; struct anon_type_9___wait_stopped { unsigned int __w_stopval:8; unsigned int __w_stopsig:8; unsigned int anon_mem_2:16; }; struct anon_type_10_div_t { int quot; int rem; }; typedef struct anon_type_10_div_t div_t; struct anon_type_11_ldiv_t { long quot; long rem; }; typedef struct anon_type_11_ldiv_t ldiv_t; struct anon_type_12_lldiv_t { long long quot; long long rem; }; typedef struct anon_type_12_lldiv_t lldiv_t; typedef unsigned char u_char; typedef unsigned short u_short; typedef unsigned int u_int; typedef unsigned long u_long; typedef long quad_t; typedef unsigned long u_quad_t; typedef struct anon_type_2___fsid_t 
fsid_t; typedef long loff_t; typedef unsigned long ino_t; typedef unsigned long dev_t; typedef unsigned int gid_t; typedef unsigned int mode_t; typedef unsigned long nlink_t; typedef unsigned int uid_t; typedef int pid_t; typedef unsigned int id_t; typedef int daddr_t; typedef char * caddr_t; typedef int key_t; typedef long clock_t; typedef long time_t; typedef int clockid_t; typedef void * timer_t; typedef unsigned long ulong; typedef unsigned short ushort; typedef unsigned int uint; typedef char int8_t; typedef short int16_t; typedef int int32_t; typedef long int64_t; typedef unsigned char u_int8_t; typedef unsigned short u_int16_t; typedef unsigned int u_int32_t; typedef unsigned long u_int64_t; typedef int register_t; typedef int __sig_atomic_t; struct anon_type_13___sigset_t { unsigned long __val[(1024) / ((8) * (sizeof(unsigned long)))]; }; typedef struct anon_type_13___sigset_t __sigset_t; typedef struct anon_type_13___sigset_t sigset_t; struct timespec { long tv_sec; long tv_nsec; }; struct timeval { long tv_sec; long tv_usec; }; typedef long suseconds_t; typedef long __fd_mask; struct anon_type_14_fd_set { long __fds_bits[(1024) / ((8) * ((int)(sizeof(long))))]; }; typedef struct anon_type_14_fd_set fd_set; typedef long fd_mask; typedef long blksize_t; typedef long blkcnt_t; typedef unsigned long fsblkcnt_t; typedef unsigned long fsfilcnt_t; typedef unsigned long pthread_t; union pthread_attr_t { char __size[56]; long __align; }; typedef union pthread_attr_t pthread_attr_t; union anon_type_16_pthread_mutexattr_t { char __size[4]; int __align; }; typedef union anon_type_16_pthread_mutexattr_t pthread_mutexattr_t; struct anon_type_18___data { int __lock; unsigned int __futex; unsigned long long __total_seq; unsigned long long __wakeup_seq; unsigned long long __woken_seq; void * __mutex; unsigned int __nwaiters; unsigned int __broadcast_seq; }; typedef union anon_type_17_pthread_cond_t pthread_cond_t; union anon_type_19_pthread_condattr_t { char __size[4]; 
int __align; }; typedef union anon_type_19_pthread_condattr_t pthread_condattr_t; typedef unsigned int pthread_key_t; typedef int pthread_once_t; struct anon_type_21___data { int __lock; unsigned int __nr_readers; unsigned int __readers_wakeup; unsigned int __writer_wakeup; unsigned int __nr_readers_queued; unsigned int __nr_writers_queued; int __writer; int __shared; char __rwelision; unsigned char __pad1[7]; unsigned long __pad2; unsigned int __flags; }; typedef union anon_type_20_pthread_rwlock_t pthread_rwlock_t; union anon_type_22_pthread_rwlockattr_t { char __size[8]; long __align; }; typedef union anon_type_22_pthread_rwlockattr_t pthread_rwlockattr_t; typedef int volatile pthread_spinlock_t; union anon_type_23_pthread_barrier_t { char __size[32]; long __align; }; typedef union anon_type_23_pthread_barrier_t pthread_barrier_t; union anon_type_24_pthread_barrierattr_t { char __size[4]; int __align; }; typedef union anon_type_24_pthread_barrierattr_t pthread_barrierattr_t; struct random_data { int * fptr; int * rptr; int * state; int rand_type; int rand_deg; int rand_sep; int * end_ptr; }; struct drand48_data { unsigned short __x[3]; unsigned short __old_x[3]; unsigned short __c; unsigned short __init; unsigned long long __a; }; typedef int (* __compar_fn_t)(void const * , void const * ); struct __locale_data { }; struct timezone { int tz_minuteswest; int tz_dsttime; }; typedef struct timezone * restrict __timezone_ptr_t; enum __itimer_which { ITIMER_REAL = 0, ITIMER_VIRTUAL = 1, ITIMER_PROF = 2 }; struct itimerval { struct timeval it_interval; struct timeval it_value; }; typedef int __itimer_which_t; struct Mat { float * m; int mnums; int mrows; int mcols; int mdeps; }; typedef struct Mat Matrix; struct _IO_FILE; struct _IO_marker; typedef struct _IO_FILE FILE; typedef struct _IO_FILE __FILE; struct anon_type_3___mbstate_t { int __count; union anon_type_4___value __value; }; struct anon_type_5__G_fpos_t { long __pos; struct anon_type_3___mbstate_t __state; }; 
typedef struct anon_type_5__G_fpos_t _G_fpos_t; struct anon_type_6__G_fpos64_t { long __pos; struct anon_type_3___mbstate_t __state; }; typedef struct anon_type_6__G_fpos64_t _G_fpos64_t; struct _IO_marker { struct _IO_marker * _next; struct _IO_FILE * _sbuf; int _pos; }; struct _IO_FILE { int _flags; char * _IO_read_ptr; char * _IO_read_end; char * _IO_read_base; char * _IO_write_base; char * _IO_write_ptr; char * _IO_write_end; char * _IO_buf_base; char * _IO_buf_end; char * _IO_save_base; char * _IO_backup_base; char * _IO_save_end; struct _IO_marker * _markers; struct _IO_FILE * _chain; int _fileno; int _flags2; long _old_offset; unsigned short _cur_column; char _vtable_offset; char _shortbuf[1]; void * _lock; long _offset; void * __pad1; void * __pad2; void * __pad3; void * __pad4; unsigned long __pad5; int _mode; char _unused2[(((15) * (sizeof(int))) - ((4) * (sizeof(void * )))) - (sizeof(unsigned long))]; }; typedef struct _IO_FILE _IO_FILE; typedef struct anon_type_5__G_fpos_t fpos_t; union wait { int w_status; struct anon_type_8___wait_terminated __wait_terminated; struct anon_type_9___wait_stopped __wait_stopped; }; struct __pthread_internal_list; struct __pthread_internal_list { struct __pthread_internal_list * __prev; struct __pthread_internal_list * __next; }; typedef struct __pthread_internal_list __pthread_list_t; struct __pthread_mutex_s { int __lock; unsigned int __count; int __owner; unsigned int __nusers; int __kind; short __spins; short __elision; struct __pthread_internal_list __list; }; typedef union anon_type_15_pthread_mutex_t pthread_mutex_t; union anon_type_17_pthread_cond_t { struct anon_type_18___data __data; char __size[48]; long long __align; }; union anon_type_20_pthread_rwlock_t { struct anon_type_21___data __data; char __size[56]; long __align; }; struct __locale_struct { struct __locale_data * __locales[13]; unsigned short const * __ctype_b; int const * __ctype_tolower; int const * __ctype_toupper; char const * __names[13]; }; 
typedef struct __locale_struct * __locale_t; typedef struct __locale_struct * locale_t; struct __MaccDataTableEntry; struct __MaccDataTableEntry { void * addr; void * addr_ub; int type_size; int entire_lb; int entire_ub; int dirty; int dirty_lb; int dirty_ub; int offset; struct __MaccDataTableEntry * next; }; struct __MaccDataTable { struct __MaccDataTableEntry * entries[256]; }; struct __MaccDataWrapCache { void * addr[256]; struct __MaccDataTableEntry * entry[256]; int offset[256]; int cachenum[16]; }; union anon_type_15_pthread_mutex_t { struct __pthread_mutex_s __data; char __size[40]; long __align; }; void omp_set_num_threads(int num); int omp_get_num_threads(void); int omp_get_max_threads(void); int omp_get_thread_num(void); int omp_get_num_procs(void); int omp_in_parallel(void); void omp_set_dynamic(int dynamic_thds); int omp_get_dynamic(void); void omp_set_nested(int n_nested); int omp_get_nested(void); double omp_get_wtime(void); double omp_get_wtick(void); void omp_init_lock(void * * lock); void omp_init_nest_lock(void * * lock); void omp_destroy_lock(void * * lock); void omp_destroy_nest_lock(void * * lock); void omp_set_lock(void * * lock); void omp_set_nest_lock(void * * lock); void omp_unset_lock(void * * lock); void omp_unset_nest_lock(void * * lock); int omp_test_lock(void * * lock); int omp_test_nest_lock(void * * lock); void acc_set_default_async(int async); int acc_get_default_async(void); extern int acc_get_num_devices(enum anon_type_1_acc_device_t devtype); extern enum anon_type_1_acc_device_t acc_get_device(void); extern void acc_set_device_num(int devnum, enum anon_type_1_acc_device_t devtype); extern int acc_get_device_num(enum anon_type_1_acc_device_t devtype); extern void acc_init(enum anon_type_1_acc_device_t devtype); extern void acc_shutdown(enum anon_type_1_acc_device_t devtype); extern void acc_set_deviceid(int devid); extern int acc_get_deviceid(int devnum, enum anon_type_1_acc_device_t devtype); extern int acc_async_test(long 
async); extern int acc_async_test_all(void); extern void acc_async_wait(long async); extern void acc_async_wait_all(void); extern void acc_wait(long async); extern void acc_wait_async(long arg, long async); extern void acc_wait_all(void); extern void acc_wait_all_async(long async); extern int acc_on_device(enum anon_type_1_acc_device_t devtype); extern void __macc_free(void * ); extern void * acc_memcpy(void * targetptr, void * srcptr, unsigned long bytes); extern void * acc_memcpy_async(void * targetptr, void * srcptr, unsigned long bytes, long async); extern void * acc_copyin(void * hostptr, unsigned long bytes); extern void * acc_copyin_async(void * hostptr, unsigned long bytes, long async); extern void * acc_pcopyin(void * hostptr, unsigned long bytes); extern void * acc_pcopyin_async(void * hostptr, unsigned long bytes, long async); extern void * acc_present_or_copyin(void * hostptr, unsigned long bytes); extern void * acc_present_or_copyin_async(void * hostptr, unsigned long bytes, long async); extern void * acc_create(void * hostptr, unsigned long bytes); extern void * acc_create_async(void * hostptr, unsigned long bytes, long async); extern void * acc_pcreate(void * hostptr, unsigned long bytes); extern void * acc_pcreate_async(void * hostptr, unsigned long bytes, long async); extern void * acc_present_or_create(void * hostptr, unsigned long bytes); extern void * acc_present_or_create_async(void * hostptr, unsigned long bytes, long async); extern void acc_copyout(void * hostptr, unsigned long bytes); extern void acc_copyout_async(void * hostptr, unsigned long bytes, long async); extern void acc_delete(void * hostptr, unsigned long bytes); extern void acc_delete_async(void * hostptr, unsigned long bytes, long async); extern void acc_update_device(void * hostptr, unsigned long bytes); extern void acc_update_device_async(void * hostptr, unsigned long bytes, long async); extern void acc_update_self(void * hostptr, unsigned long bytes); extern void 
acc_update_self_async(void * hostptr, unsigned long bytes, long async); extern void acc_update_host(void * hostptr, unsigned long bytes); extern void acc_update_host_async(void * hostptr, unsigned long bytes, long async); extern void acc_memcpy_to_device(void * devptr, void * hostptr, unsigned long bytes); extern void acc_memcpy_to_device_async(void * devptr, void * hostptr, unsigned long bytes, long async); extern void acc_memcpy_from_device(void * hostptr, void * devptr, unsigned long bytes); extern void acc_memcpy_from_device_async(void * hostptr, void * devptr, unsigned long bytes, long async); extern void * acc_memcpy_device(void * targetdevptr, void * srcdevptr, unsigned long bytes); extern void * acc_memcpy_device_async(void * targetdevptr, void * srcdevptr, unsigned long bytes, long async); extern void acc_attach(void * * hostptrptr); extern void acc_attach_async(void * * hostptrptr, long async); extern void acc_detach(void * * hostptrptr); extern void acc_detach_async(void * * hostptrptr, long async); extern void acc_set_device_type(enum anon_type_1_acc_device_t devtype); extern enum anon_type_1_acc_device_t acc_get_device_type(void); extern void * __macc_malloc(unsigned long); extern void * acc_deviceptr(void * hostptr); extern void * acc_hostptr(void * devptr); extern void acc_map_data(void * hostptr, void * devptr, unsigned long bytes); extern void acc_unmap_data(void * hostptr); extern int acc_is_present(void * hostptr, unsigned long bytes); extern int acc_present_count(void * hostptr); extern void acc_updatein(void * hostptr, unsigned long bytes); extern void acc_updatein_async(void * hostptr, unsigned long bytes, long async); extern void acc_updateout(void * hostptr, unsigned long bytes); extern void acc_updateout_async(void * hostptr, unsigned long bytes, long async); extern void * acc_get_current_cuda_context(void); extern int acc_get_current_cuda_device(void); extern void * acc_get_cuda_stream(long); extern void acc_set_cuda_stream(long, void * ); 
extern void * acc_cuda_get_context(int); extern int acc_cuda_get_device(int); extern void * acc_get_current_opencl_context(void); extern void * acc_get_current_opencl_device(void); extern void * acc_get_opencl_queue(long); extern int atomicaddi(void * address, int val); extern unsigned int atomicaddu(void * address, unsigned int val); extern unsigned long long atomicaddul(void * address, unsigned long long val); extern float atomicaddf(void * address, float val); extern double atomicaddd(void * address, double val); extern int atomicsubi(void * address, int val); extern unsigned int atomicsubu(void * address, unsigned int val); extern unsigned long long atomicsubul(void * address, unsigned long long val); extern float atomicsubf(void * address, float val); extern double atomicsubd(void * address, double val); extern int atomicmaxi(void * address, int val); extern unsigned int atomicmaxu(void * address, unsigned int val); extern unsigned long long atomicmaxul(void * address, unsigned long long val); extern float atomicmaxf(void * address, float val); extern double atomicmaxd(void * address, double val); extern int atomicmini(void * address, int val); extern unsigned int atomicminu(void * address, unsigned int val); extern unsigned long long atomicminul(void * address, unsigned long long val); extern float atomicminf(void * address, float val); extern double atomicmind(void * address, double val); extern int atomicandi(void * address, int val); extern unsigned int atomicandu(void * address, unsigned int val); extern unsigned long long atomicandul(void * address, unsigned long long val); extern int atomicori(void * address, int val); extern unsigned int atomicoru(void * address, unsigned int val); extern unsigned long long atomicorul(void * address, unsigned long long val); extern int atomicxori(void * address, int val); extern unsigned int atomicxoru(void * address, unsigned int val); extern unsigned long long atomicxorul(void * address, unsigned long long val); 
extern int atomicexchi(void * address, int val); extern unsigned int atomicexchu(void * address, unsigned int val); extern unsigned long long atomicexchul(void * address, unsigned long long val); extern float atomicexchf(void * address, float val); extern double atomicexchd(void * address, double val); extern unsigned int atomicincu(void * address, unsigned int val); extern unsigned int atomicdecu(void * address, unsigned int val); extern int atomiccasi(void * address, int val, int val2); extern unsigned int atomiccasu(void * address, unsigned int val, unsigned int val2); extern unsigned long long atomiccasul(void * address, unsigned long long val, unsigned long long val2); extern float atomiccasf(void * address, float val, float val2); extern double atomiccasd(void * address, double val, double val2); extern int __pgi_gangidx(void); extern int __pgi_workeridx(void); extern int __pgi_vectoridx(void); extern int __pgi_blockidx(int); extern int __pgi_threadidx(int); extern void * __builtin_va_arg(); extern int __builtin_va_start(); # 315 "/usr/include/libio.h" extern struct _IO_FILE_plus _IO_2_1_stdin_; # 316 "/usr/include/libio.h" extern struct _IO_FILE_plus _IO_2_1_stdout_; # 317 "/usr/include/libio.h" extern struct _IO_FILE_plus _IO_2_1_stderr_; extern int __underflow(struct _IO_FILE * ); extern int __uflow(struct _IO_FILE * ); extern int __overflow(struct _IO_FILE * , int); extern int _IO_getc(struct _IO_FILE * __fp); extern int _IO_putc(int __c, struct _IO_FILE * __fp); extern int _IO_feof(struct _IO_FILE * __fp); extern int _IO_ferror(struct _IO_FILE * __fp); extern int _IO_peekc_locked(struct _IO_FILE * __fp); extern void _IO_flockfile(struct _IO_FILE * ); extern void _IO_funlockfile(struct _IO_FILE * ); extern int _IO_ftrylockfile(struct _IO_FILE * ); extern int _IO_vfscanf(struct _IO_FILE * restrict, char const * restrict, struct __pgi_tag [1], int * restrict); extern int _IO_vfprintf(struct _IO_FILE * restrict, char const * restrict, struct __pgi_tag [1]); 
extern long _IO_padn(struct _IO_FILE * , int, long); extern unsigned long _IO_sgetn(struct _IO_FILE * , void * , unsigned long); extern long _IO_seekoff(struct _IO_FILE * , long, int, int); extern long _IO_seekpos(struct _IO_FILE * , long, int); extern void _IO_free_backup_area(struct _IO_FILE * ); # 168 "/usr/include/stdio.h" extern struct _IO_FILE * stdin; # 169 "/usr/include/stdio.h" extern struct _IO_FILE * stdout; # 170 "/usr/include/stdio.h" extern struct _IO_FILE * stderr; extern int remove(char const * __filename); extern int rename(char const * __old, char const * __new); extern int renameat(int __oldfd, char const * __old, int __newfd, char const * __new); extern struct _IO_FILE * tmpfile(void); extern char * tmpnam(char * __s); extern char * tmpnam_r(char * __s); extern char * tempnam(char const * __dir, char const * __pfx); extern int fclose(struct _IO_FILE * __stream); extern int fflush(struct _IO_FILE * __stream); extern int fflush_unlocked(struct _IO_FILE * __stream); extern struct _IO_FILE * fopen(char const * restrict __filename, char const * restrict __modes); extern struct _IO_FILE * freopen(char const * restrict __filename, char const * restrict __modes, struct _IO_FILE * restrict __stream); extern struct _IO_FILE * fdopen(int __fd, char const * __modes); extern struct _IO_FILE * fmemopen(void * __s, unsigned long __len, char const * __modes); extern struct _IO_FILE * open_memstream(char * * __bufloc, unsigned long * __sizeloc); extern void setbuf(struct _IO_FILE * restrict __stream, char * restrict __buf); extern int setvbuf(struct _IO_FILE * restrict __stream, char * restrict __buf, int __modes, unsigned long __n); extern void setbuffer(struct _IO_FILE * restrict __stream, char * restrict __buf, unsigned long __size); extern void setlinebuf(struct _IO_FILE * __stream); extern int fprintf(struct _IO_FILE * restrict __stream, char const * restrict __format, ...); extern int printf(char const * restrict __format, ...); extern int sprintf(char * 
restrict __s, char const * restrict __format, ...); extern int vfprintf(struct _IO_FILE * restrict __s, char const * restrict __format, struct __pgi_tag __arg[1]); extern int vprintf(char const * restrict __format, struct __pgi_tag __arg[1]); extern int vsprintf(char * restrict __s, char const * restrict __format, struct __pgi_tag __arg[1]); extern __attribute__((format(__printf__, 3, 4))) int snprintf(char * restrict __s, unsigned long __maxlen, char const * restrict __format, ...); extern __attribute__((format(__printf__, 3, 0))) int vsnprintf(char * restrict __s, unsigned long __maxlen, char const * restrict __format, struct __pgi_tag __arg[1]); extern __attribute__((format(__printf__, 2, 0))) int vdprintf(int __fd, char const * restrict __fmt, struct __pgi_tag __arg[1]); extern __attribute__((format(__printf__, 2, 3))) int dprintf(int __fd, char const * restrict __fmt, ...); extern int fscanf(struct _IO_FILE * restrict __stream, char const * restrict __format, ...); extern int scanf(char const * restrict __format, ...); extern int sscanf(char const * restrict __s, char const * restrict __format, ...); extern int __isoc99_fscanf(struct _IO_FILE * restrict __stream, char const * restrict __format, ...); extern int __isoc99_scanf(char const * restrict __format, ...); extern int __isoc99_sscanf(char const * restrict __s, char const * restrict __format, ...); extern __attribute__((format(__scanf__, 2, 0))) int vfscanf(struct _IO_FILE * restrict __s, char const * restrict __format, struct __pgi_tag __arg[1]); extern __attribute__((format(__scanf__, 1, 0))) int vscanf(char const * restrict __format, struct __pgi_tag __arg[1]); extern __attribute__((format(__scanf__, 2, 0))) int vsscanf(char const * restrict __s, char const * restrict __format, struct __pgi_tag __arg[1]); extern int __isoc99_vfscanf(struct _IO_FILE * restrict __s, char const * restrict __format, struct __pgi_tag __arg[1]); extern int __isoc99_vscanf(char const * restrict __format, struct __pgi_tag 
__arg[1]); extern int __isoc99_vsscanf(char const * restrict __s, char const * restrict __format, struct __pgi_tag __arg[1]); extern int fgetc(struct _IO_FILE * __stream); extern int getc(struct _IO_FILE * __stream); extern int getchar(void); extern int getc_unlocked(struct _IO_FILE * __stream); extern int getchar_unlocked(void); extern int fgetc_unlocked(struct _IO_FILE * __stream); extern int fputc(int __c, struct _IO_FILE * __stream); extern int putc(int __c, struct _IO_FILE * __stream); extern int putchar(int __c); extern int fputc_unlocked(int __c, struct _IO_FILE * __stream); extern int putc_unlocked(int __c, struct _IO_FILE * __stream); extern int putchar_unlocked(int __c); extern int getw(struct _IO_FILE * __stream); extern int putw(int __w, struct _IO_FILE * __stream); extern char * fgets(char * restrict __s, int __n, struct _IO_FILE * restrict __stream); extern char * gets(char * __s); extern long __getdelim(char * * restrict __lineptr, unsigned long * restrict __n, int __delimiter, struct _IO_FILE * restrict __stream); extern long getdelim(char * * restrict __lineptr, unsigned long * restrict __n, int __delimiter, struct _IO_FILE * restrict __stream); extern long getline(char * * restrict __lineptr, unsigned long * restrict __n, struct _IO_FILE * restrict __stream); extern int fputs(char const * restrict __s, struct _IO_FILE * restrict __stream); extern int puts(char const * __s); extern int ungetc(int __c, struct _IO_FILE * __stream); extern unsigned long fread(void * restrict __ptr, unsigned long __size, unsigned long __n, struct _IO_FILE * restrict __stream); extern unsigned long fwrite(void const * restrict __ptr, unsigned long __size, unsigned long __n, struct _IO_FILE * restrict __s); extern unsigned long fread_unlocked(void * restrict __ptr, unsigned long __size, unsigned long __n, struct _IO_FILE * restrict __stream); extern unsigned long fwrite_unlocked(void const * restrict __ptr, unsigned long __size, unsigned long __n, struct _IO_FILE * 
restrict __stream); extern int fseek(struct _IO_FILE * __stream, long __off, int __whence); extern long ftell(struct _IO_FILE * __stream); extern void rewind(struct _IO_FILE * __stream); extern int fseeko(struct _IO_FILE * __stream, long __off, int __whence); extern long ftello(struct _IO_FILE * __stream); extern int fgetpos(struct _IO_FILE * restrict __stream, struct anon_type_5__G_fpos_t * restrict __pos); extern int fsetpos(struct _IO_FILE * __stream, struct anon_type_5__G_fpos_t const * __pos); extern void clearerr(struct _IO_FILE * __stream); extern int feof(struct _IO_FILE * __stream); extern int ferror(struct _IO_FILE * __stream); extern void clearerr_unlocked(struct _IO_FILE * __stream); extern int feof_unlocked(struct _IO_FILE * __stream); extern int ferror_unlocked(struct _IO_FILE * __stream); extern void perror(char const * __s); # 26 "/usr/include/x86_64-linux-gnu/bits/sys_errlist.h" extern int sys_nerr; # 27 "/usr/include/x86_64-linux-gnu/bits/sys_errlist.h" extern char const * const sys_errlist[]; extern int fileno(struct _IO_FILE * __stream); extern int fileno_unlocked(struct _IO_FILE * __stream); extern struct _IO_FILE * popen(char const * __command, char const * __modes); extern int pclose(struct _IO_FILE * __stream); extern char * ctermid(char * __s); extern void flockfile(struct _IO_FILE * __stream); extern int ftrylockfile(struct _IO_FILE * __stream); extern void funlockfile(struct _IO_FILE * __stream); # 44 "/usr/include/x86_64-linux-gnu/bits/byteswap-16.h" static unsigned short __bswap_16(unsigned short __bsx) { # 47 "/usr/include/x86_64-linux-gnu/bits/byteswap-16.h" return (unsigned short)(((__bsx >> (8)) & (255)) | ((__bsx & (255)) << (8))); } # 87 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" static unsigned int __bswap_32(unsigned int __bsx) { # 90 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" return ((((__bsx & (-16777216u)) >> (24)) | ((__bsx & (16711680)) >> (8))) | ((__bsx & (65280)) << (8))) | ((__bsx & (255)) << (24)); } # 148 
"/usr/include/x86_64-linux-gnu/bits/byteswap.h" static unsigned long __bswap_64(unsigned long __bsx) { # 151 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" return ((((((((__bsx & (0xff00000000000000ull)) >> (56)) | ((__bsx & (0xff000000000000ull)) >> (40))) | ((__bsx & (0xff0000000000ull)) >> (24))) | ((__bsx & (0xff00000000ull)) >> (8))) | ((__bsx & (0x0ff000000ull)) << (8))) | ((__bsx & (0x000ff0000ull)) << (24))) | ((__bsx & (0x00000ff00ull)) << (40))) | ((__bsx & (0x0000000ffull)) << (56)); } extern unsigned long __ctype_get_mb_cur_max(void); extern double atof(char const * __nptr); extern int atoi(char const * __nptr); extern long atol(char const * __nptr); extern long long atoll(char const * __nptr); extern double strtod(char const * restrict __nptr, char * * restrict __endptr); extern float strtof(char const * restrict __nptr, char * * restrict __endptr); extern long double strtold(char const * restrict __nptr, char * * restrict __endptr); extern long strtol(char const * restrict __nptr, char * * restrict __endptr, int __base); extern unsigned long strtoul(char const * restrict __nptr, char * * restrict __endptr, int __base); extern long long strtoq(char const * restrict __nptr, char * * restrict __endptr, int __base); extern unsigned long long strtouq(char const * restrict __nptr, char * * restrict __endptr, int __base); extern long long strtoll(char const * restrict __nptr, char * * restrict __endptr, int __base); extern unsigned long long strtoull(char const * restrict __nptr, char * * restrict __endptr, int __base); extern char * l64a(long __n); extern long a64l(char const * __s); extern int select(int __nfds, struct anon_type_14_fd_set * restrict __readfds, struct anon_type_14_fd_set * restrict __writefds, struct anon_type_14_fd_set * restrict __exceptfds, struct timeval * restrict __timeout); extern int pselect(int __nfds, struct anon_type_14_fd_set * restrict __readfds, struct anon_type_14_fd_set * restrict __writefds, struct anon_type_14_fd_set * 
restrict __exceptfds, struct timespec const * restrict __timeout, struct anon_type_13___sigset_t const * restrict __sigmask); extern unsigned int gnu_dev_major(unsigned long long __dev); extern unsigned int gnu_dev_minor(unsigned long long __dev); extern unsigned long long gnu_dev_makedev(unsigned int __major, unsigned int __minor); extern long random(void); extern void srandom(unsigned int __seed); extern char * initstate(unsigned int __seed, char * __statebuf, unsigned long __statelen); extern char * setstate(char * __statebuf); extern int random_r(struct random_data * restrict __buf, int * restrict __result); extern int srandom_r(unsigned int __seed, struct random_data * __buf); extern int initstate_r(unsigned int __seed, char * restrict __statebuf, unsigned long __statelen, struct random_data * restrict __buf); extern int setstate_r(char * restrict __statebuf, struct random_data * restrict __buf); extern int rand(void); extern void srand(unsigned int __seed); extern int rand_r(unsigned int * __seed); extern double drand48(void); extern double erand48(unsigned short __xsubi[3]); extern long lrand48(void); extern long nrand48(unsigned short __xsubi[3]); extern long mrand48(void); extern long jrand48(unsigned short __xsubi[3]); extern void srand48(long __seedval); extern unsigned short * seed48(unsigned short __seed16v[3]); extern void lcong48(unsigned short __param[7]); extern int drand48_r(struct drand48_data * restrict __buffer, double * restrict __result); extern int erand48_r(unsigned short __xsubi[3], struct drand48_data * restrict __buffer, double * restrict __result); extern int lrand48_r(struct drand48_data * restrict __buffer, long * restrict __result); extern int nrand48_r(unsigned short __xsubi[3], struct drand48_data * restrict __buffer, long * restrict __result); extern int mrand48_r(struct drand48_data * restrict __buffer, long * restrict __result); extern int jrand48_r(unsigned short __xsubi[3], struct drand48_data * restrict __buffer, long * 
restrict __result); extern int srand48_r(long __seedval, struct drand48_data * __buffer); extern int seed48_r(unsigned short __seed16v[3], struct drand48_data * __buffer); extern int lcong48_r(unsigned short __param[7], struct drand48_data * __buffer); extern void * malloc(unsigned long __size); extern void * calloc(unsigned long __nmemb, unsigned long __size); extern void * realloc(void * __ptr, unsigned long __size); extern void free(void * __ptr); extern void cfree(void * __ptr); extern void * __alloca(unsigned long __size); extern void * alloca(unsigned long __size); extern void * __builtin_alloca(unsigned long __size); extern void * valloc(unsigned long __size); extern int posix_memalign(void * * __memptr, unsigned long __alignment, unsigned long __size); extern __attribute__((noreturn)) void abort(void); extern int atexit(void (* __func)(void)); extern int on_exit(void (* __func)(int, void * ), void * __arg); extern __attribute__((noreturn)) void exit(int __status); extern __attribute__((noreturn)) void _Exit(int __status); extern char * getenv(char const * __name); extern int putenv(char * __string); extern int setenv(char const * __name, char const * __value, int __replace); extern int unsetenv(char const * __name); extern int clearenv(void); extern char * mktemp(char * __template); extern int mkstemp(char * __template); extern int mkstemps(char * __template, int __suffixlen); extern char * mkdtemp(char * __template); extern int system(char const * __command); extern char * realpath(char const * restrict __name, char * restrict __resolved); extern void * bsearch(void const * __key, void const * __base, unsigned long __nmemb, unsigned long __size, int (* __compar)(void const * , void const * )); extern void qsort(void * __base, unsigned long __nmemb, unsigned long __size, int (* __compar)(void const * , void const * )); extern __attribute__((const)) int abs(int __x); extern __attribute__((const)) long labs(long __x); extern __attribute__((const)) long long 
llabs(long long __x); extern __attribute__((const)) struct anon_type_10_div_t div(int __numer, int __denom); extern __attribute__((const)) struct anon_type_11_ldiv_t ldiv(long __numer, long __denom); extern __attribute__((const)) struct anon_type_12_lldiv_t lldiv(long long __numer, long long __denom); extern char * ecvt(double __value, int __ndigit, int * restrict __decpt, int * restrict __sign); extern char * fcvt(double __value, int __ndigit, int * restrict __decpt, int * restrict __sign); extern char * gcvt(double __value, int __ndigit, char * __buf); extern char * qecvt(long double __value, int __ndigit, int * restrict __decpt, int * restrict __sign); extern char * qfcvt(long double __value, int __ndigit, int * restrict __decpt, int * restrict __sign); extern char * qgcvt(long double __value, int __ndigit, char * __buf); extern int ecvt_r(double __value, int __ndigit, int * restrict __decpt, int * restrict __sign, char * restrict __buf, unsigned long __len); extern int fcvt_r(double __value, int __ndigit, int * restrict __decpt, int * restrict __sign, char * restrict __buf, unsigned long __len); extern int qecvt_r(long double __value, int __ndigit, int * restrict __decpt, int * restrict __sign, char * restrict __buf, unsigned long __len); extern int qfcvt_r(long double __value, int __ndigit, int * restrict __decpt, int * restrict __sign, char * restrict __buf, unsigned long __len); extern int mblen(char const * __s, unsigned long __n); extern int mbtowc(int * restrict __pwc, char const * restrict __s, unsigned long __n); extern int wctomb(char * __s, int __wchar); extern unsigned long mbstowcs(int * restrict __pwcs, char const * restrict __s, unsigned long __n); extern unsigned long wcstombs(char * restrict __s, int const * restrict __pwcs, unsigned long __n); extern int rpmatch(char const * __response); extern int getsubopt(char * * restrict __optionp, char * const * restrict __tokens, char * * restrict __valuep); extern int getloadavg(double __loadavg[], int 
__nelem); int __builtin_abs(int); extern void * malloc_managed(unsigned long); extern void * calloc_managed(unsigned long, unsigned long); extern void free_managed(void * ); extern void cfree_managed(void * ); extern void * realloc_managed(void * , unsigned long); extern void * valloc_managed(unsigned long); extern void * pvalloc_managed(unsigned long); extern void * memalign_managed(unsigned long, unsigned long); extern int posix_memalign_managed(void * * , unsigned long, unsigned long); extern char * tmpnam_managed(char * ); extern void * memcpy(void * restrict __dest, void const * restrict __src, unsigned long __n); extern void * memmove(void * __dest, void const * __src, unsigned long __n); extern void * memccpy(void * restrict __dest, void const * restrict __src, int __c, unsigned long __n); extern void * memset(void * __s, int __c, unsigned long __n); extern int memcmp(void const * __s1, void const * __s2, unsigned long __n); extern void * memchr(void const * __s, int __c, unsigned long __n); extern char * strcpy(char * restrict __dest, char const * restrict __src); extern char * strncpy(char * restrict __dest, char const * restrict __src, unsigned long __n); extern char * strcat(char * restrict __dest, char const * restrict __src); extern char * strncat(char * restrict __dest, char const * restrict __src, unsigned long __n); extern int strcmp(char const * __s1, char const * __s2); extern int strncmp(char const * __s1, char const * __s2, unsigned long __n); extern int strcoll(char const * __s1, char const * __s2); extern unsigned long strxfrm(char * restrict __dest, char const * restrict __src, unsigned long __n); extern int strcoll_l(char const * __s1, char const * __s2, struct __locale_struct * __l); extern unsigned long strxfrm_l(char * __dest, char const * __src, unsigned long __n, struct __locale_struct * __l); extern char * strdup(char const * __s); extern char * strndup(char const * __string, unsigned long __n); extern char * strchr(char const * __s, 
int __c); extern char * strrchr(char const * __s, int __c); extern unsigned long strcspn(char const * __s, char const * __reject); extern unsigned long strspn(char const * __s, char const * __accept); extern char * strpbrk(char const * __s, char const * __accept); extern char * strstr(char const * __haystack, char const * __needle); extern char * strtok(char * restrict __s, char const * restrict __delim); extern char * __strtok_r(char * restrict __s, char const * restrict __delim, char * * restrict __save_ptr); extern char * strtok_r(char * restrict __s, char const * restrict __delim, char * * restrict __save_ptr); extern unsigned long strlen(char const * __s); extern unsigned long strnlen(char const * __string, unsigned long __maxlen); extern char * strerror(int __errnum); extern int __xpg_strerror_r(int __errnum, char * __buf, unsigned long __buflen); extern char * strerror_l(int __errnum, struct __locale_struct * __l); extern void __bzero(void * __s, unsigned long __n); extern void bcopy(void const * __src, void * __dest, unsigned long __n); extern void bzero(void * __s, unsigned long __n); extern int bcmp(void const * __s1, void const * __s2, unsigned long __n); extern char * index(char const * __s, int __c); extern char * rindex(char const * __s, int __c); extern __attribute__((const)) int ffs(int __i); extern int strcasecmp(char const * __s1, char const * __s2); extern int strncasecmp(char const * __s1, char const * __s2, unsigned long __n); extern char * strsep(char * * restrict __stringp, char const * restrict __delim); extern char * strsignal(int __sig); extern char * __stpcpy(char * restrict __dest, char const * restrict __src); extern char * stpcpy(char * restrict __dest, char const * restrict __src); extern char * __stpncpy(char * restrict __dest, char const * restrict __src, unsigned long __n); extern char * stpncpy(char * restrict __dest, char const * restrict __src, unsigned long __n); # 27 "/tmp/tmp.rutwVzhqmN/1.c" int __MACC_NUMGPUS = -(1); # 29 
"/tmp/tmp.rutwVzhqmN/1.c" int __macc_get_num_gpus() { # 31 "/tmp/tmp.rutwVzhqmN/1.c" return acc_get_num_devices(acc_device_nvidia); } # 34 "/tmp/tmp.rutwVzhqmN/1.c" int __MACC_TOPOLOGY[10]; # 36 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_set_gpu_num(int i) { # 38 "/tmp/tmp.rutwVzhqmN/1.c" acc_set_device_num(__MACC_TOPOLOGY[i], acc_device_nvidia); } # 61 "/tmp/tmp.rutwVzhqmN/1.c" struct __MaccDataTable * __MACC_DATA_TABLE_SET; # 74 "/tmp/tmp.rutwVzhqmN/1.c" struct __MaccDataWrapCache * __MACC_DATA_WRAP_CACHE_SET; # 76 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_data_table_insert(int gpu_num, void * ptr, int type_size, int entire_lb, int entire_ub) { # 79 "/tmp/tmp.rutwVzhqmN/1.c" int index = (((long)(ptr)) / (16)) % (256); # 81 "/tmp/tmp.rutwVzhqmN/1.c" struct __MaccDataTableEntry * new_entry = malloc_managed(sizeof(struct __MaccDataTableEntry)); # 83 "/tmp/tmp.rutwVzhqmN/1.c" (new_entry->addr) = ptr; # 84 "/tmp/tmp.rutwVzhqmN/1.c" (new_entry->addr_ub) = (ptr + (entire_ub * type_size)); # 85 "/tmp/tmp.rutwVzhqmN/1.c" (new_entry->type_size) = type_size; # 86 "/tmp/tmp.rutwVzhqmN/1.c" (new_entry->entire_lb) = entire_lb; # 87 "/tmp/tmp.rutwVzhqmN/1.c" (new_entry->entire_ub) = entire_ub; # 88 "/tmp/tmp.rutwVzhqmN/1.c" (new_entry->dirty) = (0); # 89 "/tmp/tmp.rutwVzhqmN/1.c" (new_entry->dirty_lb) = (-(1)); # 90 "/tmp/tmp.rutwVzhqmN/1.c" (new_entry->dirty_ub) = (-(1)); # 91 "/tmp/tmp.rutwVzhqmN/1.c" (new_entry->next) = (*(((__MACC_DATA_TABLE_SET + gpu_num)->entries) + index)); # 93 "/tmp/tmp.rutwVzhqmN/1.c" (*(((__MACC_DATA_TABLE_SET + gpu_num)->entries) + index)) = new_entry; } # 96 "/tmp/tmp.rutwVzhqmN/1.c" struct __MaccDataTableEntry * __macc_data_table_find(int gpu_num, void * ptr) { # 98 "/tmp/tmp.rutwVzhqmN/1.c" int index = (((long)(ptr)) / (16)) % (256); # 99 "/tmp/tmp.rutwVzhqmN/1.c" struct __MaccDataTableEntry * entry = *(((__MACC_DATA_TABLE_SET + gpu_num)->entries) + index); # 101 "/tmp/tmp.rutwVzhqmN/1.c" while(entry != ((void * )(0))) { { # 102 "/tmp/tmp.rutwVzhqmN/1.c" 
if((entry->addr) == ptr) { # 103 "/tmp/tmp.rutwVzhqmN/1.c" (entry->offset) = (0); # 104 "/tmp/tmp.rutwVzhqmN/1.c" return entry; } # 107 "/tmp/tmp.rutwVzhqmN/1.c" entry = (entry->next); } } { # 110 "/tmp/tmp.rutwVzhqmN/1.c" struct __MaccDataWrapCache wrap_cache = __MACC_DATA_WRAP_CACHE_SET[gpu_num]; # 111 "/tmp/tmp.rutwVzhqmN/1.c" int lane = (((long)(ptr)) / (16)) % (16); { # 113 "/tmp/tmp.rutwVzhqmN/1.c" int i; # 113 "/tmp/tmp.rutwVzhqmN/1.c" for(i = (0); i < (*(((&(wrap_cache))->cachenum) + lane)); i++) { { # 114 "/tmp/tmp.rutwVzhqmN/1.c" if(ptr == (*(((&(wrap_cache))->addr) + ((lane * (16)) + i)))) { # 115 "/tmp/tmp.rutwVzhqmN/1.c" entry = (*(((&(wrap_cache))->entry) + ((lane * (16)) + i))); # 116 "/tmp/tmp.rutwVzhqmN/1.c" (entry->offset) = (*(((&(wrap_cache))->offset) + ((lane * (16)) + i))); # 117 "/tmp/tmp.rutwVzhqmN/1.c" return entry; } } } } { # 121 "/tmp/tmp.rutwVzhqmN/1.c" int i; # 121 "/tmp/tmp.rutwVzhqmN/1.c" for(i = (0); i < (256); i++) { { # 122 "/tmp/tmp.rutwVzhqmN/1.c" entry = (*(((__MACC_DATA_TABLE_SET + gpu_num)->entries) + i)); # 124 "/tmp/tmp.rutwVzhqmN/1.c" while(entry != ((void * )(0))) { { # 125 "/tmp/tmp.rutwVzhqmN/1.c" if(((entry->addr) <= ptr) && (ptr <= (entry->addr_ub))) { # 126 "/tmp/tmp.rutwVzhqmN/1.c" int offset = (ptr - (entry->addr)) / (entry->type_size); # 128 "/tmp/tmp.rutwVzhqmN/1.c" int cachenum = *(((&(wrap_cache))->cachenum) + lane); # 130 "/tmp/tmp.rutwVzhqmN/1.c" if(cachenum == (16)) { # 131 "/tmp/tmp.rutwVzhqmN/1.c" cachenum = (0); } # 134 "/tmp/tmp.rutwVzhqmN/1.c" (*(((&(wrap_cache))->addr) + ((lane * (16)) + cachenum))) = (entry->addr); # 135 "/tmp/tmp.rutwVzhqmN/1.c" (*(((&(wrap_cache))->entry) + ((lane * (16)) + cachenum))) = entry; # 136 "/tmp/tmp.rutwVzhqmN/1.c" (*(((&(wrap_cache))->offset) + ((lane * (16)) + cachenum))) = offset; # 138 "/tmp/tmp.rutwVzhqmN/1.c" (*(((&(wrap_cache))->cachenum) + lane)) = (cachenum + (1)); # 140 "/tmp/tmp.rutwVzhqmN/1.c" (entry->offset) = offset; # 141 "/tmp/tmp.rutwVzhqmN/1.c" return 
entry; } # 144 "/tmp/tmp.rutwVzhqmN/1.c" entry = (entry->next); } } } } } # 148 "/tmp/tmp.rutwVzhqmN/1.c" fprintf(stderr, "Error on __macc_data_table_find: Not found the item %p\n", ptr); # 149 "/tmp/tmp.rutwVzhqmN/1.c" exit(-(1)); # 151 "/tmp/tmp.rutwVzhqmN/1.c" return (void * )(0); } } # 154 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_data_table_delete(int gpu_num, void * ptr) { # 156 "/tmp/tmp.rutwVzhqmN/1.c" int index = (((long)(ptr)) / (16)) % (256); # 157 "/tmp/tmp.rutwVzhqmN/1.c" struct __MaccDataTableEntry * entry = *(((__MACC_DATA_TABLE_SET + gpu_num)->entries) + index); # 158 "/tmp/tmp.rutwVzhqmN/1.c" struct __MaccDataTableEntry * pre = (void * )(0); # 160 "/tmp/tmp.rutwVzhqmN/1.c" memset((__MACC_DATA_WRAP_CACHE_SET + gpu_num)->cachenum, 0, (16) * (sizeof(int))); # 162 "/tmp/tmp.rutwVzhqmN/1.c" if(entry != ((void * )(0))) { # 163 "/tmp/tmp.rutwVzhqmN/1.c" if((entry->addr) == ptr) { # 164 "/tmp/tmp.rutwVzhqmN/1.c" (*(((__MACC_DATA_TABLE_SET + gpu_num)->entries) + index)) = (entry->next); # 165 "/tmp/tmp.rutwVzhqmN/1.c" free_managed(entry); # 166 "/tmp/tmp.rutwVzhqmN/1.c" return ; } # 169 "/tmp/tmp.rutwVzhqmN/1.c" pre = entry; # 170 "/tmp/tmp.rutwVzhqmN/1.c" entry = (entry->next); } # 173 "/tmp/tmp.rutwVzhqmN/1.c" while((pre != ((void * )(0))) && (entry != ((void * )(0)))) { { # 174 "/tmp/tmp.rutwVzhqmN/1.c" if((entry->addr) == ptr) { # 175 "/tmp/tmp.rutwVzhqmN/1.c" (pre->next) = (entry->next); # 176 "/tmp/tmp.rutwVzhqmN/1.c" free_managed(entry); # 177 "/tmp/tmp.rutwVzhqmN/1.c" return ; } # 180 "/tmp/tmp.rutwVzhqmN/1.c" pre = entry; # 181 "/tmp/tmp.rutwVzhqmN/1.c" entry = (entry->next); } } # 184 "/tmp/tmp.rutwVzhqmN/1.c" fprintf(stderr, "Error on __macc_data_table_delete: Not found the item %p\n", ptr); # 185 "/tmp/tmp.rutwVzhqmN/1.c" exit(-(1)); } # 188 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_delete(int gpu_num, void * ptr, int type_size, int lb, int length) { # 190 "/tmp/tmp.rutwVzhqmN/1.c" acc_delete_async(ptr + (lb * type_size), length * type_size, gpu_num); # 
191 "/tmp/tmp.rutwVzhqmN/1.c" __macc_data_table_delete(gpu_num, ptr); # 192 "/tmp/tmp.rutwVzhqmN/1.c" acc_wait(gpu_num); } # 195 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_copyout(int gpu_num, void * ptr, int type_size, int lb, int length) { # 197 "/tmp/tmp.rutwVzhqmN/1.c" struct __MaccDataTableEntry * entry = __macc_data_table_find(gpu_num, ptr); # 199 "/tmp/tmp.rutwVzhqmN/1.c" if(entry->dirty) { # 200 "/tmp/tmp.rutwVzhqmN/1.c" acc_update_self_async((entry->addr) + ((entry->dirty_lb) * (entry->type_size)), (((entry->dirty_ub) - (entry->dirty_lb)) + (1)) * (entry->type_size), gpu_num); } # 204 "/tmp/tmp.rutwVzhqmN/1.c" __macc_delete(gpu_num, ptr, type_size, lb, length); } # 207 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_copyin(int gpu_num, void * ptr, int type_size, int lb, int length) { # 209 "/tmp/tmp.rutwVzhqmN/1.c" acc_copyin_async(ptr + (lb * type_size), length * type_size, gpu_num); # 210 "/tmp/tmp.rutwVzhqmN/1.c" __macc_data_table_insert(gpu_num, ptr, type_size, lb, (lb + length) - (1)); # 211 "/tmp/tmp.rutwVzhqmN/1.c" acc_wait(gpu_num); } # 214 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_create(int gpu_num, void * ptr, int type_size, int lb, int length) { # 216 "/tmp/tmp.rutwVzhqmN/1.c" acc_create_async(ptr + (lb * type_size), length * type_size, gpu_num); # 217 "/tmp/tmp.rutwVzhqmN/1.c" __macc_data_table_insert(gpu_num, ptr, type_size, lb, (lb + length) - (1)); # 218 "/tmp/tmp.rutwVzhqmN/1.c" acc_wait(gpu_num); } # 221 "/tmp/tmp.rutwVzhqmN/1.c" void * __macc_malloc(unsigned long size) { # 223 "/tmp/tmp.rutwVzhqmN/1.c" void * ret = malloc_managed(size); #pragma omp parallel num_threads ( __MACC_NUMGPUS ) { # 227 "/tmp/tmp.rutwVzhqmN/1.c" __macc_create(omp_get_thread_num(), ret, 1, 0, size); } # 230 "/tmp/tmp.rutwVzhqmN/1.c" return ret; } # 233 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_free(void * ptr) { #pragma omp parallel num_threads ( __MACC_NUMGPUS ) { # 237 "/tmp/tmp.rutwVzhqmN/1.c" int gpu_num = omp_get_thread_num(); # 238 "/tmp/tmp.rutwVzhqmN/1.c" struct 
__MaccDataTableEntry * entry = __macc_data_table_find(gpu_num, ptr); # 240 "/tmp/tmp.rutwVzhqmN/1.c" __macc_delete(gpu_num, ptr, 1, 0, (entry->entire_ub) + (1)); } # 242 "/tmp/tmp.rutwVzhqmN/1.c" free_managed(ptr); } # 245 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_update_self(int gpu_num, void * ptr, int type_size, int lb, int length) { # 247 "/tmp/tmp.rutwVzhqmN/1.c" struct __MaccDataTableEntry * entry = __macc_data_table_find(gpu_num, ptr); # 248 "/tmp/tmp.rutwVzhqmN/1.c" ptr = (entry->addr); # 249 "/tmp/tmp.rutwVzhqmN/1.c" lb += (entry->offset); { # 250 "/tmp/tmp.rutwVzhqmN/1.c" int ub = (lb + length) - (1); # 252 "/tmp/tmp.rutwVzhqmN/1.c" if((entry->dirty) && (!(((entry->dirty_lb) > ub) || ((entry->dirty_ub) < lb)))) { # 253 "/tmp/tmp.rutwVzhqmN/1.c" int new_lb = ((entry->dirty_lb) > lb) ?(entry->dirty_lb) : lb; # 254 "/tmp/tmp.rutwVzhqmN/1.c" int new_ub = ((entry->dirty_ub) < ub) ?(entry->dirty_ub) : ub; # 255 "/tmp/tmp.rutwVzhqmN/1.c" acc_update_self(ptr + (new_lb * type_size), ((new_ub - new_lb) + (1)) * type_size); } } } # 259 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_update_device(int gpu_num, void * ptr, int type_size, int lb, int length) { # 261 "/tmp/tmp.rutwVzhqmN/1.c" acc_update_device(ptr + (lb * type_size), length * type_size); } # 264 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_init_access_region(int gpu_num, int * lb_set, int * ub_set) { # 266 "/tmp/tmp.rutwVzhqmN/1.c" (lb_set[gpu_num]) = (2147483647); # 267 "/tmp/tmp.rutwVzhqmN/1.c" (ub_set[gpu_num]) = (-(1)); } # 270 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_update_access_region(int gpu_num, int * lb_set, int * ub_set, int val) { # 272 "/tmp/tmp.rutwVzhqmN/1.c" (lb_set[gpu_num]) = (((lb_set[gpu_num]) < val) ?(lb_set[gpu_num]) : val); # 273 "/tmp/tmp.rutwVzhqmN/1.c" (ub_set[gpu_num]) = (((ub_set[gpu_num]) > val) ?(ub_set[gpu_num]) : val); } # 276 "/tmp/tmp.rutwVzhqmN/1.c" int __macc_region_is_overlapping(int * lb_set, int * ub_set) { { # 278 "/tmp/tmp.rutwVzhqmN/1.c" int i; # 278 "/tmp/tmp.rutwVzhqmN/1.c" for(i = 
(0); i < (__MACC_NUMGPUS - (1)); i++) { { { # 279 "/tmp/tmp.rutwVzhqmN/1.c" int j; # 279 "/tmp/tmp.rutwVzhqmN/1.c" for(j = (i + (1)); j < __MACC_NUMGPUS; j++) { { # 280 "/tmp/tmp.rutwVzhqmN/1.c" if(!(((lb_set[i]) > (ub_set[j])) || ((ub_set[i]) < (lb_set[j])))) { # 281 "/tmp/tmp.rutwVzhqmN/1.c" return 1; } } } } } } } # 283 "/tmp/tmp.rutwVzhqmN/1.c" return 0; } # 287 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_calc_loop_region(int * loop_lb_set, int * loop_ub_set, int entire_start, int entire_end, int step, int until_equal) { # 291 "/tmp/tmp.rutwVzhqmN/1.c" int tmp = entire_start + (step * ((entire_end - entire_start) / step)); # 292 "/tmp/tmp.rutwVzhqmN/1.c" entire_end = (tmp - ((until_equal || (entire_end != tmp)) ?(0) : step)); { # 294 "/tmp/tmp.rutwVzhqmN/1.c" int len = (entire_end - entire_start) + step; # 295 "/tmp/tmp.rutwVzhqmN/1.c" int width = (int)(((float)(len)) / __MACC_NUMGPUS); # 296 "/tmp/tmp.rutwVzhqmN/1.c" width -= (width % step); { # 297 "/tmp/tmp.rutwVzhqmN/1.c" int rem = (len - (width * __MACC_NUMGPUS)) / step; # 298 "/tmp/tmp.rutwVzhqmN/1.c" width -= step; { # 300 "/tmp/tmp.rutwVzhqmN/1.c" int pos = entire_start; { # 302 "/tmp/tmp.rutwVzhqmN/1.c" int i; # 302 "/tmp/tmp.rutwVzhqmN/1.c" for(i = (0); i < __MACC_NUMGPUS; i++) { { # 303 "/tmp/tmp.rutwVzhqmN/1.c" (loop_lb_set[i]) = pos; # 304 "/tmp/tmp.rutwVzhqmN/1.c" pos = ((width < (0)) ? pos : ((((pos + width) + ((i < rem) ? step : (0))) < entire_end) ?((pos + width) + ((i < rem) ? 
step : (0))) : entire_end)); # 305 "/tmp/tmp.rutwVzhqmN/1.c" (loop_ub_set[i]) = pos; # 306 "/tmp/tmp.rutwVzhqmN/1.c" pos = (((pos + step) < entire_end) ?(pos + step) : entire_end); } } } } } } } # 310 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_adjust_data_region(void * ptr, int gpu_num, int * lb_set, int * ub_set) { # 312 "/tmp/tmp.rutwVzhqmN/1.c" struct __MaccDataTableEntry * entry = __macc_data_table_find(gpu_num, ptr); # 314 "/tmp/tmp.rutwVzhqmN/1.c" (lb_set[gpu_num]) += (entry->offset); # 315 "/tmp/tmp.rutwVzhqmN/1.c" (ub_set[gpu_num]) += (entry->offset); } # 318 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_rewrite_loop_region_into_single(int * loop_lb_set, int * loop_ub_set) { # 320 "/tmp/tmp.rutwVzhqmN/1.c" (loop_ub_set[(0)]) = (loop_ub_set[(__MACC_NUMGPUS - (1))]); { # 322 "/tmp/tmp.rutwVzhqmN/1.c" int i; # 322 "/tmp/tmp.rutwVzhqmN/1.c" for(i = (1); i < __MACC_NUMGPUS; i++) { { # 323 "/tmp/tmp.rutwVzhqmN/1.c" (loop_lb_set[i]) = (1); # 324 "/tmp/tmp.rutwVzhqmN/1.c" (loop_ub_set[i]) = (0); } } } } # 328 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_rewrite_data_region_into_single(int * lb_set, int * ub_set) { # 330 "/tmp/tmp.rutwVzhqmN/1.c" int gpu_ub = __MACC_NUMGPUS - (1); # 331 "/tmp/tmp.rutwVzhqmN/1.c" (lb_set[(0)]) = (((lb_set[(0)]) < (lb_set[gpu_ub])) ?(lb_set[(0)]) : (lb_set[gpu_ub])); # 332 "/tmp/tmp.rutwVzhqmN/1.c" (ub_set[(0)]) = (((ub_set[(0)]) > (ub_set[gpu_ub])) ?(ub_set[(0)]) : (ub_set[gpu_ub])); } # 335 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_sync_data(int gpu_num, void * ptr, int type_size, int lb, int ub) { # 337 "/tmp/tmp.rutwVzhqmN/1.c" void * update_addr = ptr + (lb * type_size); # 338 "/tmp/tmp.rutwVzhqmN/1.c" unsigned long length_b = ((ub - lb) + (1)) * type_size; # 340 "/tmp/tmp.rutwVzhqmN/1.c" acc_update_self(update_addr, length_b); { # 343 "/tmp/tmp.rutwVzhqmN/1.c" int i; # 343 "/tmp/tmp.rutwVzhqmN/1.c" for(i = (0); i < __MACC_NUMGPUS; i++) { { # 346 "/tmp/tmp.rutwVzhqmN/1.c" if(i != gpu_num) { # 347 "/tmp/tmp.rutwVzhqmN/1.c" __macc_set_gpu_num(i); # 348 
"/tmp/tmp.rutwVzhqmN/1.c" acc_update_device(update_addr, length_b); } } } } # 352 "/tmp/tmp.rutwVzhqmN/1.c" __macc_set_gpu_num(gpu_num); } # 356 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_set_data_region(int gpu_num, void * ptr, int multi, int use_type, int * use_lb_set, int * use_ub_set, int def_type, int * def_lb_set, int * def_ub_set) { # 360 "/tmp/tmp.rutwVzhqmN/1.c" struct __MaccDataTableEntry * entry = __macc_data_table_find(gpu_num, ptr); # 361 "/tmp/tmp.rutwVzhqmN/1.c" ptr = (entry->addr); # 366 "/tmp/tmp.rutwVzhqmN/1.c" if(((entry->dirty) && (multi || (gpu_num != (0)))) && (__MACC_NUMGPUS > (1))) { # 367 "/tmp/tmp.rutwVzhqmN/1.c" int update_all = 0; # 368 "/tmp/tmp.rutwVzhqmN/1.c" int update_all_DtoH = 0; # 370 "/tmp/tmp.rutwVzhqmN/1.c" if((use_type == (0)) || (def_type == (0))) { # 371 "/tmp/tmp.rutwVzhqmN/1.c" update_all = (1); } else { # 373 "/tmp/tmp.rutwVzhqmN/1.c" if(def_type == (2)) { { # 374 "/tmp/tmp.rutwVzhqmN/1.c" int i; # 374 "/tmp/tmp.rutwVzhqmN/1.c" for(i = (0); i < __MACC_NUMGPUS; i++) { { # 375 "/tmp/tmp.rutwVzhqmN/1.c" if((i != gpu_num) && (!(((entry->dirty_lb) > (def_ub_set[i])) || ((entry->dirty_ub) < (def_lb_set[i]))))) { # 378 "/tmp/tmp.rutwVzhqmN/1.c" update_all = (1); # 379 "/tmp/tmp.rutwVzhqmN/1.c" break; } } } } } } # 384 "/tmp/tmp.rutwVzhqmN/1.c" if(! 
update_all) { # 385 "/tmp/tmp.rutwVzhqmN/1.c" int every_whole = 1; # 386 "/tmp/tmp.rutwVzhqmN/1.c" int unused_lb = entry->dirty_lb; # 387 "/tmp/tmp.rutwVzhqmN/1.c" int unused_ub = entry->dirty_ub; { # 389 "/tmp/tmp.rutwVzhqmN/1.c" int i; # 389 "/tmp/tmp.rutwVzhqmN/1.c" for(i = (0); i < __MACC_NUMGPUS; i++) { { # 390 "/tmp/tmp.rutwVzhqmN/1.c" if(i != gpu_num) { # 391 "/tmp/tmp.rutwVzhqmN/1.c" if(((use_lb_set[i]) <= (entry->dirty_lb)) && ((entry->dirty_ub) <= (use_ub_set[i]))) { # 393 "/tmp/tmp.rutwVzhqmN/1.c" update_all_DtoH = (1); } else { # 396 "/tmp/tmp.rutwVzhqmN/1.c" every_whole = (0); # 398 "/tmp/tmp.rutwVzhqmN/1.c" if((use_lb_set[i]) <= unused_lb) { # 399 "/tmp/tmp.rutwVzhqmN/1.c" unused_lb = ((unused_lb > ((use_ub_set[i]) + (1))) ? unused_lb : ((use_ub_set[i]) + (1))); } else { # 400 "/tmp/tmp.rutwVzhqmN/1.c" if((use_ub_set[i]) >= unused_ub) { # 401 "/tmp/tmp.rutwVzhqmN/1.c" unused_ub = ((unused_ub < ((use_lb_set[i]) - (1))) ? unused_ub : ((use_lb_set[i]) - (1))); } } } } } } } # 406 "/tmp/tmp.rutwVzhqmN/1.c" if(every_whole) { # 407 "/tmp/tmp.rutwVzhqmN/1.c" update_all = (1); } # 408 "/tmp/tmp.rutwVzhqmN/1.c" if(unused_ub < unused_lb) { # 409 "/tmp/tmp.rutwVzhqmN/1.c" update_all_DtoH = (1); } } # 413 "/tmp/tmp.rutwVzhqmN/1.c" if(update_all) { # 414 "/tmp/tmp.rutwVzhqmN/1.c" __macc_sync_data(gpu_num, ptr, entry->type_size, entry->dirty_lb, entry->dirty_ub); # 415 "/tmp/tmp.rutwVzhqmN/1.c" (entry->dirty) = (0); } else { # 419 "/tmp/tmp.rutwVzhqmN/1.c" if((entry->dirty) && (use_type == (2))) { # 420 "/tmp/tmp.rutwVzhqmN/1.c" int thread_num = multi ? 
__MACC_NUMGPUS : (1); # 422 "/tmp/tmp.rutwVzhqmN/1.c" if(update_all_DtoH) { # 423 "/tmp/tmp.rutwVzhqmN/1.c" acc_update_self(ptr + ((entry->dirty_lb) * (entry->type_size)), (((entry->dirty_ub) - (entry->dirty_lb)) + (1)) * (entry->type_size)); } { # 427 "/tmp/tmp.rutwVzhqmN/1.c" int i; # 427 "/tmp/tmp.rutwVzhqmN/1.c" for(i = (0); i < thread_num; i++) { { # 431 "/tmp/tmp.rutwVzhqmN/1.c" if((i != gpu_num) && (!(((entry->dirty_lb) > (use_ub_set[i])) || ((entry->dirty_ub) < (use_lb_set[i]))))) { # 435 "/tmp/tmp.rutwVzhqmN/1.c" int update_lb = ((entry->dirty_lb) > (use_lb_set[i])) ?(entry->dirty_lb) : (use_lb_set[i]); # 436 "/tmp/tmp.rutwVzhqmN/1.c" int update_ub = ((entry->dirty_ub) < (use_ub_set[i])) ?(entry->dirty_ub) : (use_ub_set[i]); # 437 "/tmp/tmp.rutwVzhqmN/1.c" void * update_addr = ptr + (update_lb * (entry->type_size)); # 438 "/tmp/tmp.rutwVzhqmN/1.c" unsigned long length_b = ((update_ub - update_lb) + (1)) * (entry->type_size); # 440 "/tmp/tmp.rutwVzhqmN/1.c" if(! update_all_DtoH) { # 441 "/tmp/tmp.rutwVzhqmN/1.c" __macc_set_gpu_num(gpu_num); # 442 "/tmp/tmp.rutwVzhqmN/1.c" acc_update_self(update_addr, length_b); } # 444 "/tmp/tmp.rutwVzhqmN/1.c" __macc_set_gpu_num(i); # 445 "/tmp/tmp.rutwVzhqmN/1.c" acc_update_device(update_addr, length_b); } } } } # 449 "/tmp/tmp.rutwVzhqmN/1.c" __macc_set_gpu_num(gpu_num); } } } # 458 "/tmp/tmp.rutwVzhqmN/1.c" if((multi || (gpu_num == (0))) && (def_type != (1))) { # 459 "/tmp/tmp.rutwVzhqmN/1.c" if(def_type == (0)) { # 460 "/tmp/tmp.rutwVzhqmN/1.c" (entry->dirty) = (1); # 461 "/tmp/tmp.rutwVzhqmN/1.c" (entry->dirty_lb) = (entry->entire_lb); # 462 "/tmp/tmp.rutwVzhqmN/1.c" (entry->dirty_ub) = (entry->entire_ub); } else { # 465 "/tmp/tmp.rutwVzhqmN/1.c" if(!(entry->dirty)) { # 466 "/tmp/tmp.rutwVzhqmN/1.c" (entry->dirty) = (1); # 467 "/tmp/tmp.rutwVzhqmN/1.c" (entry->dirty_lb) = (def_lb_set[gpu_num]); # 468 "/tmp/tmp.rutwVzhqmN/1.c" (entry->dirty_ub) = (def_ub_set[gpu_num]); } else { # 473 "/tmp/tmp.rutwVzhqmN/1.c" 
/* NOTE(review): machine-generated MACC/OpenACC translator output, preprocessed and
 * collapsed onto one physical line; the `# NNN "file"` tokens are line markers, not code.
 * This span holds three things:
 *  (a) the tail of __macc_set_data_region: if the per-GPU def range overlaps the entry's
 *      dirty range, or is exactly adjacent to it (lb == other ub + 1), the two ranges are
 *      merged (min of lbs, max of ubs); otherwise the old dirty range is flushed via
 *      __macc_sync_data before the new def range is adopted as the dirty range.
 *  (b) __macc_set_data_region_multi: for one GPU, applies __macc_set_data_region to each
 *      of `len` tracked pointers (use/def types and bound sets are parallel arrays).
 *  (c) the head of __macc_init: GPU count from the MACC_NUMGPUS environment variable,
 *      falling back to __macc_get_num_gpus(); exits with -1 if no device is available. */
if(((!(((entry->dirty_lb) > (def_ub_set[gpu_num])) || ((entry->dirty_ub) < (def_lb_set[gpu_num])))) || ((entry->dirty_lb) == ((def_ub_set[gpu_num]) + (1)))) || ((def_lb_set[gpu_num]) == ((entry->dirty_ub) + (1)))) { # 481 "/tmp/tmp.rutwVzhqmN/1.c" (entry->dirty_lb) = (((entry->dirty_lb) < (def_lb_set[gpu_num])) ?(entry->dirty_lb) : (def_lb_set[gpu_num])); # 482 "/tmp/tmp.rutwVzhqmN/1.c" (entry->dirty_ub) = (((entry->dirty_ub) > (def_ub_set[gpu_num])) ?(entry->dirty_ub) : (def_ub_set[gpu_num])); } else { # 486 "/tmp/tmp.rutwVzhqmN/1.c" __macc_sync_data(gpu_num, ptr, entry->type_size, entry->dirty_lb, entry->dirty_ub); # 487 "/tmp/tmp.rutwVzhqmN/1.c" (entry->dirty_lb) = (def_lb_set[gpu_num]); # 488 "/tmp/tmp.rutwVzhqmN/1.c" (entry->dirty_ub) = (def_ub_set[gpu_num]); } } } } } # 493 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_set_data_region_multi(int gpu_num, int multi, int len, void * * ptrs, int * use_type, int * * use_lb_set, int * * use_ub_set, int * def_type, int * * def_lb_set, int * * def_ub_set) { { # 499 "/tmp/tmp.rutwVzhqmN/1.c" int i; # 499 "/tmp/tmp.rutwVzhqmN/1.c" for(i = (0); i < len; i++) { { # 502 "/tmp/tmp.rutwVzhqmN/1.c" int tnum = i; # 504 "/tmp/tmp.rutwVzhqmN/1.c" __macc_set_gpu_num(gpu_num); # 506 "/tmp/tmp.rutwVzhqmN/1.c" __macc_set_data_region(gpu_num, ptrs[tnum], multi, use_type[tnum], use_lb_set[tnum], use_ub_set[tnum], def_type[tnum], def_lb_set[tnum], def_ub_set[tnum]); } } } } # 513 "/tmp/tmp.rutwVzhqmN/1.c" void __macc_init() { # 515 "/tmp/tmp.rutwVzhqmN/1.c" char * env_macc_numgpus = getenv("MACC_NUMGPUS"); # 517 "/tmp/tmp.rutwVzhqmN/1.c" if(env_macc_numgpus != ((void * )(0))) { # 518 "/tmp/tmp.rutwVzhqmN/1.c" __MACC_NUMGPUS = (atoi(env_macc_numgpus)); } else { # 521 "/tmp/tmp.rutwVzhqmN/1.c" __MACC_NUMGPUS = (__macc_get_num_gpus()); } # 524 "/tmp/tmp.rutwVzhqmN/1.c" if(__MACC_NUMGPUS <= (0)) { # 525 "/tmp/tmp.rutwVzhqmN/1.c" fputs("[MACC ERROR] No GPU device found.", stderr); # 526 "/tmp/tmp.rutwVzhqmN/1.c" exit(-(1)); } { # 529
/* __macc_init (continued): parse MACC_TOPOLOGY, a comma-separated list mapped into
 * __MACC_TOPOLOGY[i]; if the variable is unset, default to the identity mapping
 * (GPU i -> device i). Then allocate the per-GPU data table and wrap cache in managed
 * memory, and run 10 "[MACC] Wake up" iterations, each touching a 256M-element int
 * buffer across all GPUs (the generated OpenMP/OpenACC warm-up region follows on the
 * next lines — presumably to force device initialization; confirm against the MACC
 * translator sources).
 * NOTE(review): strtok() writes NUL bytes into the buffer returned by getenv(), i.e.
 * it mutates the process environment string; a strdup() of `topo` before tokenizing
 * would be safer. Also, no bounds check against __MACC_TOPOLOGY's capacity while
 * tokenizing — assumes the env var lists at most as many entries as the array holds. */
"/tmp/tmp.rutwVzhqmN/1.c" char * topo = getenv("MACC_TOPOLOGY"); # 531 "/tmp/tmp.rutwVzhqmN/1.c" if(topo != ((void * )(0))) { # 532 "/tmp/tmp.rutwVzhqmN/1.c" int i = 0; # 533 "/tmp/tmp.rutwVzhqmN/1.c" topo = (strtok(topo, ",")); # 534 "/tmp/tmp.rutwVzhqmN/1.c" while(topo != ((void * )(0))) { { # 535 "/tmp/tmp.rutwVzhqmN/1.c" (__MACC_TOPOLOGY[i]) = (atoi(topo)); # 536 "/tmp/tmp.rutwVzhqmN/1.c" topo = (strtok((void * )(0), ",")); # 537 "/tmp/tmp.rutwVzhqmN/1.c" i++; } } } else { { # 540 "/tmp/tmp.rutwVzhqmN/1.c" int i; # 540 "/tmp/tmp.rutwVzhqmN/1.c" for(i = (0); i < __MACC_NUMGPUS; i++) { { # 541 "/tmp/tmp.rutwVzhqmN/1.c" (__MACC_TOPOLOGY[i]) = i; } } } } # 557 "/tmp/tmp.rutwVzhqmN/1.c" __MACC_DATA_TABLE_SET = (calloc_managed(__MACC_NUMGPUS, sizeof(struct __MaccDataTable))); # 558 "/tmp/tmp.rutwVzhqmN/1.c" __MACC_DATA_WRAP_CACHE_SET = (calloc_managed(__MACC_NUMGPUS, sizeof(struct __MaccDataWrapCache))); { # 561 "/tmp/tmp.rutwVzhqmN/1.c" int t; # 561 "/tmp/tmp.rutwVzhqmN/1.c" for(t = (0); t < (10); t++) { { # 562 "/tmp/tmp.rutwVzhqmN/1.c" printf("[MACC] Wake up (%d)\n", t); { # 564 "/tmp/tmp.rutwVzhqmN/1.c" int n = ((256) * (1024)) * (1024); # 565 "/tmp/tmp.rutwVzhqmN/1.c" int * tmp = malloc_managed(n * (sizeof(int))); { #pragma omp parallel num_threads ( __MACC_NUMGPUS ) { int __macc_tnum = omp_get_thread_num(); __macc_set_gpu_num(__macc_tnum); { __macc_copyin(__macc_tnum, tmp, sizeof(int), 0, n); } } { { static int __macc_region_is_changed = 1; static int __macc_multi = 1; static void * __macc_ptrs[1]; static int __macc_use_types[1]; static int * __macc_use_lb_sets[1]; static int * __macc_use_ub_sets[1]; static int __macc_def_types[1]; static int * __macc_def_lb_sets[1]; static int * __macc_def_ub_sets[1]; static int __macc_tmp_def_ub_set[10]; static int __macc_tmp_def_lb_set[10]; static int __macc_tmp_use_ub_set[10]; static int __macc_tmp_use_lb_set[10]; static int __macc_n_last; static int __macc_i_loop_lb_set[10]; static int __macc_i_loop_ub_set[10];
__macc_region_is_changed = (__macc_region_is_changed || (n != __macc_n_last)); if(__macc_region_is_changed) { __macc_multi = (1); __macc_region_is_changed = (0); { __macc_n_last = n; } { __macc_calc_loop_region(__macc_i_loop_lb_set, __macc_i_loop_ub_set, 1, n - (1), 1, 1); } #pragma omp parallel num_threads ( __MACC_NUMGPUS ) { int __macc_gpu_num; int __macc_top_loop_lb; int __macc_top_loop_ub; __macc_gpu_num = (omp_get_thread_num()); { __macc_top_loop_lb = (__macc_i_loop_lb_set[__macc_gpu_num]); __macc_top_loop_ub = (__macc_i_loop_ub_set[__macc_gpu_num]); } { { { __macc_init_access_region(__macc_gpu_num, __macc_tmp_use_lb_set, __macc_tmp_use_ub_set); __macc_init_access_region(__macc_gpu_num, __macc_tmp_def_lb_set, __macc_tmp_def_ub_set); { } { __macc_update_access_region(__macc_gpu_num, __macc_tmp_def_lb_set, __macc_tmp_def_ub_set, __macc_top_loop_lb); __macc_update_access_region(__macc_gpu_num, __macc_tmp_def_lb_set, __macc_tmp_def_ub_set, __macc_top_loop_ub); } __macc_adjust_data_region(tmp, __macc_gpu_num, __macc_tmp_use_lb_set, __macc_tmp_use_ub_set); __macc_adjust_data_region(tmp, __macc_gpu_num, __macc_tmp_def_lb_set, __macc_tmp_def_ub_set); } (__macc_ptrs[0]) = tmp; (__macc_use_types[0]) = (1); (__macc_use_lb_sets[0]) = __macc_tmp_use_lb_set; (__macc_use_ub_sets[0]) = __macc_tmp_use_ub_set; (__macc_def_types[0]) = (2); (__macc_def_lb_sets[0]) = __macc_tmp_def_lb_set; (__macc_def_ub_sets[0]) = __macc_tmp_def_ub_set; } } } if(__macc_region_is_overlapping(__macc_tmp_def_lb_set, __macc_tmp_def_ub_set)) { __macc_multi = (0); { __macc_rewrite_loop_region_into_single(__macc_i_loop_lb_set, __macc_i_loop_ub_set); { __macc_rewrite_data_region_into_single(__macc_tmp_def_lb_set, __macc_tmp_def_ub_set); } } } } #pragma omp parallel num_threads ( __MACC_NUMGPUS ) { int __macc_tnum = omp_get_thread_num(); __macc_set_gpu_num(__macc_tnum); { int __macc_num_gangs; int __macc_top_loop_lb; int __macc_top_loop_ub; __macc_top_loop_lb = (__macc_i_loop_lb_set[__macc_tnum]); 
__macc_top_loop_ub = (__macc_i_loop_ub_set[__macc_tnum]); __macc_set_data_region_multi(__macc_tnum, __macc_multi, 1, __macc_ptrs, __macc_use_types, __macc_use_lb_sets, __macc_use_ub_sets, __macc_def_types, __macc_def_lb_sets, __macc_def_ub_sets); #pragma omp barrier __macc_num_gangs = ( __macc_multi ? (((512) + __MACC_NUMGPUS) - (1)) / __MACC_NUMGPUS : 512 ); #pragma acc parallel present ( tmp ) num_gangs (__macc_num_gangs) vector_length ( 1024 ) #pragma acc loop gang vector # 571 "/tmp/tmp.rutwVzhqmN/1.c" # 571 "/tmp/tmp.rutwVzhqmN/1.c" for(int i= __macc_top_loop_lb; i <= __macc_top_loop_ub; i++) { { # 572 "/tmp/tmp.rutwVzhqmN/1.c" (tmp[i]) = i; } } } } } { static int __macc_region_is_changed = 1; static int __macc_multi = 1; static void * __macc_ptrs[1]; static int __macc_use_types[1]; static int * __macc_use_lb_sets[1]; static int * __macc_use_ub_sets[1]; static int __macc_def_types[1]; static int * __macc_def_lb_sets[1]; static int * __macc_def_ub_sets[1]; static int __macc_tmp_def_ub_set[10]; static int __macc_tmp_def_lb_set[10]; static int __macc_tmp_use_ub_set[10]; static int __macc_tmp_use_lb_set[10]; static int __macc_n_last; static int __macc_i_loop_lb_set[10]; static int __macc_i_loop_ub_set[10]; __macc_region_is_changed = (__macc_region_is_changed || (n != __macc_n_last)); if(__macc_region_is_changed) { __macc_multi = (1); __macc_region_is_changed = (0); { __macc_n_last = n; } { __macc_calc_loop_region(__macc_i_loop_lb_set, __macc_i_loop_ub_set, 1, n - (1), 1, 1); } #pragma omp parallel num_threads ( __MACC_NUMGPUS ) { int __macc_gpu_num; int __macc_top_loop_lb; int __macc_top_loop_ub; __macc_gpu_num = (omp_get_thread_num()); { __macc_top_loop_lb = (__macc_i_loop_lb_set[__macc_gpu_num]); __macc_top_loop_ub = (__macc_i_loop_ub_set[__macc_gpu_num]); } { { { __macc_init_access_region(__macc_gpu_num, __macc_tmp_use_lb_set, __macc_tmp_use_ub_set); __macc_init_access_region(__macc_gpu_num, __macc_tmp_def_lb_set, __macc_tmp_def_ub_set); { 
__macc_update_access_region(__macc_gpu_num, __macc_tmp_use_lb_set, __macc_tmp_use_ub_set, n - __macc_top_loop_lb); __macc_update_access_region(__macc_gpu_num, __macc_tmp_use_lb_set, __macc_tmp_use_ub_set, n - __macc_top_loop_ub); } { __macc_update_access_region(__macc_gpu_num, __macc_tmp_def_lb_set, __macc_tmp_def_ub_set, n - __macc_top_loop_lb); __macc_update_access_region(__macc_gpu_num, __macc_tmp_def_lb_set, __macc_tmp_def_ub_set, n - __macc_top_loop_ub); } __macc_adjust_data_region(tmp, __macc_gpu_num, __macc_tmp_use_lb_set, __macc_tmp_use_ub_set); __macc_adjust_data_region(tmp, __macc_gpu_num, __macc_tmp_def_lb_set, __macc_tmp_def_ub_set); } (__macc_ptrs[0]) = tmp; (__macc_use_types[0]) = (2); (__macc_use_lb_sets[0]) = __macc_tmp_use_lb_set; (__macc_use_ub_sets[0]) = __macc_tmp_use_ub_set; (__macc_def_types[0]) = (2); (__macc_def_lb_sets[0]) = __macc_tmp_def_lb_set; (__macc_def_ub_sets[0]) = __macc_tmp_def_ub_set; } } } if(__macc_region_is_overlapping(__macc_tmp_def_lb_set, __macc_tmp_def_ub_set)) { __macc_multi = (0); { __macc_rewrite_loop_region_into_single(__macc_i_loop_lb_set, __macc_i_loop_ub_set); { __macc_rewrite_data_region_into_single(__macc_tmp_use_lb_set, __macc_tmp_use_ub_set); __macc_rewrite_data_region_into_single(__macc_tmp_def_lb_set, __macc_tmp_def_ub_set); } } } } #pragma omp parallel num_threads ( __MACC_NUMGPUS ) { int __macc_tnum = omp_get_thread_num(); __macc_set_gpu_num(__macc_tnum); { int __macc_num_gangs; int __macc_top_loop_lb; int __macc_top_loop_ub; __macc_top_loop_lb = (__macc_i_loop_lb_set[__macc_tnum]); __macc_top_loop_ub = (__macc_i_loop_ub_set[__macc_tnum]); __macc_set_data_region_multi(__macc_tnum, __macc_multi, 1, __macc_ptrs, __macc_use_types, __macc_use_lb_sets, __macc_use_ub_sets, __macc_def_types, __macc_def_lb_sets, __macc_def_ub_sets); #pragma omp barrier __macc_num_gangs = ( __macc_multi ? 
(((512) + __MACC_NUMGPUS) - (1)) / __MACC_NUMGPUS : 512 ); #pragma acc parallel present ( tmp ) num_gangs (__macc_num_gangs) vector_length ( 1024 ) #pragma acc loop gang vector # 576 "/tmp/tmp.rutwVzhqmN/1.c" # 576 "/tmp/tmp.rutwVzhqmN/1.c" for(int i= __macc_top_loop_lb; i <= __macc_top_loop_ub; i++) { { # 577 "/tmp/tmp.rutwVzhqmN/1.c" (tmp[(n - i)]) += i; } } } } } } #pragma omp parallel num_threads ( __MACC_NUMGPUS ) { int __macc_tnum = omp_get_thread_num(); __macc_set_gpu_num(__macc_tnum); { __macc_copyout(__macc_tnum, tmp, sizeof(int), 0, n); } } } # 580 "/tmp/tmp.rutwVzhqmN/1.c" free_managed(tmp); } } } } } } extern void * malloc_managed(unsigned long); extern void * calloc_managed(unsigned long, unsigned long); extern void free_managed(void * ); extern void cfree_managed(void * ); extern void * realloc_managed(void * , unsigned long); extern void * valloc_managed(unsigned long); extern void * pvalloc_managed(unsigned long); extern void * memalign_managed(unsigned long, unsigned long); extern int posix_memalign_managed(void * * , unsigned long, unsigned long); extern char * tmpnam_managed(char * ); extern int gettimeofday(struct timeval * restrict __tv, struct timezone * restrict __tz); extern int settimeofday(struct timeval const * __tv, struct timezone const * __tz); extern int adjtime(struct timeval const * __delta, struct timeval * __olddelta); extern int getitimer(int __which, struct itimerval * __value); extern int setitimer(int __which, struct itimerval const * restrict __new, struct itimerval * restrict __old); extern int utimes(char const * __file, struct timeval const __tvp[2]); extern int lutimes(char const * __file, struct timeval const __tvp[2]); extern int futimes(int __fd, struct timeval const __tvp[2]); int newMat(struct Mat * Mat, int mnums, int mrows, int mcols, int mdeps); void clearMat(struct Mat * Mat); void set_param(int is[], char * size); void mat_set(struct Mat * Mat, int l, float val); void mat_set_init(struct Mat * Mat); float 
/* Himeno benchmark host code (preprocessed; `# NN "in.c"` tokens are line markers).
 * This span: tail of the jacobi/fflop/mflops prototypes; second() — wall-clock seconds
 * elapsed since its own first call (gettimeofday-based; the first call latches the base
 * time in function-local statics and returns 0.0 — not thread-safe, and tv_sec/tv_usec
 * are narrowed into `int` statics); file-scope globals (SOR over-relaxation factor
 * `omega` and the seven benchmark matrices); and the head of main(): __macc_init()
 * followed by locals for grid sizes, residual `gosa`, and timing. */
jacobi(int nn, struct Mat * a, struct Mat * b, struct Mat * c, struct Mat * p, struct Mat * bnd, struct Mat * wrk1, struct Mat * wrk2); double fflop(int mx, int my, int mz); double mflops(int nn, double cpu, double flop); # 32 "/tmp/tmp.rutwVzhqmN/in.c" double second() { # 35 "/tmp/tmp.rutwVzhqmN/in.c" struct timeval tm; # 36 "/tmp/tmp.rutwVzhqmN/in.c" double t; # 38 "/tmp/tmp.rutwVzhqmN/in.c" static int base_sec = 0; # 38 "/tmp/tmp.rutwVzhqmN/in.c" static int base_usec = 0; # 40 "/tmp/tmp.rutwVzhqmN/in.c" gettimeofday(&(tm), (void * )(0)); # 42 "/tmp/tmp.rutwVzhqmN/in.c" if((base_sec == (0)) && (base_usec == (0))) { # 44 "/tmp/tmp.rutwVzhqmN/in.c" base_sec = ((&(tm))->tv_sec); # 45 "/tmp/tmp.rutwVzhqmN/in.c" base_usec = ((&(tm))->tv_usec); # 46 "/tmp/tmp.rutwVzhqmN/in.c" t = (0.0); } else { # 48 "/tmp/tmp.rutwVzhqmN/in.c" t = (((double)(((&(tm))->tv_sec) - base_sec)) + (((double)(((&(tm))->tv_usec) - base_usec)) / (1.0e6))); } # 52 "/tmp/tmp.rutwVzhqmN/in.c" return t; } # 55 "/tmp/tmp.rutwVzhqmN/in.c" float omega = 0.8; # 56 "/tmp/tmp.rutwVzhqmN/in.c" struct Mat a; # 56 "/tmp/tmp.rutwVzhqmN/in.c" struct Mat b; # 56 "/tmp/tmp.rutwVzhqmN/in.c" struct Mat c; # 56 "/tmp/tmp.rutwVzhqmN/in.c" struct Mat p; # 56 "/tmp/tmp.rutwVzhqmN/in.c" struct Mat bnd; # 56 "/tmp/tmp.rutwVzhqmN/in.c" struct Mat wrk1; # 56 "/tmp/tmp.rutwVzhqmN/in.c" struct Mat wrk2; # 58 "/tmp/tmp.rutwVzhqmN/in.c" int main(int argc, char * argv[]) { __macc_init(); { # 61 "/tmp/tmp.rutwVzhqmN/in.c" int i; # 61 "/tmp/tmp.rutwVzhqmN/in.c" int j; # 61 "/tmp/tmp.rutwVzhqmN/in.c" int k; # 61 "/tmp/tmp.rutwVzhqmN/in.c" int nn; # 62 "/tmp/tmp.rutwVzhqmN/in.c" int imax; # 62 "/tmp/tmp.rutwVzhqmN/in.c" int jmax; # 62 "/tmp/tmp.rutwVzhqmN/in.c" int kmax; # 62 "/tmp/tmp.rutwVzhqmN/in.c" int mimax; # 62 "/tmp/tmp.rutwVzhqmN/in.c" int mjmax; # 62 "/tmp/tmp.rutwVzhqmN/in.c" int mkmax; # 62 "/tmp/tmp.rutwVzhqmN/in.c" int msize[3]; # 63 "/tmp/tmp.rutwVzhqmN/in.c" float gosa; # 63 "/tmp/tmp.rutwVzhqmN/in.c" float target;
/* main() (continued): remaining locals; read the grid-size string from argv[1], or
 * prompt and scanf() it interactively when no argument is given; set_param() maps the
 * string to msize[3]; loop bounds are the matrix dims minus one (imax = mimax-1, ...);
 * bs = elements per single matrix plane (mimax*mjmax*mkmax); `target` = 60 s budget
 * for the timed run; finally print the configuration. */
# 64 "/tmp/tmp.rutwVzhqmN/in.c" double cpu0; # 64 "/tmp/tmp.rutwVzhqmN/in.c" double cpu1; # 64 "/tmp/tmp.rutwVzhqmN/in.c" double cpu; # 64 "/tmp/tmp.rutwVzhqmN/in.c" double flop; # 65 "/tmp/tmp.rutwVzhqmN/in.c" char size[10]; # 66 "/tmp/tmp.rutwVzhqmN/in.c" unsigned long bs; # 67 "/tmp/tmp.rutwVzhqmN/in.c" float * a_m; # 67 "/tmp/tmp.rutwVzhqmN/in.c" float * b_m; # 67 "/tmp/tmp.rutwVzhqmN/in.c" float * c_m; # 67 "/tmp/tmp.rutwVzhqmN/in.c" float * p_m; # 67 "/tmp/tmp.rutwVzhqmN/in.c" float * bnd_m; # 67 "/tmp/tmp.rutwVzhqmN/in.c" float * wrk1_m; # 67 "/tmp/tmp.rutwVzhqmN/in.c" float * wrk2_m; # 69 "/tmp/tmp.rutwVzhqmN/in.c" if(argc == (2)) { # 70 "/tmp/tmp.rutwVzhqmN/in.c" strcpy(size, argv[1]); } else { # 72 "/tmp/tmp.rutwVzhqmN/in.c" printf("For example: \n"); # 73 "/tmp/tmp.rutwVzhqmN/in.c" printf(" Grid-size= XS (32x32x64)\n"); # 74 "/tmp/tmp.rutwVzhqmN/in.c" printf("\t S (64x64x128)\n"); # 75 "/tmp/tmp.rutwVzhqmN/in.c" printf("\t M (128x128x256)\n"); # 76 "/tmp/tmp.rutwVzhqmN/in.c" printf("\t L (256x256x512)\n"); # 77 "/tmp/tmp.rutwVzhqmN/in.c" printf("\t XL (512x512x1024)\n\n"); # 78 "/tmp/tmp.rutwVzhqmN/in.c" printf("Grid-size = "); # 79 "/tmp/tmp.rutwVzhqmN/in.c" __isoc99_scanf("%s", size); # 80 "/tmp/tmp.rutwVzhqmN/in.c" printf("\n"); } # 83 "/tmp/tmp.rutwVzhqmN/in.c" set_param(msize, size); # 85 "/tmp/tmp.rutwVzhqmN/in.c" mimax = (msize[0]); # 86 "/tmp/tmp.rutwVzhqmN/in.c" mjmax = (msize[1]); # 87 "/tmp/tmp.rutwVzhqmN/in.c" mkmax = (msize[2]); # 88 "/tmp/tmp.rutwVzhqmN/in.c" imax = (mimax - (1)); # 89 "/tmp/tmp.rutwVzhqmN/in.c" jmax = (mjmax - (1)); # 90 "/tmp/tmp.rutwVzhqmN/in.c" kmax = (mkmax - (1)); # 91 "/tmp/tmp.rutwVzhqmN/in.c" bs = ((mimax * mjmax) * mkmax); # 93 "/tmp/tmp.rutwVzhqmN/in.c" target = (60.0); # 95 "/tmp/tmp.rutwVzhqmN/in.c" printf("mimax = %d mjmax = %d mkmax = %d\n", mimax, mjmax, mkmax); # 96 "/tmp/tmp.rutwVzhqmN/in.c" printf("imax = %d jmax = %d kmax =%d\n", imax, jmax, kmax); # 101 "/tmp/tmp.rutwVzhqmN/in.c" newMat(&(p), 1, mimax,
/* main() (continued): allocate the seven matrices in managed memory (p/bnd/wrk1/wrk2
 * with 1 plane, a with 4, b/c with 3); initialize — p gets the i^2/(mrows-1)^2 profile
 * via mat_set_init(), the rest constant planes (a[3] = 1/6 is the SOR coefficient);
 * cache the raw float* payloads (a_m, ..., wrk2_m); then generated MACC regions copy
 * p to every GPU (copyin of the coefficient matrices starts here and completes on the
 * next line). NOTE(review): newMat() return values (allocation success) are ignored. */
mjmax, mkmax); # 102 "/tmp/tmp.rutwVzhqmN/in.c" newMat(&(bnd), 1, mimax, mjmax, mkmax); # 103 "/tmp/tmp.rutwVzhqmN/in.c" newMat(&(wrk1), 1, mimax, mjmax, mkmax); # 104 "/tmp/tmp.rutwVzhqmN/in.c" newMat(&(wrk2), 1, mimax, mjmax, mkmax); # 105 "/tmp/tmp.rutwVzhqmN/in.c" newMat(&(a), 4, mimax, mjmax, mkmax); # 106 "/tmp/tmp.rutwVzhqmN/in.c" newMat(&(b), 3, mimax, mjmax, mkmax); # 107 "/tmp/tmp.rutwVzhqmN/in.c" newMat(&(c), 3, mimax, mjmax, mkmax); # 109 "/tmp/tmp.rutwVzhqmN/in.c" mat_set_init(&(p)); # 110 "/tmp/tmp.rutwVzhqmN/in.c" mat_set(&(bnd), 0, 1.0); # 111 "/tmp/tmp.rutwVzhqmN/in.c" mat_set(&(wrk1), 0, 0.0); # 112 "/tmp/tmp.rutwVzhqmN/in.c" mat_set(&(wrk2), 0, 0.0); # 113 "/tmp/tmp.rutwVzhqmN/in.c" mat_set(&(a), 0, 1.0); # 114 "/tmp/tmp.rutwVzhqmN/in.c" mat_set(&(a), 1, 1.0); # 115 "/tmp/tmp.rutwVzhqmN/in.c" mat_set(&(a), 2, 1.0); # 116 "/tmp/tmp.rutwVzhqmN/in.c" mat_set(&(a), 3, (1.0) / (6.0)); # 117 "/tmp/tmp.rutwVzhqmN/in.c" mat_set(&(b), 0, 0.0); # 118 "/tmp/tmp.rutwVzhqmN/in.c" mat_set(&(b), 1, 0.0); # 119 "/tmp/tmp.rutwVzhqmN/in.c" mat_set(&(b), 2, 0.0); # 120 "/tmp/tmp.rutwVzhqmN/in.c" mat_set(&(c), 0, 1.0); # 121 "/tmp/tmp.rutwVzhqmN/in.c" mat_set(&(c), 1, 1.0); # 122 "/tmp/tmp.rutwVzhqmN/in.c" mat_set(&(c), 2, 1.0); # 128 "/tmp/tmp.rutwVzhqmN/in.c" a_m = ((&(a))->m); # 129 "/tmp/tmp.rutwVzhqmN/in.c" b_m = ((&(b))->m); # 130 "/tmp/tmp.rutwVzhqmN/in.c" c_m = ((&(c))->m); # 131 "/tmp/tmp.rutwVzhqmN/in.c" p_m = ((&(p))->m); # 132 "/tmp/tmp.rutwVzhqmN/in.c" bnd_m = ((&(bnd))->m); # 133 "/tmp/tmp.rutwVzhqmN/in.c" wrk1_m = ((&(wrk1))->m); # 134 "/tmp/tmp.rutwVzhqmN/in.c" wrk2_m = ((&(wrk2))->m); { #pragma omp parallel num_threads ( __MACC_NUMGPUS ) { int __macc_tnum = omp_get_thread_num(); __macc_set_gpu_num(__macc_tnum); { __macc_copyin(__macc_tnum, p_m, sizeof(float), 0, bs); } } { #pragma omp parallel num_threads ( __MACC_NUMGPUS ) { int __macc_tnum = omp_get_thread_num(); __macc_set_gpu_num(__macc_tnum); { __macc_copyin(__macc_tnum, a_m, sizeof(float), 0,
/* main() (continued): finish the per-GPU copyin of the coefficient/work matrices; run
 * one warm-up jacobi(1, ...) sweep; then a 3-iteration rehearsal to estimate speed,
 * from which nn is rescaled so the real run takes about `target` (60) seconds;
 * finally the timed measurement, printing iteration count, residual `gosa`, and
 * MFLOPS. ("excuted" is a typo in the original Himeno output string — left as-is
 * since it is runtime output text.) */
bs * (4)); __macc_copyin(__macc_tnum, b_m, sizeof(float), 0, bs * (3)); __macc_copyin(__macc_tnum, c_m, sizeof(float), 0, bs * (3)); __macc_copyin(__macc_tnum, bnd_m, sizeof(float), 0, bs); __macc_copyin(__macc_tnum, wrk1_m, sizeof(float), 0, bs); __macc_copyin(__macc_tnum, wrk2_m, sizeof(float), 0, bs); } } { # 141 "/tmp/tmp.rutwVzhqmN/in.c" jacobi(1, &(a), &(b), &(c), &(p), &(bnd), &(wrk1), &(wrk2)); # 143 "/tmp/tmp.rutwVzhqmN/in.c" nn = (3); # 144 "/tmp/tmp.rutwVzhqmN/in.c" printf(" Start rehearsal measurement process.\n"); # 145 "/tmp/tmp.rutwVzhqmN/in.c" printf(" Measure the performance in %d times.\n\n", nn); # 147 "/tmp/tmp.rutwVzhqmN/in.c" cpu0 = (second()); # 148 "/tmp/tmp.rutwVzhqmN/in.c" gosa = (jacobi(nn, &(a), &(b), &(c), &(p), &(bnd), &(wrk1), &(wrk2))); # 149 "/tmp/tmp.rutwVzhqmN/in.c" cpu1 = (second()); # 150 "/tmp/tmp.rutwVzhqmN/in.c" cpu = (cpu1 - cpu0); # 151 "/tmp/tmp.rutwVzhqmN/in.c" flop = (fflop(imax, jmax, kmax)); # 153 "/tmp/tmp.rutwVzhqmN/in.c" printf(" MFLOPS: %f time(s): %f %e\n\n", mflops(nn, cpu, flop), cpu, gosa); # 156 "/tmp/tmp.rutwVzhqmN/in.c" nn = ((int)(target / (cpu / (3.0)))); # 158 "/tmp/tmp.rutwVzhqmN/in.c" printf(" Now, start the actual measurement process.\n"); # 159 "/tmp/tmp.rutwVzhqmN/in.c" printf(" The loop will be excuted in %d times\n", nn); # 160 "/tmp/tmp.rutwVzhqmN/in.c" printf(" This will take about one minute.\n"); # 161 "/tmp/tmp.rutwVzhqmN/in.c" printf(" Wait for a while\n\n"); # 163 "/tmp/tmp.rutwVzhqmN/in.c" cpu0 = (second()); # 164 "/tmp/tmp.rutwVzhqmN/in.c" gosa = (jacobi(nn, &(a), &(b), &(c), &(p), &(bnd), &(wrk1), &(wrk2))); # 165 "/tmp/tmp.rutwVzhqmN/in.c" cpu1 = (second()); # 166 "/tmp/tmp.rutwVzhqmN/in.c" cpu = (cpu1 - cpu0); # 168 "/tmp/tmp.rutwVzhqmN/in.c" printf(" Loop executed for %d times\n", nn); # 169 "/tmp/tmp.rutwVzhqmN/in.c" printf(" Gosa : %e \n", gosa); # 170 "/tmp/tmp.rutwVzhqmN/in.c" printf(" MFLOPS measured : %f\tcpu : %f\n", mflops(nn, cpu, flop), cpu); # 171 "/tmp/tmp.rutwVzhqmN/in.c"
printf(" Score based on Pentium III 600MHz using Fortran 77: %f\n", (mflops(nn, cpu, flop)) / (82), 84); } #pragma omp parallel num_threads ( __MACC_NUMGPUS ) { int __macc_tnum = omp_get_thread_num(); __macc_set_gpu_num(__macc_tnum); { __macc_delete(__macc_tnum, a_m, sizeof(float), 0, bs * (4)); __macc_delete(__macc_tnum, b_m, sizeof(float), 0, bs * (3)); __macc_delete(__macc_tnum, c_m, sizeof(float), 0, bs * (3)); __macc_delete(__macc_tnum, bnd_m, sizeof(float), 0, bs); __macc_delete(__macc_tnum, wrk1_m, sizeof(float), 0, bs); __macc_delete(__macc_tnum, wrk2_m, sizeof(float), 0, bs); } } } #pragma omp parallel num_threads ( __MACC_NUMGPUS ) { int __macc_tnum = omp_get_thread_num(); __macc_set_gpu_num(__macc_tnum); { __macc_copyout(__macc_tnum, p_m, sizeof(float), 0, bs); } } } # 178 "/tmp/tmp.rutwVzhqmN/in.c" clearMat(&(p)); # 179 "/tmp/tmp.rutwVzhqmN/in.c" clearMat(&(bnd)); # 180 "/tmp/tmp.rutwVzhqmN/in.c" clearMat(&(wrk1)); # 181 "/tmp/tmp.rutwVzhqmN/in.c" clearMat(&(wrk2)); # 182 "/tmp/tmp.rutwVzhqmN/in.c" clearMat(&(a)); # 183 "/tmp/tmp.rutwVzhqmN/in.c" clearMat(&(b)); # 184 "/tmp/tmp.rutwVzhqmN/in.c" clearMat(&(c)); # 186 "/tmp/tmp.rutwVzhqmN/in.c" return 0; } } # 189 "/tmp/tmp.rutwVzhqmN/in.c" double fflop(int mx, int my, int mz) { # 192 "/tmp/tmp.rutwVzhqmN/in.c" return ((((double)(mz - (2))) * ((double)(my - (2)))) * ((double)(mx - (2)))) * (34.0); } # 195 "/tmp/tmp.rutwVzhqmN/in.c" double mflops(int nn, double cpu, double flop) { # 198 "/tmp/tmp.rutwVzhqmN/in.c" return ((flop / cpu) * (1.e-6)) * ((double)(nn)); } # 201 "/tmp/tmp.rutwVzhqmN/in.c" void set_param(int is[], char * size) { # 204 "/tmp/tmp.rutwVzhqmN/in.c" if((!(strcmp(size, "XS"))) || (!(strcmp(size, "xs")))) { # 205 "/tmp/tmp.rutwVzhqmN/in.c" (is[0]) = (32); # 206 "/tmp/tmp.rutwVzhqmN/in.c" (is[1]) = (32); # 207 "/tmp/tmp.rutwVzhqmN/in.c" (is[2]) = (64); # 208 "/tmp/tmp.rutwVzhqmN/in.c" return ; } # 210 "/tmp/tmp.rutwVzhqmN/in.c" if((!(strcmp(size, "S"))) || (!(strcmp(size, "s")))) { # 211 
/* set_param (continued): map the remaining size strings (case-insensitive S/M/L/XL)
 * to grid dimensions {rows, cols, deps}; any unrecognized input prints an error and
 * exits with status 6. newMat(): record the dimensions in the descriptor and allocate
 * mnums*mrows*mcols*mdeps floats in managed memory; returns 1 on success, 0 if the
 * allocation came back NULL. clearMat() (head): free the managed storage if present. */
"/tmp/tmp.rutwVzhqmN/in.c" (is[0]) = (64); # 212 "/tmp/tmp.rutwVzhqmN/in.c" (is[1]) = (64); # 213 "/tmp/tmp.rutwVzhqmN/in.c" (is[2]) = (128); # 214 "/tmp/tmp.rutwVzhqmN/in.c" return ; } # 216 "/tmp/tmp.rutwVzhqmN/in.c" if((!(strcmp(size, "M"))) || (!(strcmp(size, "m")))) { # 217 "/tmp/tmp.rutwVzhqmN/in.c" (is[0]) = (128); # 218 "/tmp/tmp.rutwVzhqmN/in.c" (is[1]) = (128); # 219 "/tmp/tmp.rutwVzhqmN/in.c" (is[2]) = (256); # 220 "/tmp/tmp.rutwVzhqmN/in.c" return ; } # 222 "/tmp/tmp.rutwVzhqmN/in.c" if((!(strcmp(size, "L"))) || (!(strcmp(size, "l")))) { # 223 "/tmp/tmp.rutwVzhqmN/in.c" (is[0]) = (256); # 224 "/tmp/tmp.rutwVzhqmN/in.c" (is[1]) = (256); # 225 "/tmp/tmp.rutwVzhqmN/in.c" (is[2]) = (512); # 226 "/tmp/tmp.rutwVzhqmN/in.c" return ; } # 228 "/tmp/tmp.rutwVzhqmN/in.c" if((!(strcmp(size, "XL"))) || (!(strcmp(size, "xl")))) { # 229 "/tmp/tmp.rutwVzhqmN/in.c" (is[0]) = (512); # 230 "/tmp/tmp.rutwVzhqmN/in.c" (is[1]) = (512); # 231 "/tmp/tmp.rutwVzhqmN/in.c" (is[2]) = (1024); # 232 "/tmp/tmp.rutwVzhqmN/in.c" return ; } else { # 234 "/tmp/tmp.rutwVzhqmN/in.c" printf("Invalid input character !!\n"); # 235 "/tmp/tmp.rutwVzhqmN/in.c" exit(6); } } # 239 "/tmp/tmp.rutwVzhqmN/in.c" int newMat(struct Mat * Mat, int mnums, int mrows, int mcols, int mdeps) { # 242 "/tmp/tmp.rutwVzhqmN/in.c" (Mat->mnums) = mnums; # 243 "/tmp/tmp.rutwVzhqmN/in.c" (Mat->mrows) = mrows; # 244 "/tmp/tmp.rutwVzhqmN/in.c" (Mat->mcols) = mcols; # 245 "/tmp/tmp.rutwVzhqmN/in.c" (Mat->mdeps) = mdeps; # 246 "/tmp/tmp.rutwVzhqmN/in.c" (Mat->m) = ((void * )(0)); # 247 "/tmp/tmp.rutwVzhqmN/in.c" (Mat->m) = ((float * )(malloc_managed((((mnums * mrows) * mcols) * mdeps) * (sizeof(float))))); # 250 "/tmp/tmp.rutwVzhqmN/in.c" return ((Mat->m) != ((void * )(0))) ?(1) : (0); } # 253 "/tmp/tmp.rutwVzhqmN/in.c" void clearMat(struct Mat * Mat) { # 256 "/tmp/tmp.rutwVzhqmN/in.c" if(Mat->m) { # 257 "/tmp/tmp.rutwVzhqmN/in.c" free_managed(Mat->m); } # 258 "/tmp/tmp.rutwVzhqmN/in.c" (Mat->m) = ((void * )(0)); # 259
/* clearMat (tail): zero the descriptor fields after freeing the payload. mat_set():
 * fill plane `l` of a matrix with the constant `val`, using explicit row-major
 * [l][i][j][k] index arithmetic into the flat float array. mat_set_init(): plane 0
 * gets the initial pressure profile i^2/(mrows-1)^2, constant over j and k (the
 * local `tt` is declared but unused in this translation). jacobi() (head): cache
 * the matrix dimensions and declare loop/accumulator locals — the solver body
 * continues past the end of this chunk. */
"/tmp/tmp.rutwVzhqmN/in.c" (Mat->mnums) = (0); # 260 "/tmp/tmp.rutwVzhqmN/in.c" (Mat->mcols) = (0); # 261 "/tmp/tmp.rutwVzhqmN/in.c" (Mat->mrows) = (0); # 262 "/tmp/tmp.rutwVzhqmN/in.c" (Mat->mdeps) = (0); } # 265 "/tmp/tmp.rutwVzhqmN/in.c" void mat_set(struct Mat * Mat, int l, float val) { # 268 "/tmp/tmp.rutwVzhqmN/in.c" int i; # 268 "/tmp/tmp.rutwVzhqmN/in.c" int j; # 268 "/tmp/tmp.rutwVzhqmN/in.c" int k; # 270 "/tmp/tmp.rutwVzhqmN/in.c" for(i = (0); i < (Mat->mrows); i++) { { # 271 "/tmp/tmp.rutwVzhqmN/in.c" for(j = (0); j < (Mat->mcols); j++) { { # 272 "/tmp/tmp.rutwVzhqmN/in.c" for(k = (0); k < (Mat->mdeps); k++) { { # 273 "/tmp/tmp.rutwVzhqmN/in.c" (*((Mat->m) + ((((((l * (Mat->mrows)) * (Mat->mcols)) * (Mat->mdeps)) + ((i * (Mat->mcols)) * (Mat->mdeps))) + (j * (Mat->mdeps))) + k))) = val; } } } } } } } # 276 "/tmp/tmp.rutwVzhqmN/in.c" void mat_set_init(struct Mat * Mat) { # 279 "/tmp/tmp.rutwVzhqmN/in.c" int i; # 279 "/tmp/tmp.rutwVzhqmN/in.c" int j; # 279 "/tmp/tmp.rutwVzhqmN/in.c" int k; # 279 "/tmp/tmp.rutwVzhqmN/in.c" int l; # 280 "/tmp/tmp.rutwVzhqmN/in.c" float tt; # 282 "/tmp/tmp.rutwVzhqmN/in.c" for(i = (0); i < (Mat->mrows); i++) { { # 283 "/tmp/tmp.rutwVzhqmN/in.c" for(j = (0); j < (Mat->mcols); j++) { { # 284 "/tmp/tmp.rutwVzhqmN/in.c" for(k = (0); k < (Mat->mdeps); k++) { { # 285 "/tmp/tmp.rutwVzhqmN/in.c" (*((Mat->m) + (((((((0) * (Mat->mrows)) * (Mat->mcols)) * (Mat->mdeps)) + ((i * (Mat->mcols)) * (Mat->mdeps))) + (j * (Mat->mdeps))) + k))) = (((float)(i * i)) / ((float)(((Mat->mrows) - (1)) * ((Mat->mrows) - (1))))); } } } } } } } # 300 "/tmp/tmp.rutwVzhqmN/in.c" float jacobi(int nn, struct Mat * a, struct Mat * b, struct Mat * c, struct Mat * p, struct Mat * bnd, struct Mat * wrk1, struct Mat * wrk2) { # 304 "/tmp/tmp.rutwVzhqmN/in.c" unsigned long mrows = p->mrows; # 305 "/tmp/tmp.rutwVzhqmN/in.c" unsigned long mcols = p->mcols; # 306 "/tmp/tmp.rutwVzhqmN/in.c" unsigned long mdeps = p->mdeps; # 308 "/tmp/tmp.rutwVzhqmN/in.c" int i; # 308
"/tmp/tmp.rutwVzhqmN/in.c" int j; # 308 "/tmp/tmp.rutwVzhqmN/in.c" int k; # 308 "/tmp/tmp.rutwVzhqmN/in.c" int n; # 308 "/tmp/tmp.rutwVzhqmN/in.c" int imax; # 308 "/tmp/tmp.rutwVzhqmN/in.c" int jmax; # 308 "/tmp/tmp.rutwVzhqmN/in.c" int kmax; # 309 "/tmp/tmp.rutwVzhqmN/in.c" float gosa; # 309 "/tmp/tmp.rutwVzhqmN/in.c" float s0; # 309 "/tmp/tmp.rutwVzhqmN/in.c" float ss; # 311 "/tmp/tmp.rutwVzhqmN/in.c" float * a_m; # 311 "/tmp/tmp.rutwVzhqmN/in.c" float * b_m; # 311 "/tmp/tmp.rutwVzhqmN/in.c" float * c_m; # 311 "/tmp/tmp.rutwVzhqmN/in.c" float * p_m; # 311 "/tmp/tmp.rutwVzhqmN/in.c" float * bnd_m; # 311 "/tmp/tmp.rutwVzhqmN/in.c" float * wrk1_m; # 311 "/tmp/tmp.rutwVzhqmN/in.c" float * wrk2_m; # 313 "/tmp/tmp.rutwVzhqmN/in.c" imax = (mrows - (1)); # 314 "/tmp/tmp.rutwVzhqmN/in.c" jmax = (mcols - (1)); # 315 "/tmp/tmp.rutwVzhqmN/in.c" kmax = (mdeps - (1)); # 317 "/tmp/tmp.rutwVzhqmN/in.c" a_m = (a->m); # 318 "/tmp/tmp.rutwVzhqmN/in.c" b_m = (b->m); # 319 "/tmp/tmp.rutwVzhqmN/in.c" c_m = (c->m); # 320 "/tmp/tmp.rutwVzhqmN/in.c" p_m = (p->m); # 321 "/tmp/tmp.rutwVzhqmN/in.c" bnd_m = (bnd->m); # 322 "/tmp/tmp.rutwVzhqmN/in.c" wrk1_m = (wrk1->m); # 323 "/tmp/tmp.rutwVzhqmN/in.c" wrk2_m = (wrk2->m); { #pragma omp parallel num_threads ( __MACC_NUMGPUS ) { int __macc_tnum = omp_get_thread_num(); __macc_set_gpu_num(__macc_tnum); { } } # 326 "/tmp/tmp.rutwVzhqmN/in.c" for(n = (0); n < nn; n++) { { # 327 "/tmp/tmp.rutwVzhqmN/in.c" gosa = (0.0); { static int __macc_region_is_changed = 1; static int __macc_multi = 1; static void * __macc_ptrs[7]; static int __macc_use_types[7]; static int * __macc_use_lb_sets[7]; static int * __macc_use_ub_sets[7]; static int __macc_def_types[7]; static int * __macc_def_lb_sets[7]; static int * __macc_def_ub_sets[7]; static int __macc_wrk1_m_def_ub_set[10]; static int __macc_wrk1_m_def_lb_set[10]; static int __macc_wrk1_m_use_ub_set[10]; static int __macc_wrk1_m_use_lb_set[10]; static int __macc_p_m_def_ub_set[10]; static int 
__macc_p_m_def_lb_set[10]; static int __macc_p_m_use_ub_set[10]; static int __macc_p_m_use_lb_set[10]; static int __macc_c_m_def_ub_set[10]; static int __macc_c_m_def_lb_set[10]; static int __macc_c_m_use_ub_set[10]; static int __macc_c_m_use_lb_set[10]; static int __macc_bnd_m_def_ub_set[10]; static int __macc_bnd_m_def_lb_set[10]; static int __macc_bnd_m_use_ub_set[10]; static int __macc_bnd_m_use_lb_set[10]; static int __macc_b_m_def_ub_set[10]; static int __macc_b_m_def_lb_set[10]; static int __macc_b_m_use_ub_set[10]; static int __macc_b_m_use_lb_set[10]; static int __macc_a_m_def_ub_set[10]; static int __macc_a_m_def_lb_set[10]; static int __macc_a_m_use_ub_set[10]; static int __macc_a_m_use_lb_set[10]; static int __macc_wrk2_m_def_ub_set[10]; static int __macc_wrk2_m_def_lb_set[10]; static int __macc_wrk2_m_use_ub_set[10]; static int __macc_wrk2_m_use_lb_set[10]; static int __macc_imax_last; static unsigned long __macc_mrows_last; static unsigned long __macc_mcols_last; static unsigned long __macc_mdeps_last; static int __macc_i_loop_lb_set[10]; static int __macc_i_loop_ub_set[10]; __macc_region_is_changed = (__macc_region_is_changed || ((mdeps != __macc_mdeps_last) || ((mcols != __macc_mcols_last) || ((mrows != __macc_mrows_last) || (imax != __macc_imax_last))))); if(__macc_region_is_changed) { __macc_multi = (1); __macc_region_is_changed = (0); { __macc_mdeps_last = mdeps; __macc_mcols_last = mcols; __macc_mrows_last = mrows; __macc_imax_last = imax; } { __macc_calc_loop_region(__macc_i_loop_lb_set, __macc_i_loop_ub_set, 1, imax - (1), 1, 1); } #pragma omp parallel num_threads ( __MACC_NUMGPUS ) { int __macc_gpu_num; int __macc_top_loop_lb; int __macc_top_loop_ub; __macc_gpu_num = (omp_get_thread_num()); { __macc_top_loop_lb = (__macc_i_loop_lb_set[__macc_gpu_num]); __macc_top_loop_ub = (__macc_i_loop_ub_set[__macc_gpu_num]); } { { { __macc_init_access_region(__macc_gpu_num, __macc_wrk2_m_use_lb_set, __macc_wrk2_m_use_ub_set); 
__macc_init_access_region(__macc_gpu_num, __macc_wrk2_m_def_lb_set, __macc_wrk2_m_def_ub_set); { } { __macc_update_access_region(__macc_gpu_num, __macc_wrk2_m_def_lb_set, __macc_wrk2_m_def_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_wrk2_m_def_lb_set, __macc_wrk2_m_def_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_wrk2_m_def_lb_set, __macc_wrk2_m_def_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_wrk2_m_def_lb_set, __macc_wrk2_m_def_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_wrk2_m_def_lb_set, __macc_wrk2_m_def_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_wrk2_m_def_lb_set, __macc_wrk2_m_def_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_wrk2_m_def_lb_set, __macc_wrk2_m_def_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_wrk2_m_def_lb_set, __macc_wrk2_m_def_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); } __macc_adjust_data_region(wrk2_m, __macc_gpu_num, __macc_wrk2_m_use_lb_set, __macc_wrk2_m_use_ub_set); __macc_adjust_data_region(wrk2_m, __macc_gpu_num, __macc_wrk2_m_def_lb_set, __macc_wrk2_m_def_ub_set); } (__macc_ptrs[0]) = 
wrk2_m; (__macc_use_types[0]) = (1); (__macc_use_lb_sets[0]) = __macc_wrk2_m_use_lb_set; (__macc_use_ub_sets[0]) = __macc_wrk2_m_use_ub_set; (__macc_def_types[0]) = (2); (__macc_def_lb_sets[0]) = __macc_wrk2_m_def_lb_set; (__macc_def_ub_sets[0]) = __macc_wrk2_m_def_ub_set; } { { __macc_init_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set); __macc_init_access_region(__macc_gpu_num, __macc_a_m_def_lb_set, __macc_a_m_def_ub_set); { __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((3) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((3) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((3) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((3) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((3) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((3) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((3) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((3) * mrows) * mcols) * mdeps) + 
((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((1) * mrows) 
* mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, 
__macc_a_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); } { } __macc_adjust_data_region(a_m, __macc_gpu_num, __macc_a_m_use_lb_set, __macc_a_m_use_ub_set); __macc_adjust_data_region(a_m, __macc_gpu_num, __macc_a_m_def_lb_set, __macc_a_m_def_ub_set); } (__macc_ptrs[1]) = a_m; (__macc_use_types[1]) = (2); (__macc_use_lb_sets[1]) = __macc_a_m_use_lb_set; (__macc_use_ub_sets[1]) = __macc_a_m_use_ub_set; (__macc_def_types[1]) = (1); (__macc_def_lb_sets[1]) = __macc_a_m_def_lb_set; (__macc_def_ub_sets[1]) = __macc_a_m_def_ub_set; } { { __macc_init_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set); __macc_init_access_region(__macc_gpu_num, __macc_b_m_def_lb_set, __macc_b_m_def_ub_set); { __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - 
(1))); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + 
((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub 
* mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); } { } __macc_adjust_data_region(b_m, __macc_gpu_num, __macc_b_m_use_lb_set, __macc_b_m_use_ub_set); __macc_adjust_data_region(b_m, __macc_gpu_num, __macc_b_m_def_lb_set, __macc_b_m_def_ub_set); } (__macc_ptrs[2]) = b_m; (__macc_use_types[2]) = (2); (__macc_use_lb_sets[2]) = __macc_b_m_use_lb_set; (__macc_use_ub_sets[2]) = __macc_b_m_use_ub_set; (__macc_def_types[2]) = (1); (__macc_def_lb_sets[2]) = __macc_b_m_def_lb_set; (__macc_def_ub_sets[2]) = __macc_b_m_def_ub_set; } { { __macc_init_access_region(__macc_gpu_num, __macc_bnd_m_use_lb_set, __macc_bnd_m_use_ub_set); __macc_init_access_region(__macc_gpu_num, __macc_bnd_m_def_lb_set, __macc_bnd_m_def_ub_set); { __macc_update_access_region(__macc_gpu_num, __macc_bnd_m_use_lb_set, __macc_bnd_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_bnd_m_use_lb_set, __macc_bnd_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_bnd_m_use_lb_set, __macc_bnd_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_bnd_m_use_lb_set, __macc_bnd_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); 
__macc_update_access_region(__macc_gpu_num, __macc_bnd_m_use_lb_set, __macc_bnd_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_bnd_m_use_lb_set, __macc_bnd_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_bnd_m_use_lb_set, __macc_bnd_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_bnd_m_use_lb_set, __macc_bnd_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); } { } __macc_adjust_data_region(bnd_m, __macc_gpu_num, __macc_bnd_m_use_lb_set, __macc_bnd_m_use_ub_set); __macc_adjust_data_region(bnd_m, __macc_gpu_num, __macc_bnd_m_def_lb_set, __macc_bnd_m_def_ub_set); } (__macc_ptrs[3]) = bnd_m; (__macc_use_types[3]) = (2); (__macc_use_lb_sets[3]) = __macc_bnd_m_use_lb_set; (__macc_use_ub_sets[3]) = __macc_bnd_m_use_ub_set; (__macc_def_types[3]) = (1); (__macc_def_lb_sets[3]) = __macc_bnd_m_def_lb_set; (__macc_def_ub_sets[3]) = __macc_bnd_m_def_ub_set; } { { __macc_init_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set); __macc_init_access_region(__macc_gpu_num, __macc_c_m_def_lb_set, __macc_c_m_def_ub_set); { __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((2) 
* mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((2) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, 
__macc_c_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((1) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, 
__macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); } { } __macc_adjust_data_region(c_m, __macc_gpu_num, __macc_c_m_use_lb_set, __macc_c_m_use_ub_set); __macc_adjust_data_region(c_m, __macc_gpu_num, __macc_c_m_def_lb_set, __macc_c_m_def_ub_set); } (__macc_ptrs[4]) = c_m; (__macc_use_types[4]) = (2); (__macc_use_lb_sets[4]) = __macc_c_m_use_lb_set; (__macc_use_ub_sets[4]) = __macc_c_m_use_ub_set; (__macc_def_types[4]) = (1); (__macc_def_lb_sets[4]) = __macc_c_m_def_lb_set; (__macc_def_ub_sets[4]) = __macc_c_m_def_ub_set; } { { __macc_init_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set); __macc_init_access_region(__macc_gpu_num, __macc_p_m_def_lb_set, __macc_p_m_def_ub_set); { __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) 
* mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + 
(((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + ((1) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + ((1) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + ((1) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + ((1) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) 
* mdeps)) + ((jmax - (1)) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + ((1) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + ((1) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + ((1) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + ((1) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + 
((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb + (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub + (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, 
__macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * 
mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + ((1) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + ((1) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + 
(((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + ((1) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + ((1) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + ((1) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + ((1) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) 
* mcols) * mdeps)) + ((1) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + ((1) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_lb - (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); 
__macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + (((__macc_top_loop_ub - (1)) * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, 
__macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, 
__macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((1) + (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((jmax - (1)) + (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((1) - 
(1)) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + ((1) - 
(1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, 
__macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((1) - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + (((jmax - (1)) - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * 
mdeps)) + ((1) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((1) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((kmax - (1)) + (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((1) - (1))); __macc_update_access_region(__macc_gpu_num, 
__macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + ((kmax - (1)) - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); } { } __macc_adjust_data_region(p_m, __macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set); __macc_adjust_data_region(p_m, __macc_gpu_num, __macc_p_m_def_lb_set, __macc_p_m_def_ub_set); } 
(__macc_ptrs[5]) = p_m; (__macc_use_types[5]) = (2); (__macc_use_lb_sets[5]) = __macc_p_m_use_lb_set; (__macc_use_ub_sets[5]) = __macc_p_m_use_ub_set; (__macc_def_types[5]) = (1); (__macc_def_lb_sets[5]) = __macc_p_m_def_lb_set; (__macc_def_ub_sets[5]) = __macc_p_m_def_ub_set; } { { __macc_init_access_region(__macc_gpu_num, __macc_wrk1_m_use_lb_set, __macc_wrk1_m_use_ub_set); __macc_init_access_region(__macc_gpu_num, __macc_wrk1_m_def_lb_set, __macc_wrk1_m_def_ub_set); { __macc_update_access_region(__macc_gpu_num, __macc_wrk1_m_use_lb_set, __macc_wrk1_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_wrk1_m_use_lb_set, __macc_wrk1_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_wrk1_m_use_lb_set, __macc_wrk1_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_wrk1_m_use_lb_set, __macc_wrk1_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_wrk1_m_use_lb_set, __macc_wrk1_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_wrk1_m_use_lb_set, __macc_wrk1_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_wrk1_m_use_lb_set, __macc_wrk1_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_wrk1_m_use_lb_set, 
__macc_wrk1_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); } { } __macc_adjust_data_region(wrk1_m, __macc_gpu_num, __macc_wrk1_m_use_lb_set, __macc_wrk1_m_use_ub_set); __macc_adjust_data_region(wrk1_m, __macc_gpu_num, __macc_wrk1_m_def_lb_set, __macc_wrk1_m_def_ub_set); } (__macc_ptrs[6]) = wrk1_m; (__macc_use_types[6]) = (2); (__macc_use_lb_sets[6]) = __macc_wrk1_m_use_lb_set; (__macc_use_ub_sets[6]) = __macc_wrk1_m_use_ub_set; (__macc_def_types[6]) = (1); (__macc_def_lb_sets[6]) = __macc_wrk1_m_def_lb_set; (__macc_def_ub_sets[6]) = __macc_wrk1_m_def_ub_set; } } } if(__macc_region_is_overlapping(__macc_wrk2_m_def_lb_set, __macc_wrk2_m_def_ub_set)) { __macc_multi = (0); { __macc_rewrite_loop_region_into_single(__macc_i_loop_lb_set, __macc_i_loop_ub_set); { __macc_rewrite_data_region_into_single(__macc_wrk1_m_use_lb_set, __macc_wrk1_m_use_ub_set); __macc_rewrite_data_region_into_single(__macc_p_m_use_lb_set, __macc_p_m_use_ub_set); __macc_rewrite_data_region_into_single(__macc_c_m_use_lb_set, __macc_c_m_use_ub_set); __macc_rewrite_data_region_into_single(__macc_bnd_m_use_lb_set, __macc_bnd_m_use_ub_set); __macc_rewrite_data_region_into_single(__macc_b_m_use_lb_set, __macc_b_m_use_ub_set); __macc_rewrite_data_region_into_single(__macc_a_m_use_lb_set, __macc_a_m_use_ub_set); __macc_rewrite_data_region_into_single(__macc_wrk2_m_def_lb_set, __macc_wrk2_m_def_ub_set); } } } } #pragma omp parallel num_threads ( __MACC_NUMGPUS ) reduction ( + : gosa ) private ( i , j , k ) { int __macc_tnum = omp_get_thread_num(); __macc_set_gpu_num(__macc_tnum); { int __macc_num_gangs; int __macc_top_loop_lb; int __macc_top_loop_ub; __macc_top_loop_lb = (__macc_i_loop_lb_set[__macc_tnum]); __macc_top_loop_ub = (__macc_i_loop_ub_set[__macc_tnum]); __macc_set_data_region_multi(__macc_tnum, __macc_multi, 7, __macc_ptrs, __macc_use_types, __macc_use_lb_sets, __macc_use_ub_sets, __macc_def_types, 
__macc_def_lb_sets, __macc_def_ub_sets); #pragma omp barrier #pragma acc parallel present ( a_m , b_m , c_m , p_m , bnd_m , wrk1_m , wrk2_m ) vector_length ( 256 ) reduction ( + : gosa ) #pragma acc loop independent collapse ( 3 ) # 330 "/tmp/tmp.rutwVzhqmN/in.c" for(i = __macc_top_loop_lb; i <= __macc_top_loop_ub; i++) { { # 331 "/tmp/tmp.rutwVzhqmN/in.c" for(j = (1); j < jmax; j++) { { # 332 "/tmp/tmp.rutwVzhqmN/in.c" for(k = (1); k < kmax; k++) { { # 333 "/tmp/tmp.rutwVzhqmN/in.c" s0 = (((((((((((a_m[(((((((0) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + k)]) * (p_m[(((((((0) * mrows) * mcols) * mdeps) + (((i + (1)) * mcols) * mdeps)) + (j * mdeps)) + k)])) + ((a_m[(((((((1) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + k)]) * (p_m[(((((((0) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + ((j + (1)) * mdeps)) + k)]))) + ((a_m[(((((((2) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + k)]) * (p_m[(((((((0) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + (k + (1)))]))) + ((b_m[(((((((0) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + k)]) * ((((p_m[(((((((0) * mrows) * mcols) * mdeps) + (((i + (1)) * mcols) * mdeps)) + ((j + (1)) * mdeps)) + k)]) - (p_m[(((((((0) * mrows) * mcols) * mdeps) + (((i + (1)) * mcols) * mdeps)) + ((j - (1)) * mdeps)) + k)])) - (p_m[(((((((0) * mrows) * mcols) * mdeps) + (((i - (1)) * mcols) * mdeps)) + ((j + (1)) * mdeps)) + k)])) + (p_m[(((((((0) * mrows) * mcols) * mdeps) + (((i - (1)) * mcols) * mdeps)) + ((j - (1)) * mdeps)) + k)])))) + ((b_m[(((((((1) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + k)]) * ((((p_m[(((((((0) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + ((j + (1)) * mdeps)) + (k + (1)))]) - (p_m[(((((((0) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + ((j - (1)) * mdeps)) + (k + (1)))])) - (p_m[(((((((0) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + ((j + (1)) 
* mdeps)) + (k - (1)))])) + (p_m[(((((((0) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + ((j - (1)) * mdeps)) + (k - (1)))])))) + ((b_m[(((((((2) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + k)]) * ((((p_m[(((((((0) * mrows) * mcols) * mdeps) + (((i + (1)) * mcols) * mdeps)) + (j * mdeps)) + (k + (1)))]) - (p_m[(((((((0) * mrows) * mcols) * mdeps) + (((i - (1)) * mcols) * mdeps)) + (j * mdeps)) + (k + (1)))])) - (p_m[(((((((0) * mrows) * mcols) * mdeps) + (((i + (1)) * mcols) * mdeps)) + (j * mdeps)) + (k - (1)))])) + (p_m[(((((((0) * mrows) * mcols) * mdeps) + (((i - (1)) * mcols) * mdeps)) + (j * mdeps)) + (k - (1)))])))) + ((c_m[(((((((0) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + k)]) * (p_m[(((((((0) * mrows) * mcols) * mdeps) + (((i - (1)) * mcols) * mdeps)) + (j * mdeps)) + k)]))) + ((c_m[(((((((1) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + k)]) * (p_m[(((((((0) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + ((j - (1)) * mdeps)) + k)]))) + ((c_m[(((((((2) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + k)]) * (p_m[(((((((0) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + (k - (1)))]))) + (wrk1_m[(((((((0) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + k)])); # 350 "/tmp/tmp.rutwVzhqmN/in.c" ss = (((s0 * (a_m[(((((((3) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + k)])) - (p_m[(((((((0) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + k)])) * (bnd_m[(((((((0) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + k)])); # 352 "/tmp/tmp.rutwVzhqmN/in.c" gosa += (ss * ss); # 353 "/tmp/tmp.rutwVzhqmN/in.c" (wrk2_m[(((((((0) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + k)]) = ((p_m[(((((((0) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + k)]) + (omega * ss)); } } } } } } } } } { static int 
__macc_region_is_changed = 1; static int __macc_multi = 1; static void * __macc_ptrs[2]; static int __macc_use_types[2]; static int * __macc_use_lb_sets[2]; static int * __macc_use_ub_sets[2]; static int __macc_def_types[2]; static int * __macc_def_lb_sets[2]; static int * __macc_def_ub_sets[2]; static int __macc_wrk2_m_def_ub_set[10]; static int __macc_wrk2_m_def_lb_set[10]; static int __macc_wrk2_m_use_ub_set[10]; static int __macc_wrk2_m_use_lb_set[10]; static int __macc_p_m_def_ub_set[10]; static int __macc_p_m_def_lb_set[10]; static int __macc_p_m_use_ub_set[10]; static int __macc_p_m_use_lb_set[10]; static int __macc_imax_last; static unsigned long __macc_mrows_last; static unsigned long __macc_mcols_last; static unsigned long __macc_mdeps_last; static int __macc_i_loop_lb_set[10]; static int __macc_i_loop_ub_set[10]; __macc_region_is_changed = (__macc_region_is_changed || ((mdeps != __macc_mdeps_last) || ((mcols != __macc_mcols_last) || ((mrows != __macc_mrows_last) || (imax != __macc_imax_last))))); if(__macc_region_is_changed) { __macc_multi = (1); __macc_region_is_changed = (0); { __macc_mdeps_last = mdeps; __macc_mcols_last = mcols; __macc_mrows_last = mrows; __macc_imax_last = imax; } { __macc_calc_loop_region(__macc_i_loop_lb_set, __macc_i_loop_ub_set, 1, imax - (1), 1, 1); } #pragma omp parallel num_threads ( __MACC_NUMGPUS ) { int __macc_gpu_num; int __macc_top_loop_lb; int __macc_top_loop_ub; __macc_gpu_num = (omp_get_thread_num()); { __macc_top_loop_lb = (__macc_i_loop_lb_set[__macc_gpu_num]); __macc_top_loop_ub = (__macc_i_loop_ub_set[__macc_gpu_num]); } { { { __macc_init_access_region(__macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set); __macc_init_access_region(__macc_gpu_num, __macc_p_m_def_lb_set, __macc_p_m_def_ub_set); { } { __macc_update_access_region(__macc_gpu_num, __macc_p_m_def_lb_set, __macc_p_m_def_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (1)); 
__macc_update_access_region(__macc_gpu_num, __macc_p_m_def_lb_set, __macc_p_m_def_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_def_lb_set, __macc_p_m_def_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_def_lb_set, __macc_p_m_def_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_def_lb_set, __macc_p_m_def_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_def_lb_set, __macc_p_m_def_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_p_m_def_lb_set, __macc_p_m_def_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_p_m_def_lb_set, __macc_p_m_def_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); } __macc_adjust_data_region(p_m, __macc_gpu_num, __macc_p_m_use_lb_set, __macc_p_m_use_ub_set); __macc_adjust_data_region(p_m, __macc_gpu_num, __macc_p_m_def_lb_set, __macc_p_m_def_ub_set); } (__macc_ptrs[0]) = p_m; (__macc_use_types[0]) = (1); (__macc_use_lb_sets[0]) = __macc_p_m_use_lb_set; (__macc_use_ub_sets[0]) = __macc_p_m_use_ub_set; (__macc_def_types[0]) = (2); (__macc_def_lb_sets[0]) = __macc_p_m_def_lb_set; (__macc_def_ub_sets[0]) = __macc_p_m_def_ub_set; } { { __macc_init_access_region(__macc_gpu_num, __macc_wrk2_m_use_lb_set, __macc_wrk2_m_use_ub_set); 
__macc_init_access_region(__macc_gpu_num, __macc_wrk2_m_def_lb_set, __macc_wrk2_m_def_ub_set); { __macc_update_access_region(__macc_gpu_num, __macc_wrk2_m_use_lb_set, __macc_wrk2_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_wrk2_m_use_lb_set, __macc_wrk2_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_wrk2_m_use_lb_set, __macc_wrk2_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_wrk2_m_use_lb_set, __macc_wrk2_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_lb * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_wrk2_m_use_lb_set, __macc_wrk2_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_wrk2_m_use_lb_set, __macc_wrk2_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((1) * mdeps)) + (kmax - (1))); __macc_update_access_region(__macc_gpu_num, __macc_wrk2_m_use_lb_set, __macc_wrk2_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (1)); __macc_update_access_region(__macc_gpu_num, __macc_wrk2_m_use_lb_set, __macc_wrk2_m_use_ub_set, ((((((0) * mrows) * mcols) * mdeps) + ((__macc_top_loop_ub * mcols) * mdeps)) + ((jmax - (1)) * mdeps)) + (kmax - (1))); } { } __macc_adjust_data_region(wrk2_m, __macc_gpu_num, __macc_wrk2_m_use_lb_set, __macc_wrk2_m_use_ub_set); __macc_adjust_data_region(wrk2_m, __macc_gpu_num, __macc_wrk2_m_def_lb_set, __macc_wrk2_m_def_ub_set); } (__macc_ptrs[1]) = 
wrk2_m; (__macc_use_types[1]) = (2); (__macc_use_lb_sets[1]) = __macc_wrk2_m_use_lb_set; (__macc_use_ub_sets[1]) = __macc_wrk2_m_use_ub_set; (__macc_def_types[1]) = (1); (__macc_def_lb_sets[1]) = __macc_wrk2_m_def_lb_set; (__macc_def_ub_sets[1]) = __macc_wrk2_m_def_ub_set; } } } if(__macc_region_is_overlapping(__macc_p_m_def_lb_set, __macc_p_m_def_ub_set)) { __macc_multi = (0); { __macc_rewrite_loop_region_into_single(__macc_i_loop_lb_set, __macc_i_loop_ub_set); { __macc_rewrite_data_region_into_single(__macc_wrk2_m_use_lb_set, __macc_wrk2_m_use_ub_set); __macc_rewrite_data_region_into_single(__macc_p_m_def_lb_set, __macc_p_m_def_ub_set); } } } } #pragma omp parallel num_threads ( __MACC_NUMGPUS ) private ( i , j , k ) { int __macc_tnum = omp_get_thread_num(); __macc_set_gpu_num(__macc_tnum); { int __macc_num_gangs; int __macc_top_loop_lb; int __macc_top_loop_ub; __macc_top_loop_lb = (__macc_i_loop_lb_set[__macc_tnum]); __macc_top_loop_ub = (__macc_i_loop_ub_set[__macc_tnum]); __macc_set_data_region_multi(__macc_tnum, __macc_multi, 2, __macc_ptrs, __macc_use_types, __macc_use_lb_sets, __macc_use_ub_sets, __macc_def_types, __macc_def_lb_sets, __macc_def_ub_sets); #pragma omp barrier #pragma acc parallel present ( a_m , b_m , c_m , p_m , bnd_m , wrk1_m , wrk2_m ) vector_length ( 256 ) #pragma acc loop independent collapse ( 3 ) # 357 "/tmp/tmp.rutwVzhqmN/in.c" for(i = __macc_top_loop_lb; i <= __macc_top_loop_ub; i++) { { # 358 "/tmp/tmp.rutwVzhqmN/in.c" for(j = (1); j < jmax; j++) { { # 359 "/tmp/tmp.rutwVzhqmN/in.c" for(k = (1); k < kmax; k++) { { # 360 "/tmp/tmp.rutwVzhqmN/in.c" (p_m[(((((((0) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + k)]) = (wrk2_m[(((((((0) * mrows) * mcols) * mdeps) + ((i * mcols) * mdeps)) + (j * mdeps)) + k)]); } } } } } } } } } } } #pragma omp parallel num_threads ( __MACC_NUMGPUS ) { int __macc_tnum = omp_get_thread_num(); __macc_set_gpu_num(__macc_tnum); { } } } # 364 "/tmp/tmp.rutwVzhqmN/in.c" return gosa; }
pacset_gb_classifier.h
#ifndef PACSET_GB #define PACSET_GB #include <vector> #include <unordered_set> #include <fstream> #include "pacset_base_model.h" #include "packer.h" #include "config.h" #include "json_reader.h" #include "utils.h" #include "node.h" #include "MemoryMapped.h" #define BLOCK_LOGGING 1 #define LAT_LOGGING 1 template <typename T, typename F> class PacsetGradientBoostedClassifier: public PacsetBaseModel<T, F> { public: inline void setMembers(const std::vector<int> &bin_sizes, const std::vector<int> &bin_node_sizes, const std::vector<std::vector<int>> &bin_start){ PacsetBaseModel<T, F>::bin_sizes.clear(); std::copy(bin_sizes.begin(), bin_sizes.end(), back_inserter(PacsetBaseModel<T, F>::bin_sizes)); std::copy(bin_node_sizes.begin(), bin_node_sizes.end(), back_inserter(PacsetBaseModel<T, F>::bin_node_sizes)); for (auto i: bin_start) PacsetBaseModel<T, F>::bin_start.push_back(i); } inline void setBinNodeSizes(int pos, int siz){ PacsetBaseModel<T, F>::bin_node_sizes[pos] = siz; } inline void loadModel() { JSONReader<T, F> J; //J.convertXG(PacsetBaseModel<T, F>::bins, J.convertSklToBinsRapidJson(PacsetBaseModel<T, F>::bins, PacsetBaseModel<T, F>::bin_sizes, PacsetBaseModel<T, F>::bin_start, PacsetBaseModel<T, F>::bin_node_sizes); } inline void pack(){ std::string layout = Config::getValue("layout"); auto bin = PacsetBaseModel<T, F>::bins[0]; int num_bins = std::stoi(Config::getValue("numthreads")); for(int i=0; i<num_bins; ++i){ Packer<T, F> packer_obj(layout); if(Config::getValue("intertwine") != std::string("notfound")) packer_obj.setDepthIntertwined(std::atoi(Config::getValue("intertwine").c_str())); //should pack in place packer_obj.pack(PacsetBaseModel<T, F>::bins[i], PacsetBaseModel<T, F>::bin_sizes[i], PacsetBaseModel<T, F>::bin_start[i] ); setBinNodeSizes(i, PacsetBaseModel<T, F>::bins[i].size()); } } inline int mmapAndPredict(const std::vector<T>& observation, std::vector<double> &preds, int obsnum) { int num_classes = std::stoi(Config::getValue("numclasses")); int 
num_threads = std::stoi(Config::getValue("numthreads")); int num_bins = PacsetBaseModel<T, F>::bin_sizes.size(); int total_num_trees = 0; std::for_each( PacsetBaseModel<T, F>::bin_sizes.begin(), PacsetBaseModel<T, F>::bin_sizes.end(), [&] (int n) { total_num_trees += n; }); std::vector<double> elapsed_arr; int num_boosters = total_num_trees / num_classes; std::vector<std::vector<float> > result_mat( num_boosters, std::vector<float>(num_classes)) ; std::vector<float> pred_mat(num_classes); std::string modelfname = Config::getValue("modelfilename"); MemoryMapped mmapped_obj(modelfname.c_str(), 0); Node<T, F> *data = (Node<T, F>*)mmapped_obj.getData(); std::unordered_set<int> blocks_accessed; int next_node = 0; int block_offset = 0; int offset = 0; double leaf_sum = 0; std::vector<int> offsets; std::vector<int> tree_offsets; int curr_offset = 0; int bin_tree_offset = 0; total_num_trees=0; float pred_val = 0; for (auto val: PacsetBaseModel<T, F>::bin_node_sizes){ offsets.push_back(curr_offset); curr_offset += val; } tree_offsets.push_back(0); int it = 0; for(auto val: PacsetBaseModel<T, F>::bin_sizes){ tree_offsets.push_back(tree_offsets[it++] + val); } #pragma omp parallel for num_threads(num_threads) for(int bin_counter=0; bin_counter<num_bins; ++bin_counter){ int num_estimators = PacsetBaseModel<T, F>::bin_sizes[bin_counter] / num_classes; int block_number = 0; Node<T, F> *bin = data + offsets[bin_counter]; std::vector<int> curr_node(PacsetBaseModel<T, F>::bin_sizes[bin_counter]); int i, feature_num=0, number_not_in_leaf=0; T feature_val; int siz = PacsetBaseModel<T, F>::bin_sizes[bin_counter]; total_num_trees += siz; bin_tree_offset = tree_offsets[bin_counter]; for(i=0; i<siz; ++i){ curr_node[i] = PacsetBaseModel<T, F>::bin_start[bin_counter][i]; __builtin_prefetch(&bin[curr_node[i]], 0, 3); #ifdef BLOCK_LOGGING block_number = (curr_node[i] + block_offset) / BLOCK_SIZE; #pragma omp critical blocks_accessed.insert(block_number); #endif } do{ number_not_in_leaf = 0; 
for( i=0; i<siz; ++i){ if(curr_node[i] >= 0){ #ifdef BLOCK_LOGGING block_number = (curr_node[i] + block_offset)/ BLOCK_SIZE; #pragma omp critical blocks_accessed.insert(block_number); #endif feature_num = bin[curr_node[i]].getFeature(); feature_val = observation[feature_num]; if(bin[curr_node[i]].getLeft() == -1){ if(num_classes == 2) pred_val += bin[curr_node[i]].getThreshold(); pred_mat[(bin_tree_offset+i) % num_classes] += bin[curr_node[i]].getThreshold(); curr_node[i] = -1; } else { curr_node[i] = bin[curr_node[i]].nextNode(feature_val); __builtin_prefetch(&bin[curr_node[i]], 0, 3); ++number_not_in_leaf; } } } }while(number_not_in_leaf); #pragma omp critical { block_offset += PacsetBaseModel<T, F>::bin_node_sizes[bin_counter]; } } if(num_classes == 2){ preds.clear(); float val = logit(pred_val/((float)total_num_trees/2.0)); if(val > 0.5) preds.push_back(1.0); else preds.push_back(0.0); }else{ std::vector<float>result_mat_proba(pred_mat); //for (auto &ele : pred_mat) // ele = (float)ele/(float)total_num_trees; //std::vector<float>result_mat_proba; //result_mat_proba = logit(pred_mat); int max = result_mat_proba[0]; int maxid = 0; for(int i=0; i<num_classes; ++i){ if(result_mat_proba[i] > max){ maxid = i; max = result_mat_proba[i]; } } preds.clear(); preds.push_back((double)maxid); } preds.push_back((double)1); #ifdef BLOCK_LOGGING return blocks_accessed.size(); #else return 0; #endif } inline void predict(const std::vector<std::vector<T>>& observation, std::vector<int>& preds, std::vector<int>&results, bool mmap) { } inline void predict(const std::vector<std::vector<T>>& observation, std::vector<double>& preds, std::vector<double>&results, bool mmap) { //Predicts the class for a vector of observations //By calling predict for a single observation and //tallying the observations // int num_classes = std::stoi(Config::getValue("numclasses")); int num_bins; std::vector<double> elapsed_arr; int blocks; std::vector<int> num_blocks; int ct=1; float cumi_time = 0; 
for(auto single_obs : observation){ auto start = std::chrono::steady_clock::now(); if (mmap) blocks = mmapAndPredict(single_obs, preds, ct); else{ blocks = mmapAndPredict(single_obs, preds, ct); } num_blocks.push_back(blocks); results.push_back((double)preds[0] / (double)preds[1] ); auto end = std::chrono::steady_clock::now(); ct+=1; } } inline void serialize() { auto bins = PacsetBaseModel<T, F>::bins; int num_classes = std::stoi(Config::getValue("numclassesactual")); int num_bins = bins.size(); std::vector<int> bin_sizes = PacsetBaseModel<T, F>::bin_sizes; std::vector<int> bin_node_sizes = PacsetBaseModel<T, F>::bin_node_sizes; std::vector<std::vector<int>> bin_start = PacsetBaseModel<T, F>::bin_start; std::string format = Config::getValue("format"); //Write the metadata needed to reconstruct bins and for prediction //TODO: change filename std::string filename; if(Config::getValue("metadatafilename") == std::string("notfound")) filename = "metadata.txt"; else filename = Config::getValue("metadatafilename"); std::fstream fout; fout.open(filename, std::ios::out ); //Number of classes fout<<num_classes<<"\n"; //Number of bins fout<<num_bins<<"\n"; //Number of trees in each bin for(auto i: bin_sizes){ fout<<i<<"\n"; } //Number of nodes in each bin for(auto i: bin_node_sizes){ fout<<i<<"\n"; } //start position of each bin for(auto bin: bin_start){ for(auto tree_start: bin){ fout<<tree_start<<"\n"; } } fout<<Config::getValue("initModelFilename")<<"\n"; fout.close(); if(format == std::string("binary")){ std::string modelfname = Config::getValue("packfilename"); std::string filename; if(modelfname != std::string("notfound")) filename = modelfname; else filename = "packedmodel.bin"; //Write the nodes fout.open(filename, std::ios::binary | std::ios::out ); Node<T, F> node_to_write; for(auto bin: bins){ for(auto node: bin){ node_to_write = node; fout.write((char*)&node_to_write, sizeof(node_to_write)); } } fout.close(); } else{ //Write the nodes std::string modelfname = 
Config::getValue("packfilename"); std::string filename; if(modelfname != std::string("notfound")) filename = modelfname; else filename = "packedmodel.txt"; std::cout<<"filename: "<<filename <<"\n"; fout.open(filename, std::ios::out ); for(auto bin: bins){ for(auto node: bin){ fout<<node.getLeft()<<", "<<node.getRight() <<", "<<node.getFeature()<<", "<<node.getThreshold()<<"\n"; } } fout.close(); } } inline void deserialize(){ //Write the metadata needed to reconstruct bins and for prediction //TODO: change filename int num_classes, num_bins; std::string filename = Config::getValue("metadatafilename"); //std::string filename = "metadata.txt"; std::fstream f; f.open(filename, std::ios::in ); //Number of classes f>>num_classes; Config::setConfigItem("numclasses", std::to_string(num_classes)); //Number of bins f>>num_bins; Config::setConfigItem("numthreads", std::to_string(num_bins)); std::vector<int> num_trees_bin; std::vector<int> num_nodes_bin; std::vector<std::vector<int>> bin_tree_start; int val; //Number of trees in each bin for(int i=0; i<num_bins; ++i){ f>>val; num_trees_bin.push_back(val); } //Number of nodes in each bin for(int i=0; i<num_bins; ++i){ f>>val; num_nodes_bin.push_back(val); } std::vector<int> temp; //start position of each bin for(int i=0; i<num_bins; ++i){ for(int j=0; j<num_trees_bin[i]; ++j){ f>>val; temp.push_back(val); } bin_tree_start.push_back(temp); temp.clear(); } f.close(); setMembers(num_trees_bin, num_nodes_bin, bin_tree_start); } }; #endif
pi-parallel.c
#include <stdio.h> #include <math.h> // integrate $\sqrt{1-x^2}$ for $x \in (a,b)$ with $n$ sub-intervals. double integrate_semicircle(double a, double b, int n) { double sum = 0.0; double h = (b-a) / n; double x0 = a + h/2.0; #pragma omp parallel for reduction(+: sum) for (int i = 0; i < n; i++) { double x = x0 + i*h; sum += sqrt(1-x*x); } return sum * h; } int main() { double pi = 2 * integrate_semicircle(-1, 1, 1000*1000*100); printf("pi = %g\n", pi); }
GB_unaryop__abs_fp64_int64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_fp64_int64
// op(A') function:  GB_tran__abs_fp64_int64

// C type:   double
// A type:   int64_t
// cast:     double cij = (double) aij
// unaryop:  cij = fabs (aij)

// NOTE: the GB_* macros below are expanded both by the loop in this file and
// by the shared template GB_unaryop_transpose.c included further down, so
// their names and shapes must stay exactly as generated.

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = fabs (x) ;

// casting
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = fabs ((double) aij) to each of the anz entries of Ax,
// writing the result into Cx.  Entries are independent, so the loop is
// parallelized statically over nthreads.
GrB_Info GB_unop__abs_fp64_int64
(
    double *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The function body is supplied by the template file included below, which
// expands the GB_* macros defined above; this file only instantiates it for
// the fp64/int64 abs operator.
GrB_Info GB_tran__abs_fp64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; struct OMPTraitProperty; struct OMPTraitSelector; struct OMPTraitSet; class OMPTraitInfo; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. 
/// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class ParsingOpenMPDirectiveRAII; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. 
IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> FloatControlHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFENVHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler; 
std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// Parsing OpenMP directive mode. bool OpenMPDirectiveParsing = false; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// RAII class that manages the template parameter depth. class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } void setAddedDepth(unsigned D) { Depth = Depth - AddedLevels + D; AddedLevels = D; } unsigned getDepth() const { return Depth; } unsigned getOriginalDepth() const { return Depth - AddedLevels; } }; /// Factory object for creating ParsedAttr objects. 
AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; void MaybeDestroyTemplateIds() { if (!TemplateIds.empty() && (Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens())) DestroyTemplateIds(); } void DestroyTemplateIds(); /// RAII object to destroy TemplateIdAnnotations where possible, from a /// likely-good position during parsing. struct DestroyTemplateIdAnnotationsRAIIObj { Parser &Self; DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {} ~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); } }; /// Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. 
SpaceBeforeLess = 0x0, /// No space before the '<' token NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name. /// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc, Priority Prio) { if (!Locs.empty() && Locs.back().isActive(P)) { if (Locs.back().Priority <= Prio) { Locs.back().TemplateName = TemplateName; Locs.back().LessLoc = LessLoc; Locs.back().Priority = Prio; } } else { Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount, P.BracketCount, P.BraceCount}); } } /// Mark the current potential missing template location as having been /// handled (this happens if we pass a "corresponding" '>' or '>>' token /// or leave a bracket scope). void clear(Parser &P) { while (!Locs.empty() && Locs.back().isActiveOrNested(P)) Locs.pop_back(); } /// Get the current enclosing expression that might hve been intended to be /// a template name. Loc *getCurrent(Parser &P) { if (!Locs.empty() && Locs.back().isActive(P)) return &Locs.back(); return nullptr; } }; AngleBracketTracker AngleBrackets; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. 
/// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; /// Whether to skip parsing of function bodies. /// /// This option can be used, for example, to speed up searches for /// declarations/definitions when indexing. bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. SourceLocation ExprStatementTokLoc; /// Flags describing a context in which we're parsing a statement. enum class ParsedStmtContext { /// This context permits declarations in language modes where declarations /// are not statements. AllowDeclarationsInC = 0x1, /// This context permits standalone OpenMP directives. AllowStandaloneOpenMPDirectives = 0x2, /// This context is at the top level of a GNU statement expression. InStmtExpr = 0x4, /// The context of a regular substatement. SubStmt = 0, /// The context of a compound-statement. Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives, LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr) }; /// Act on an expression statement that might be the last statement in a /// GNU statement expression. Checks whether we are actually at the end of /// a statement expression and builds a suitable expression statement. 
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx); public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. 
SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. 
bool isTokenBracket() const {
    return Tok.isOneOf(tok::l_square, tok::r_square);
  }

  /// isTokenBrace - Return true if the cur token is '{' or '}'.
  bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); }

  /// isTokenStringLiteral - True if this token is a string-literal.
  bool isTokenStringLiteral() const {
    return tok::isStringLiteral(Tok.getKind());
  }

  /// isTokenSpecial - True if this token requires special consumption methods.
  bool isTokenSpecial() const {
    return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
           isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
  }

  /// Returns true if the current token is '=' or is a type of '='.
  /// For typos, give a fixit to '='
  bool isTokenEqualOrEqualTypo();

  /// Return the current token to the token stream and make the given
  /// token the current token.
  void UnconsumeToken(Token &Consumed) {
    Token Next = Tok;
    PP.EnterToken(Consumed, /*IsReinject*/true);
    PP.Lex(Tok);
    PP.EnterToken(Next, /*IsReinject*/true);
  }

  /// Consume an annotation token.  Returns the annotation's *start* location;
  /// PrevTokLocation is set to the annotation's end location.
  SourceLocation ConsumeAnnotationToken() {
    assert(Tok.isAnnotation() && "wrong consume method");
    SourceLocation Loc = Tok.getLocation();
    PrevTokLocation = Tok.getAnnotationEndLoc();
    PP.Lex(Tok);
    return Loc;
  }

  /// ConsumeParen - This consume method keeps the paren count up-to-date.
  ///
  SourceLocation ConsumeParen() {
    assert(isTokenParen() && "wrong consume method");
    if (Tok.getKind() == tok::l_paren)
      ++ParenCount;
    else if (ParenCount) {
      AngleBrackets.clear(*this);
      --ParenCount;       // Don't let unbalanced )'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBracket - This consume method keeps the bracket count up-to-date.
  ///
  SourceLocation ConsumeBracket() {
    assert(isTokenBracket() && "wrong consume method");
    if (Tok.getKind() == tok::l_square)
      ++BracketCount;
    else if (BracketCount) {
      AngleBrackets.clear(*this);
      --BracketCount;     // Don't let unbalanced ]'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBrace - This consume method keeps the brace count up-to-date.
  ///
  SourceLocation ConsumeBrace() {
    assert(isTokenBrace() && "wrong consume method");
    if (Tok.getKind() == tok::l_brace)
      ++BraceCount;
    else if (BraceCount) {
      AngleBrackets.clear(*this);
      --BraceCount;     // Don't let unbalanced }'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeStringToken - Consume the current 'peek token', lexing a new one
  /// and returning the token kind.  This method is specific to strings, as it
  /// handles string literal concatenation, as per C99 5.1.1.2, translation
  /// phase #6.
  SourceLocation ConsumeStringToken() {
    assert(isTokenStringLiteral() &&
           "Should only consume string literals with this method");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// Consume the current code-completion token.
  ///
  /// This routine can be called to consume the code-completion token and
  /// continue processing in special cases where \c cutOffParsing() isn't
  /// desired, such as token caching or completion with lookahead.
  SourceLocation ConsumeCodeCompletionToken() {
    assert(Tok.is(tok::code_completion));
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// When we are consuming a code-completion token without having matched
  /// specific position in the grammar, provide code-completion results based
  /// on context.
  ///
  /// \returns the source location of the code-completion token.
  SourceLocation handleUnexpectedCodeCompletionToken();

  /// Abruptly cut off parsing; mainly used when we have reached the
  /// code-completion point.
  void cutOffParsing() {
    if (PP.isCodeCompletionEnabled())
      PP.setCodeCompletionReached();
    // Cut off parsing by acting as if we reached the end-of-file.
    Tok.setKind(tok::eof);
  }

  /// Determine if we're at the end of the file or at a transition
  /// between modules.
bool isEofOrEom() { tok::TokenKind Kind = Tok.getKind(); return Kind == tok::eof || Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include; } /// Checks if the \p Level is valid for use in a fold expression. bool isFoldOperator(prec::Level Level) const; /// Checks if the \p Kind is a valid operator for fold expressions. bool isFoldOperator(tok::TokenKind Kind) const; /// Initialize all pragma handlers. void initializePragmaHandlers(); /// Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); /// Handle the annotation token produced for /// #pragma comment... void HandlePragmaMSComment(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... 
void HandlePragmaFPContract();

  /// Handle the annotation token produced for
  /// #pragma STDC FENV_ACCESS...
  void HandlePragmaFEnvAccess();

  /// Handle the annotation token produced for
  /// #pragma float_control
  void HandlePragmaFloatControl();

  /// \brief Handle the annotation token produced for
  /// #pragma clang fp ...
  void HandlePragmaFP();

  /// Handle the annotation token produced for
  /// #pragma OPENCL EXTENSION...
  void HandlePragmaOpenCLExtension();

  /// Handle the annotation token produced for
  /// #pragma clang __debug captured
  StmtResult HandlePragmaCaptured();

  /// Handle the annotation token produced for
  /// #pragma clang loop and #pragma unroll.
  bool HandlePragmaLoopHint(LoopHint &Hint);

  bool ParsePragmaAttributeSubjectMatchRuleSet(
      attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc,
      SourceLocation &LastMatchRuleEndLoc);

  void HandlePragmaAttribute();

  /// GetLookAheadToken - This peeks ahead N tokens and returns that token
  /// without consuming any tokens.  LookAhead(0) returns 'Tok', LookAhead(1)
  /// returns the token after Tok, etc.
  ///
  /// Note that this differs from the Preprocessor's LookAhead method, because
  /// the Parser always has one token lexed that the preprocessor doesn't.
  ///
  const Token &GetLookAheadToken(unsigned N) {
    if (N == 0 || Tok.is(tok::eof)) return Tok;
    return PP.LookAhead(N-1);
  }

public:
  /// NextToken - This peeks ahead one token and returns it without
  /// consuming it.
  const Token &NextToken() {
    return PP.LookAhead(0);
  }

  /// getTypeAnnotation - Read a parsed type out of an annotation token.
  static TypeResult getTypeAnnotation(const Token &Tok) {
    if (!Tok.getAnnotationValue())
      return TypeError();
    return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
  }

private:
  static void setTypeAnnotation(Token &Tok, TypeResult T) {
    assert((T.isInvalid() || T.get()) &&
           "produced a valid-but-null type annotation?");
    Tok.setAnnotationValue(T.isInvalid() ? nullptr
                                         : T.get().getAsOpaquePtr());
  }

  static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
    return static_cast<NamedDecl*>(Tok.getAnnotationValue());
  }

  static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
    Tok.setAnnotationValue(ND);
  }

  static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
    return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
  }

  static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
    Tok.setAnnotationValue(ND);
  }

  /// Read an already-translated primary expression out of an annotation
  /// token.
  static ExprResult getExprAnnotation(const Token &Tok) {
    return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
  }

  /// Set the primary expression corresponding to the given annotation
  /// token.
  static void setExprAnnotation(Token &Tok, ExprResult ER) {
    Tok.setAnnotationValue(ER.getAsOpaquePointer());
  }

public:
  // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
  // find a type name by attempting typo correction.
  // NOTE(review): this comment looks stale -- no NeedType parameter exists on
  // these overloads; confirm against the implementation before relying on it.
  bool TryAnnotateTypeOrScopeToken();
  bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
                                                 bool IsNewScope);
  bool TryAnnotateCXXScopeToken(bool EnteringContext = false);

  /// Quick check for whether the current token could plausibly begin a
  /// C++ nested-name-specifier.
  bool MightBeCXXScopeToken() {
    return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
           (Tok.is(tok::annot_template_id) &&
            NextToken().is(tok::coloncolon)) ||
           Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super);
  }
  bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
    return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext);
  }

private:
  enum AnnotatedNameKind {
    /// Annotation has failed and emitted an error.
    ANK_Error,
    /// The identifier is a tentatively-declared name.
    ANK_TentativeDecl,
    /// The identifier is a template name. FIXME: Add an annotation for that.
    ANK_TemplateName,
    /// The identifier can't be resolved.
    ANK_Unresolved,
    /// Annotation was successful.
    ANK_Success
  };

  AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);

  /// Push a tok::annot_cxxscope token onto the token stream.
  void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);

  /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
  /// replacing them with the non-context-sensitive keywords.  This returns
  /// true if the token was replaced.
  bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                       const char *&PrevSpec, unsigned &DiagID,
                       bool &isInvalid) {
    if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
      return false;

    if (Tok.getIdentifierInfo() != Ident_vector &&
        Tok.getIdentifierInfo() != Ident_bool &&
        (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
      return false;

    return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
  }

  /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
  /// identifier token, replacing it with the non-context-sensitive __vector.
  /// This returns true if the token was replaced.
  bool TryAltiVecVectorToken() {
    if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
        Tok.getIdentifierInfo() != Ident_vector) return false;
    return TryAltiVecVectorTokenOutOfLine();
  }

  bool TryAltiVecVectorTokenOutOfLine();
  bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
                                const char *&PrevSpec, unsigned &DiagID,
                                bool &isInvalid);

  /// Returns true if the current token is the identifier 'instancetype'.
  ///
  /// Should only be used in Objective-C language modes.
  bool isObjCInstancetype() {
    assert(getLangOpts().ObjC);
    if (Tok.isAnnotation())
      return false;
    if (!Ident_instancetype)
      Ident_instancetype = PP.getIdentifierInfo("instancetype");
    return Tok.getIdentifierInfo() == Ident_instancetype;
  }

  /// TryKeywordIdentFallback - For compatibility with system headers using
  /// keywords as identifiers, attempt to convert the current token to an
  /// identifier and optionally disable the keyword for the remainder of the
  /// translation unit.
/// This returns false if the token was not replaced,
  /// otherwise emits a diagnostic and returns true.
  bool TryKeywordIdentFallback(bool DisableKeyword);

  /// Get the TemplateIdAnnotation from the token.
  TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);

  /// TentativeParsingAction - An object that is used as a kind of "tentative
  /// parsing transaction". It gets instantiated to mark the token position and
  /// after the token consumption is done, Commit() or Revert() is called to
  /// either "commit the consumed tokens" or revert to the previously marked
  /// token position. Example:
  ///
  ///   TentativeParsingAction TPA(*this);
  ///   ConsumeToken();
  ///   ....
  ///   TPA.Revert();
  ///
  class TentativeParsingAction {
    Parser &P;
    // Parser state captured at construction so Revert() can restore it.
    PreferredTypeBuilder PrevPreferredType;
    Token PrevTok;
    size_t PrevTentativelyDeclaredIdentifierCount;
    unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
    bool isActive;

  public:
    explicit TentativeParsingAction(Parser& p) : P(p) {
      PrevPreferredType = P.PreferredType;
      PrevTok = P.Tok;
      PrevTentativelyDeclaredIdentifierCount =
          P.TentativelyDeclaredIdentifiers.size();
      PrevParenCount = P.ParenCount;
      PrevBracketCount = P.BracketCount;
      PrevBraceCount = P.BraceCount;
      P.PP.EnableBacktrackAtThisPos();
      isActive = true;
    }
    void Commit() {
      assert(isActive && "Parsing action was finished!");
      P.TentativelyDeclaredIdentifiers.resize(
          PrevTentativelyDeclaredIdentifierCount);
      P.PP.CommitBacktrackedTokens();
      isActive = false;
    }
    void Revert() {
      assert(isActive && "Parsing action was finished!");
      P.PP.Backtrack();
      P.PreferredType = PrevPreferredType;
      P.Tok = PrevTok;
      P.TentativelyDeclaredIdentifiers.resize(
          PrevTentativelyDeclaredIdentifierCount);
      P.ParenCount = PrevParenCount;
      P.BracketCount = PrevBracketCount;
      P.BraceCount = PrevBraceCount;
      isActive = false;
    }
    ~TentativeParsingAction() {
      assert(!isActive && "Forgot to call Commit or Revert!");
    }
  };

  /// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. /// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// The kind of extra semi diagnostic to emit. enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// Consume any extra semi-colons until the end of the line. 
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);

  /// Return false if the next token is an identifier. An 'expected identifier'
  /// error is emitted otherwise.
  ///
  /// The parser tries to recover from the error by checking if the next token
  /// is a C++ keyword when parsing Objective-C++. Return false if the recovery
  /// was successful.
  bool expectIdentifier();

public:
  //===--------------------------------------------------------------------===//
  // Scope manipulation

  /// ParseScope - Introduces a new scope for parsing. The kind of
  /// scope is determined by ScopeFlags. Objects of this type should
  /// be created on the stack to coincide with the position where the
  /// parser enters the new scope, and this object's constructor will
  /// create that new scope. Similarly, once the object is destroyed
  /// the parser will exit the scope.
  class ParseScope {
    Parser *Self;
    ParseScope(const ParseScope &) = delete;
    void operator=(const ParseScope &) = delete;

  public:
    // ParseScope - Construct a new object to manage a scope in the
    // parser Self where the new Scope is created with the flags
    // ScopeFlags, but only when we aren't about to enter a compound statement.
    ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
               bool BeforeCompoundStmt = false)
      : Self(Self) {
      if (EnteredScope && !BeforeCompoundStmt)
        Self->EnterScope(ScopeFlags);
      else {
        if (BeforeCompoundStmt)
          Self->incrementMSManglingNumber();

        this->Self = nullptr;
      }
    }

    // Exit - Exit the scope associated with this object now, rather
    // than waiting until the object is destroyed.
    void Exit() {
      if (Self) {
        Self->ExitScope();
        Self = nullptr;
      }
    }

    ~ParseScope() {
      Exit();
    }
  };

  /// Introduces zero or more scopes for parsing. The scopes will all be exited
  /// when the object is destroyed.
class MultiParseScope {
    Parser &Self;
    unsigned NumScopes = 0;

    MultiParseScope(const MultiParseScope&) = delete;

  public:
    MultiParseScope(Parser &Self) : Self(Self) {}
    void Enter(unsigned ScopeFlags) {
      Self.EnterScope(ScopeFlags);
      ++NumScopes;
    }
    void Exit() {
      // Pop every scope this object entered, in LIFO order.
      while (NumScopes) {
        Self.ExitScope();
        --NumScopes;
      }
    }
    ~MultiParseScope() {
      Exit();
    }
  };

  /// EnterScope - Start a new scope.
  void EnterScope(unsigned ScopeFlags);

  /// ExitScope - Pop a scope off the scope stack.
  void ExitScope();

  /// Re-enter the template scopes for a declaration that might be a template.
  unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);

private:
  /// RAII object used to modify the scope flags for the current scope.
  class ParseScopeFlags {
    Scope *CurScope;
    unsigned OldFlags;
    ParseScopeFlags(const ParseScopeFlags &) = delete;
    void operator=(const ParseScopeFlags &) = delete;

  public:
    ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
    ~ParseScopeFlags();
  };

  //===--------------------------------------------------------------------===//
  // Diagnostic Emission and Error recovery.

public:
  DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
  DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
  // Diagnose at the current token's location.
  DiagnosticBuilder Diag(unsigned DiagID) {
    return Diag(Tok, DiagID);
  }

private:
  void SuggestParentheses(SourceLocation Loc, unsigned DK,
                          SourceRange ParenRange);
  void CheckNestedObjCContexts(SourceLocation AtLoc);

public:
  /// Control flags for SkipUntil functions.
enum SkipUntilFlags {
    StopAtSemi = 1 << 0,  ///< Stop skipping at semicolon
    /// Stop skipping at specified token, but don't skip the token itself
    StopBeforeMatch = 1 << 1,
    StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
  };

  friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                            SkipUntilFlags R) {
    return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                       static_cast<unsigned>(R));
  }

  /// SkipUntil - Read tokens until we get to the specified token, then consume
  /// it (unless StopBeforeMatch is specified).  Because we cannot guarantee
  /// that the token will ever occur, this skips to the next token, or to some
  /// likely good stopping point.  If Flags has StopAtSemi flag, skipping will
  /// stop at a ';' character. Balances (), [], and {} delimiter tokens while
  /// skipping.
  ///
  /// If SkipUntil finds the specified token, it returns true, otherwise it
  /// returns false.
  bool SkipUntil(tok::TokenKind T,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    return SkipUntil(llvm::makeArrayRef(T), Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2, T3};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

  /// SkipMalformedDecl - Read tokens until we get to some likely good stopping
  /// point for skipping past a simple-declaration.
  void SkipMalformedDecl();

  /// The location of the first statement inside an else that might
  /// have a misleading indentation. If there is no
  /// MisleadingIndentationChecker on an else active, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;

private:
  //===--------------------------------------------------------------------===//
  // Lexing and parsing of C++ inline methods.

  struct ParsingClass;

  /// [class.mem]p1: "... the class is regarded as complete within
  /// - function bodies
  /// - default arguments
  /// - exception-specifications (TODO: C++0x)
  /// - and brace-or-equal-initializers for non-static data members
  /// (including such things in nested classes)."
  /// LateParsedDeclarations build the tree of those elements so they can
  /// be parsed after parsing the top-level class.
  class LateParsedDeclaration {
  public:
    virtual ~LateParsedDeclaration();

    virtual void ParseLexedMethodDeclarations();
    virtual void ParseLexedMemberInitializers();
    virtual void ParseLexedMethodDefs();
    virtual void ParseLexedAttributes();
    virtual void ParseLexedPragmas();
  };

  /// Inner node of the LateParsedDeclaration tree that parses
  /// all its members recursively.
  class LateParsedClass : public LateParsedDeclaration {
  public:
    LateParsedClass(Parser *P, ParsingClass *C);
    ~LateParsedClass() override;

    void ParseLexedMethodDeclarations() override;
    void ParseLexedMemberInitializers() override;
    void ParseLexedMethodDefs() override;
    void ParseLexedAttributes() override;
    void ParseLexedPragmas() override;

  private:
    Parser *Self;
    ParsingClass *Class;
  };

  /// Contains the lexed tokens of an attribute with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  /// FIXME: Perhaps we should change the name of LateParsedDeclaration to
  /// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
    Parser *Self;
    CachedTokens Toks;
    IdentifierInfo &AttrName;
    IdentifierInfo *MacroII = nullptr;
    SourceLocation AttrNameLoc;
    SmallVector<Decl*, 2> Decls;

    explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                                 SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

    void ParseLexedAttributes() override;

    void addDecl(Decl *D) { Decls.push_back(D); }
  };

  /// Contains the lexed tokens of a pragma with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  class LateParsedPragma : public LateParsedDeclaration {
    Parser *Self = nullptr;
    AccessSpecifier AS = AS_none;
    CachedTokens Toks;

  public:
    explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
        : Self(P), AS(AS) {}

    void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
    const CachedTokens &toks() const { return Toks; }
    AccessSpecifier getAccessSpecifier() const { return AS; }

    void ParseLexedPragmas() override;
  };

  // A list of late-parsed attributes.  Used by ParseGNUAttributes.
  class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
  public:
    LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

    bool parseSoon() { return ParseSoon; }

  private:
    bool ParseSoon;  // Are we planning to parse these shortly after creation?
  };

  /// Contains the lexed tokens of a member function definition
  /// which needs to be parsed at the end of the class declaration
  /// after parsing all other member declarations.
  struct LexedMethod : public LateParsedDeclaration {
    Parser *Self;
    Decl *D;
    CachedTokens Toks;

    explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}

    void ParseLexedMethodDefs() override;
  };

  /// LateParsedDefaultArgument - Keeps track of a parameter that may
  /// have a default argument that cannot be parsed yet because it
  /// occurs within a member function declaration inside the class
  /// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
    explicit LateParsedDefaultArgument(Decl *P,
                                       std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

    /// Param - The parameter declaration for this parameter.
    Decl *Param;

    /// Toks - The sequence of tokens that comprises the default
    /// argument expression, not including the '=' or the terminating
    /// ')' or ','.  This will be NULL for parameters that have no
    /// default argument.
    std::unique_ptr<CachedTokens> Toks;
  };

  /// LateParsedMethodDeclaration - A method declaration inside a class that
  /// contains at least one entity whose parsing needs to be delayed
  /// until the class itself is completely-defined, such as a default
  /// argument (C++ [class.mem]p2).
  struct LateParsedMethodDeclaration : public LateParsedDeclaration {
    explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), ExceptionSpecTokens(nullptr) {}

    void ParseLexedMethodDeclarations() override;

    Parser* Self;

    /// Method - The method declaration.
    Decl *Method;

    /// DefaultArgs - Contains the parameters of the function and
    /// their default arguments. At least one of the parameters will
    /// have a default argument, but all of the parameters of the
    /// method will be stored so that they can be reintroduced into
    /// scope at the appropriate times.
    SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

    /// The set of tokens that make up an exception-specification that
    /// has not yet been parsed.
    CachedTokens *ExceptionSpecTokens;
  };

  /// LateParsedMemberInitializer - An initializer for a non-static class data
  /// member whose parsing must be delayed until the class is completely
  /// defined (C++11 [class.mem]p2).
  struct LateParsedMemberInitializer : public LateParsedDeclaration {
    LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

    void ParseLexedMemberInitializers() override;

    Parser *Self;

    /// Field - The field declaration.
    Decl *Field;

    /// CachedTokens - The sequence of tokens that comprises the initializer,
    /// including any leading '='.
    CachedTokens Toks;
  };

  /// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
  /// C++ class, its method declarations that contain parts that won't be
  /// parsed until after the definition is completed (C++ [class.mem]p2),
  /// the method declarations and possibly attached inline definitions
  /// will be stored here with the tokens that will be parsed to create those
  /// entities.
  typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;

  /// Representation of a class that has been parsed, including
  /// any member function declarations or definitions that need to be
  /// parsed after the corresponding top-level class is complete.
  struct ParsingClass {
    ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), IsInterface(IsInterface),
        TagOrTemplate(TagOrTemplate) {}

    /// Whether this is a "top-level" class, meaning that it is
    /// not nested within another class.
    bool TopLevelClass : 1;

    /// Whether this class is an __interface.
    bool IsInterface : 1;

    /// The class or class template whose definition we are parsing.
    Decl *TagOrTemplate;

    /// LateParsedDeclarations - Method declarations, inline definitions and
    /// nested classes that contain pieces whose parsing will be delayed until
    /// the top-level class is fully defined.
    LateParsedDeclarationsContainer LateParsedDeclarations;
  };

  /// The stack of classes that is currently being
  /// parsed. Nested and local classes will be pushed onto this stack
  /// when they are parsed, and removed afterward.
  std::stack<ParsingClass *> ClassStack;

  /// Return the innermost class currently being parsed; requires a
  /// non-empty ClassStack.
  ParsingClass &getCurrentClass() {
    assert(!ClassStack.empty() && "No lexed method stacks!");
    return *ClassStack.top();
  }

  /// RAII object used to manage the parsing of a class definition.
  /// RAII wrapper: pushes a ParsingClass on construction and pops it on
  /// destruction (unless Pop() was already called explicitly).
  class ParsingClassDefinition {
    Parser &P;
    bool Popped;            // true once Pop() has run; guards double-pop
    Sema::ParsingClassState State;

  public:
    ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                           bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
    }

    /// Pop this class off the stack.
    void Pop() {
      assert(!Popped && "Nested class has already been popped");
      Popped = true;
      P.PopParsingClass(State);
    }

    ~ParsingClassDefinition() {
      if (!Popped)
        P.PopParsingClass(State);
    }
  };

  /// Contains information about any template-specific
  /// information that has been parsed prior to parsing declaration
  /// specifiers.
  struct ParsedTemplateInfo {
    ParsedTemplateInfo() : Kind(NonTemplate), TemplateParams(nullptr),
                           TemplateLoc() { }

    ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                       bool isSpecialization,
                       bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

    explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                                SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false){ }

    /// The kind of template we are parsing.
    enum {
      /// We are not parsing a template at all.
      NonTemplate = 0,
      /// We are parsing a template declaration.
      Template,
      /// We are parsing an explicit specialization.
      ExplicitSpecialization,
      /// We are parsing an explicit instantiation.
      ExplicitInstantiation
    } Kind;

    /// The template parameter lists, for template declarations
    /// and explicit specializations.
    TemplateParameterLists *TemplateParams;

    /// The location of the 'extern' keyword, if any, for an explicit
    /// instantiation
    SourceLocation ExternLoc;

    /// The location of the 'template' keyword, for an explicit
    /// instantiation.
    SourceLocation TemplateLoc;

    /// Whether the last template parameter list was empty.
    bool LastParameterListWasEmpty;

    SourceRange getSourceRange() const LLVM_READONLY;
  };

  // In ParseCXXInlineMethods.cpp.
  struct ReenterTemplateScopeRAII;
  struct ReenterClassScopeRAII;

  void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
  void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);

  static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);

  Sema::ParsingClassState
  PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
  void DeallocateParsedClasses(ParsingClass *Class);
  void PopParsingClass(Sema::ParsingClassState);

  /// Distinguishes the two member initializers whose parsing is delayed:
  /// default function arguments and non-static member initializers.
  enum CachedInitKind {
    CIK_DefaultArgument,
    CIK_DefaultInitializer
  };

  NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
                                     ParsedAttributes &AccessAttrs,
                                     ParsingDeclarator &D,
                                     const ParsedTemplateInfo &TemplateInfo,
                                     const VirtSpecifiers &VS,
                                     SourceLocation PureSpecLoc);
  void ParseCXXNonStaticMemberInitializer(Decl *VarD);
  void ParseLexedAttributes(ParsingClass &Class);
  void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
                               bool EnterScope, bool OnDefinition);
  void ParseLexedAttribute(LateParsedAttribute &LA,
                           bool EnterScope, bool OnDefinition);
  void ParseLexedMethodDeclarations(ParsingClass &Class);
  void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
  void ParseLexedMethodDefs(ParsingClass &Class);
  void ParseLexedMethodDef(LexedMethod &LM);
  void ParseLexedMemberInitializers(ParsingClass &Class);
  void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
  void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
  void ParseLexedPragmas(ParsingClass &Class);
  void ParseLexedPragma(LateParsedPragma &LP);
  bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
  bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
  bool ConsumeAndStoreConditional(CachedTokens &Toks);
  /// Convenience overload: consume-and-store until a single token kind;
  /// forwards to the two-kind overload with T1 in both positions.
  bool ConsumeAndStoreUntil(tok::TokenKind T1,
                            CachedTokens &Toks,
                            bool StopAtSemi = true,
                            bool ConsumeFinalToken = true) {
    return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
  }
  bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
                            CachedTokens &Toks,
                            bool StopAtSemi = true,
                            bool ConsumeFinalToken = true);

  //===--------------------------------------------------------------------===//
  // C99 6.9: External Definitions.

  /// ParsedAttributes plus the source range the attributes covered.
  struct ParsedAttributesWithRange : ParsedAttributes {
    ParsedAttributesWithRange(AttributeFactory &factory)
      : ParsedAttributes(factory) {}

    void clear() {
      ParsedAttributes::clear();
      Range = SourceRange();
    }

    SourceRange Range;
  };
  struct ParsedAttributesViewWithRange : ParsedAttributesView {
    ParsedAttributesViewWithRange() : ParsedAttributesView() {}
    void clearListOnly() {
      ParsedAttributesView::clearListOnly();
      Range = SourceRange();
    }

    SourceRange Range;
  };

  DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
                                          ParsingDeclSpec *DS = nullptr);
  bool isDeclarationAfterDeclarator();
  bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
  DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
      ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr,
      AccessSpecifier AS = AS_none);
  DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
                                                ParsingDeclSpec &DS,
                                                AccessSpecifier AS);

  void SkipFunctionBody();
  Decl *ParseFunctionDefinition(ParsingDeclarator &D,
                 const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
                 LateParsedAttrList *LateParsedAttrs = nullptr);
  void ParseKNRParamDeclarations(Declarator &D);
  // EndLoc is filled with the location of the last token of the simple-asm.
  ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
  ExprResult ParseAsmStringLiteral(bool ForAsmLabel);

  // Objective-C External Declarations
  void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
  DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
  DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
  Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
                                        ParsedAttributes &prefixAttrs);
  class ObjCTypeParamListScope;
  ObjCTypeParamList *parseObjCTypeParamList();
  ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
      ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
      SmallVectorImpl<IdentifierLocPair> &protocolIdents,
      SourceLocation &rAngleLoc, bool mayBeProtocolList = true);

  void HelperActionsForIvarDeclarations(Decl *interfaceDecl,
                                        SourceLocation atLoc,
                                        BalancedDelimiterTracker &T,
                                        SmallVectorImpl<Decl *> &AllIvarDecls,
                                        bool RBraceMissing);
  void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
                                       tok::ObjCKeywordKind visibility,
                                       SourceLocation atLoc);
  bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
                                   SmallVectorImpl<SourceLocation> &PLocs,
                                   bool WarnOnDeclarations,
                                   bool ForObjCContainer,
                                   SourceLocation &LAngleLoc,
                                   SourceLocation &EndProtoLoc,
                                   bool consumeLastToken);

  /// Parse the first angle-bracket-delimited clause for an
  /// Objective-C object or object pointer type, which may be either
  /// type arguments or protocol qualifiers.
  void parseObjCTypeArgsOrProtocolQualifiers(
         ParsedType baseType,
         SourceLocation &typeArgsLAngleLoc,
         SmallVectorImpl<ParsedType> &typeArgs,
         SourceLocation &typeArgsRAngleLoc,
         SourceLocation &protocolLAngleLoc,
         SmallVectorImpl<Decl *> &protocols,
         SmallVectorImpl<SourceLocation> &protocolLocs,
         SourceLocation &protocolRAngleLoc,
         bool consumeLastToken,
         bool warnOnIncompleteProtocols);

  /// Parse either Objective-C type arguments or protocol qualifiers; if the
  /// former, also parse protocol qualifiers afterward.
  void parseObjCTypeArgsAndProtocolQualifiers(
         ParsedType baseType,
         SourceLocation &typeArgsLAngleLoc,
         SmallVectorImpl<ParsedType> &typeArgs,
         SourceLocation &typeArgsRAngleLoc,
         SourceLocation &protocolLAngleLoc,
         SmallVectorImpl<Decl *> &protocols,
         SmallVectorImpl<SourceLocation> &protocolLocs,
         SourceLocation &protocolRAngleLoc,
         bool consumeLastToken);

  /// Parse a protocol qualifier type such as '<NSCopying>', which is
  /// an anachronistic way of writing 'id<NSCopying>'.
  TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);

  /// Parse Objective-C type arguments and protocol qualifiers, extending the
  /// current type with the parsed result.
  TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
                                                    ParsedType type,
                                                    bool consumeLastToken,
                                                    SourceLocation &endLoc);

  void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
                                  Decl *CDecl);
  DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
                                                ParsedAttributes &prefixAttrs);

  /// RAII-ish bookkeeping for parsing an @implementation: registers itself as
  /// the parser's current implementation on construction; finish() must be
  /// called (with the @end range) before it is considered complete.
  struct ObjCImplParsingDataRAII {
    Parser &P;
    Decl *Dcl;
    bool HasCFunction;
    typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
    LateParsedObjCMethodContainer LateParsedObjCMethods;

    ObjCImplParsingDataRAII(Parser &parser, Decl *D)
      : P(parser), Dcl(D), HasCFunction(false) {
      P.CurParsedObjCImpl = this;
      Finished = false;
    }
    ~ObjCImplParsingDataRAII();

    void finish(SourceRange AtEnd);
    bool isFinished() const { return Finished; }

  private:
    bool Finished;
  };
  ObjCImplParsingDataRAII *CurParsedObjCImpl;
  void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);

  DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
                                                      ParsedAttributes &Attrs);
  DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
  Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
  Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
  Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);

  IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);

  // Definitions for Objective-c context sensitive keywords recognition.
  enum ObjCTypeQual {
    objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
    objc_nonnull, objc_nullable, objc_null_unspecified,
    objc_NumQuals
  };
  IdentifierInfo *ObjCTypeQuals[objc_NumQuals];

  bool isTokIdentifier_in() const;

  ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
                               ParsedAttributes *ParamAttrs);
  void ParseObjCMethodRequirement();
  Decl *ParseObjCMethodPrototype(
            tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
            bool MethodDefinition = true);
  Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
            tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
            bool MethodDefinition=true);
  void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);

  Decl *ParseObjCMethodDefinition();

public:
  //===--------------------------------------------------------------------===//
  // C99 6.5: Expressions.

  /// TypeCastState - State whether an expression is or may be a type cast.
  enum TypeCastState {
    NotTypeCast = 0,
    MaybeTypeCast,
    IsTypeCast
  };

  ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseConstantExpressionInExprEvalContext(
      TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseCaseExpression(SourceLocation CaseLoc);
  ExprResult ParseConstraintExpression();
  ExprResult
  ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
  ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
  // Expr that doesn't include commas.
  ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);

  ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
                                  unsigned &NumLineToksConsumed,
                                  bool IsUnevaluated);

  ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);

private:
  ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);

  ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);

  ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
                                        prec::Level MinPrec);
  /// Control what ParseCastExpression will parse.
  enum CastParseKind {
    AnyCastExpr = 0,
    UnaryExprOnly,
    PrimaryExprOnly
  };
  ExprResult ParseCastExpression(CastParseKind ParseKind,
                                 bool isAddressOfOperand,
                                 bool &NotCastExpr,
                                 TypeCastState isTypeCast,
                                 bool isVectorLiteral = false,
                                 bool *NotPrimaryExpression = nullptr);
  ExprResult ParseCastExpression(CastParseKind ParseKind,
                                 bool isAddressOfOperand = false,
                                 TypeCastState isTypeCast = NotTypeCast,
                                 bool isVectorLiteral = false,
                                 bool *NotPrimaryExpression = nullptr);

  /// Returns true if the next token cannot start an expression.
  bool isNotExpressionStart();

  /// Returns true if the next token would start a postfix-expression
  /// suffix.
  /// Returns true when the current token ('[', '(', '.', '->', '++' or '--')
  /// can continue a postfix-expression.
  bool isPostfixExpressionSuffixStart() {
    tok::TokenKind K = Tok.getKind();
    return (K == tok::l_square || K == tok::l_paren ||
            K == tok::period || K == tok::arrow ||
            K == tok::plusplus || K == tok::minusminus);
  }

  bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
  void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
  bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
                                           const Token &OpToken);
  /// Convenience overload: checks the delimiter against the innermost
  /// potential angle bracket, if any; returns false when none is open.
  bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
    if (auto *Info = AngleBrackets.getCurrent(*this))
      return checkPotentialAngleBracketDelimiter(*Info, OpToken);
    return false;
  }

  ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
  ExprResult ParseUnaryExprOrTypeTraitExpression();
  ExprResult ParseBuiltinPrimaryExpression();
  ExprResult ParseUniqueStableNameExpression();

  ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
                                                bool &isCastExpr,
                                                ParsedType &CastTy,
                                                SourceRange &CastRange);

  typedef SmallVector<Expr*, 20> ExprListTy;
  typedef SmallVector<SourceLocation, 20> CommaLocsTy;

  /// ParseExpressionList - Used for C/C++ (argument-)expression-list.
  bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
                           SmallVectorImpl<SourceLocation> &CommaLocs,
                           llvm::function_ref<void()> ExpressionStarts =
                               llvm::function_ref<void()>());

  /// ParseSimpleExpressionList - A simple comma-separated list of expressions,
  /// used for misc language extensions.
  bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
                                 SmallVectorImpl<SourceLocation> &CommaLocs);

  /// ParenParseOption - Control what ParseParenExpression will parse.
  enum ParenParseOption {
    SimpleExpr,      // Only parse '(' expression ')'
    FoldExpr,        // Also allow fold-expression <anything>
    CompoundStmt,    // Also allow '(' compound-statement ')'
    CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
    CastExpr         // Also allow '(' type-name ')' <anything>
  };
  ExprResult ParseParenExpression(ParenParseOption &ExprType,
                                  bool stopIfCastExpr,
                                  bool isTypeCast,
                                  ParsedType &CastTy,
                                  SourceLocation &RParenLoc);

  ExprResult ParseCXXAmbiguousParenExpression(
      ParenParseOption &ExprType, ParsedType &CastTy,
      BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
  ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
                                            SourceLocation LParenLoc,
                                            SourceLocation RParenLoc);

  ExprResult ParseGenericSelectionExpression();

  ExprResult ParseObjCBoolLiteral();

  ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);

  //===--------------------------------------------------------------------===//
  // C++ Expressions
  ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS,
                                     bool isAddressOfOperand,
                                     Token &Replacement);
  ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);

  bool areTokensAdjacent(const Token &A, const Token &B);

  void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
                                  bool EnteringContext, IdentifierInfo &II,
                                  CXXScopeSpec &SS);

  bool ParseOptionalCXXScopeSpecifier(
      CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHasErrors,
      bool EnteringContext, bool *MayBePseudoDestructor = nullptr,
      bool IsTypename = false, IdentifierInfo **LastII = nullptr,
      bool OnlyNamespace = false, bool InUsingDeclaration = false);

  //===--------------------------------------------------------------------===//
  // C++11 5.1.2: Lambda expressions

  /// Result of tentatively parsing a lambda-introducer.
  enum class LambdaIntroducerTentativeParse {
    /// This appears to be a lambda-introducer, which has been fully parsed.
    Success,
    /// This is a lambda-introducer, but has not been fully parsed, and this
    /// function needs to be called again to parse it.
    Incomplete,
    /// This is definitely an Objective-C message send expression, rather than
    /// a lambda-introducer, attribute-specifier, or array designator.
    MessageSend,
    /// This is not a lambda-introducer.
    Invalid,
  };

  // [...] () -> type {...}
  ExprResult ParseLambdaExpression();
  ExprResult TryParseLambdaExpression();
  bool
  ParseLambdaIntroducer(LambdaIntroducer &Intro,
                        LambdaIntroducerTentativeParse *Tentative = nullptr);
  ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);

  //===--------------------------------------------------------------------===//
  // C++ 5.2p1: C++ Casts
  ExprResult ParseCXXCasts();

  /// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
  ExprResult ParseBuiltinBitCast();

  //===--------------------------------------------------------------------===//
  // C++ 5.2p1: C++ Type Identification
  ExprResult ParseCXXTypeid();

  //===--------------------------------------------------------------------===//
  //  C++ : Microsoft __uuidof Expression
  ExprResult ParseCXXUuidof();

  //===--------------------------------------------------------------------===//
  // C++ 5.2.4: C++ Pseudo-Destructor Expressions
  ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
                                      tok::TokenKind OpKind,
                                      CXXScopeSpec &SS,
                                      ParsedType ObjectType);

  //===--------------------------------------------------------------------===//
  // C++ 9.3.2: C++ 'this' pointer
  ExprResult ParseCXXThis();

  //===--------------------------------------------------------------------===//
  // C++ 15: C++ Throw Expression
  ExprResult ParseThrowExpression();

  ExceptionSpecificationType tryParseExceptionSpecification(
                    bool Delayed,
                    SourceRange &SpecificationRange,
                    SmallVectorImpl<ParsedType> &DynamicExceptions,
                    SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
                    ExprResult &NoexceptExpr,
                    CachedTokens *&ExceptionSpecTokens);

  // EndLoc is filled with the location of the last token of the specification.
  ExceptionSpecificationType ParseDynamicExceptionSpecification(
                                  SourceRange &SpecificationRange,
                                  SmallVectorImpl<ParsedType> &Exceptions,
                                  SmallVectorImpl<SourceRange> &Ranges);

  //===--------------------------------------------------------------------===//
  // C++0x 8: Function declaration trailing-return-type
  TypeResult ParseTrailingReturnType(SourceRange &Range,
                                     bool MayBeFollowedByDirectInit);

  //===--------------------------------------------------------------------===//
  // C++ 2.13.5: C++ Boolean Literals
  ExprResult ParseCXXBoolLiteral();

  //===--------------------------------------------------------------------===//
  // C++ 5.2.3: Explicit type conversion (functional notation)
  ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);

  /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
  /// This should only be called when the current token is known to be part of
  /// simple-type-specifier.
  void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);

  bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);

  //===--------------------------------------------------------------------===//
  // C++ 5.3.4 and 5.3.5: C++ new and delete
  bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
                                   Declarator &D);
  void ParseDirectNewDeclarator(Declarator &D);
  ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
  ExprResult ParseCXXDeleteExpression(bool UseGlobal,
                                      SourceLocation Start);

  //===--------------------------------------------------------------------===//
  // C++ if/switch/while/for condition expression.
  struct ForRangeInfo;
  Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
                                          SourceLocation Loc,
                                          Sema::ConditionKind CK,
                                          ForRangeInfo *FRI = nullptr);

  //===--------------------------------------------------------------------===//
  // C++ Coroutines

  ExprResult ParseCoyieldExpression();

  //===--------------------------------------------------------------------===//
  // C++ Concepts

  ExprResult ParseRequiresExpression();
  void ParseTrailingRequiresClause(Declarator &D);

  //===--------------------------------------------------------------------===//
  // C99 6.7.8: Initialization.

  /// ParseInitializer
  ///       initializer: [C99 6.7.8]
  ///         assignment-expression
  ///         '{' ...
  ///
  /// Dispatches on the current token: a brace starts a braced init list,
  /// anything else is parsed as an assignment-expression.
  ExprResult ParseInitializer() {
    if (Tok.isNot(tok::l_brace))
      return ParseAssignmentExpression();
    return ParseBraceInitializer();
  }
  bool MayBeDesignationStart();
  ExprResult ParseBraceInitializer();
  ExprResult ParseInitializerWithPotentialDesignator(
      llvm::function_ref<void(const Designation &)> CodeCompleteCB);

  //===--------------------------------------------------------------------===//
  // clang Expressions

  ExprResult ParseBlockLiteralExpression();  // ^{...}

  //===--------------------------------------------------------------------===//
  // Objective-C Expressions
  ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
  ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
  ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
  ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
  ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
  ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
  bool isSimpleObjCMessageExpression();
  ExprResult ParseObjCMessageExpression();
  ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
                                            SourceLocation SuperLoc,
                                            ParsedType ReceiverType,
                                            Expr *ReceiverExpr);
  ExprResult ParseAssignmentExprWithObjCMessageExprStart(
      SourceLocation LBracloc, SourceLocation SuperLoc,
      ParsedType ReceiverType, Expr *ReceiverExpr);
  bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);

  //===--------------------------------------------------------------------===//
  // C99 6.8: Statements and Blocks.

  /// A SmallVector of statements, with stack size 32 (as that is the only one
  /// used.)
  typedef SmallVector<Stmt*, 32> StmtVector;
  /// A SmallVector of expressions, with stack size 12 (the maximum used.)
  typedef SmallVector<Expr*, 12> ExprVector;
  /// A SmallVector of types.
  typedef SmallVector<ParsedType, 12> TypeVector;

  StmtResult
  ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
                 ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
  StmtResult ParseStatementOrDeclaration(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc = nullptr);
  StmtResult ParseStatementOrDeclarationAfterAttributes(
                                         StmtVector &Stmts,
                                         ParsedStmtContext StmtCtx,
                                         SourceLocation *TrailingElseLoc,
                                         ParsedAttributesWithRange &Attrs);
  StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
                                   ParsedStmtContext StmtCtx);
  StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
                                bool MissingCase = false,
                                ExprResult Expr = ExprResult());
  StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseCompoundStatement(bool isStmtExpr = false);
  StmtResult ParseCompoundStatement(bool isStmtExpr,
                                    unsigned ScopeFlags);
  void ParseCompoundStatementLeadingPragmas();
  bool ConsumeNullStmt(StmtVector &Stmts);
  StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
  bool ParseParenExprOrCondition(StmtResult *InitStmt,
                                 Sema::ConditionResult &CondResult,
                                 SourceLocation Loc, Sema::ConditionKind CK);
  StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseDoStatement();
  StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseGotoStatement();
  StmtResult ParseContinueStatement();
  StmtResult ParseBreakStatement();
  StmtResult ParseReturnStatement();
  StmtResult ParseAsmStatement(bool &msAsm);
  StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
  StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx,
                                 SourceLocation *TrailingElseLoc,
                                 ParsedAttributesWithRange &Attrs);

  /// Describes the behavior that should be taken for an __if_exists
  /// block.
  enum IfExistsBehavior {
    /// Parse the block; this code is always used.
    IEB_Parse,
    /// Skip the block entirely; this code is never used.
    IEB_Skip,
    /// Parse the block as a dependent block, which may be used in
    /// some template instantiations but not others.
    IEB_Dependent
  };

  /// Describes the condition of a Microsoft __if_exists or
  /// __if_not_exists block.
  struct IfExistsCondition {
    /// The location of the initial keyword.
    SourceLocation KeywordLoc;
    /// Whether this is an __if_exists block (rather than an
    /// __if_not_exists block).
    bool IsIfExists;

    /// Nested-name-specifier preceding the name.
    CXXScopeSpec SS;

    /// The name we're looking for.
    UnqualifiedId Name;

    /// How the body of this __if_exists or __if_not_exists block
    /// should be treated (parse, skip, or dependent).
    IfExistsBehavior Behavior;
  };

  bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
  void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
  void ParseMicrosoftIfExistsExternalDeclaration();
  void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
                                              ParsedAttributes &AccessAttrs,
                                              AccessSpecifier &CurAS);
  bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
                                              bool &InitExprsOk);
  bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
                           SmallVectorImpl<Expr *> &Constraints,
                           SmallVectorImpl<Expr *> &Exprs);

  //===--------------------------------------------------------------------===//
  // C++ 6: Statements and Blocks

  StmtResult ParseCXXTryBlock();
  StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
  StmtResult ParseCXXCatchBlock(bool FnCatch = false);

  //===--------------------------------------------------------------------===//
  // MS: SEH Statements and Blocks

  StmtResult ParseSEHTryBlock();
  StmtResult ParseSEHExceptBlock(SourceLocation Loc);
  StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
  StmtResult ParseSEHLeaveStatement();

  //===--------------------------------------------------------------------===//
  // Objective-C Statements

  StmtResult ParseObjCAtStatement(SourceLocation atLoc,
                                  ParsedStmtContext StmtCtx);
  StmtResult ParseObjCTryStmt(SourceLocation atLoc);
  StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
  StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
  StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);

  //===--------------------------------------------------------------------===//
  // C99 6.7: Declarations.

  /// A context for parsing declaration specifiers.  TODO: flesh this
  /// out, there are other significant restrictions on specifiers than
  /// would be best implemented in the parser.
  enum class DeclSpecContext {
    DSC_normal,         // normal context
    DSC_class,          // class context, enables 'friend'
    DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
    DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
    DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
    DSC_top_level,         // top-level/namespace declaration context
    DSC_template_param,    // template parameter context
    DSC_template_type_arg, // template type argument context
    DSC_objc_method_result, // ObjC method result context, enables
                            // 'instancetype'
    DSC_condition           // condition declaration context
  };

  /// Is this a context in which we are parsing just a type-specifier (or
  /// trailing-type-specifier)?
  static bool isTypeSpecifier(DeclSpecContext DSC) {
    switch (DSC) {
    case DeclSpecContext::DSC_normal:
    case DeclSpecContext::DSC_template_param:
    case DeclSpecContext::DSC_class:
    case DeclSpecContext::DSC_top_level:
    case DeclSpecContext::DSC_objc_method_result:
    case DeclSpecContext::DSC_condition:
      return false;

    case DeclSpecContext::DSC_template_type_arg:
    case DeclSpecContext::DSC_type_specifier:
    case DeclSpecContext::DSC_trailing:
    case DeclSpecContext::DSC_alias_declaration:
      return true;
    }
    llvm_unreachable("Missing DeclSpecContext case");
  }

  /// Whether a defining-type-specifier is permitted in a given context.
  enum class AllowDefiningTypeSpec {
    /// The grammar doesn't allow a defining-type-specifier here, and we must
    /// not parse one (eg, because a '{' could mean something else).
    No,
    /// The grammar doesn't allow a defining-type-specifier here, but we permit
    /// one for error recovery purposes. Sema will reject.
    NoButErrorRecovery,
    /// The grammar allows a defining-type-specifier here, even though it's
    /// always invalid. Sema will reject.
    YesButInvalid,
    /// The grammar allows a defining-type-specifier here, and one can be
    /// valid.
    Yes
  };

  /// Is this a context in which we are parsing defining-type-specifiers (and
  /// so permit class and enum definitions in addition to non-defining class
  /// and enum elaborated-type-specifiers)?
  static AllowDefiningTypeSpec
  isDefiningTypeSpecifierContext(DeclSpecContext DSC) {
    switch (DSC) {
    case DeclSpecContext::DSC_normal:
    case DeclSpecContext::DSC_class:
    case DeclSpecContext::DSC_top_level:
    case DeclSpecContext::DSC_alias_declaration:
    case DeclSpecContext::DSC_objc_method_result:
      return AllowDefiningTypeSpec::Yes;

    case DeclSpecContext::DSC_condition:
    case DeclSpecContext::DSC_template_param:
      return AllowDefiningTypeSpec::YesButInvalid;

    case DeclSpecContext::DSC_template_type_arg:
    case DeclSpecContext::DSC_type_specifier:
      return AllowDefiningTypeSpec::NoButErrorRecovery;

    case DeclSpecContext::DSC_trailing:
      return AllowDefiningTypeSpec::No;
    }
    llvm_unreachable("Missing DeclSpecContext case");
  }

  /// Is this a context in which an opaque-enum-declaration can appear?
  static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) {
    switch (DSC) {
    case DeclSpecContext::DSC_normal:
    case DeclSpecContext::DSC_class:
    case DeclSpecContext::DSC_top_level:
      return true;

    case DeclSpecContext::DSC_alias_declaration:
    case DeclSpecContext::DSC_objc_method_result:
    case DeclSpecContext::DSC_condition:
    case DeclSpecContext::DSC_template_param:
    case DeclSpecContext::DSC_template_type_arg:
    case DeclSpecContext::DSC_type_specifier:
    case DeclSpecContext::DSC_trailing:
      return false;
    }
    llvm_unreachable("Missing DeclSpecContext case");
  }

  /// Is this a context in which we can perform class template argument
  /// deduction?
  static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
    switch (DSC) {
    case DeclSpecContext::DSC_normal:
    case DeclSpecContext::DSC_template_param:
    case DeclSpecContext::DSC_class:
    case DeclSpecContext::DSC_top_level:
    case DeclSpecContext::DSC_condition:
    case DeclSpecContext::DSC_type_specifier:
      return true;

    case DeclSpecContext::DSC_objc_method_result:
    case DeclSpecContext::DSC_template_type_arg:
    case DeclSpecContext::DSC_trailing:
    case DeclSpecContext::DSC_alias_declaration:
      return false;
    }
    llvm_unreachable("Missing DeclSpecContext case");
  }

  /// Information on a C++0x for-range-initializer found while parsing a
  /// declaration which turns out to be a for-range-declaration.
  struct ForRangeInit {
    SourceLocation ColonLoc;
    ExprResult RangeExpr;

    bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
  };
  struct ForRangeInfo : ForRangeInit {
    StmtResult LoopVar;
  };

  DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                  SourceLocation &DeclEnd,
                                  ParsedAttributesWithRange &attrs,
                                  SourceLocation *DeclSpecStart = nullptr);
  DeclGroupPtrTy
  ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
                         ParsedAttributesWithRange &attrs, bool RequireSemi,
                         ForRangeInit *FRI = nullptr,
                         SourceLocation *DeclSpecStart = nullptr);
  bool MightBeDeclarator(DeclaratorContext Context);
  DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                                SourceLocation *DeclEnd = nullptr,
                                ForRangeInit *FRI = nullptr);
  Decl *ParseDeclarationAfterDeclarator(Declarator &D,
               const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
  bool ParseAsmAttributesAfterDeclarator(Declarator &D);
  Decl *ParseDeclarationAfterDeclaratorAndAttributes(
      Declarator &D,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      ForRangeInit *FRI = nullptr);
  Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
  Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

  /// When in code-completion, skip parsing of the function/method body
  /// unless the body contains the code-completion point.
  ///
  /// \returns true if the function body was skipped.
  bool trySkippingFunctionBody();

  bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC,
                        ParsedAttributesWithRange &Attrs);
  DeclSpecContext
  getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
  void ParseDeclarationSpecifiers(
      DeclSpec &DS,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      AccessSpecifier AS = AS_none,
      DeclSpecContext DSC = DeclSpecContext::DSC_normal,
      LateParsedAttrList *LateAttrs = nullptr);
  bool DiagnoseMissingSemiAfterTagDefinition(
      DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
      LateParsedAttrList *LateAttrs = nullptr);

  void ParseSpecifierQualifierList(
      DeclSpec &DS, AccessSpecifier AS = AS_none,
      DeclSpecContext DSC = DeclSpecContext::DSC_normal);

  void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
                                  DeclaratorContext Context);

  void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                          const ParsedTemplateInfo &TemplateInfo,
                          AccessSpecifier AS, DeclSpecContext DSC);
  void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
  void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
                            RecordDecl *TagDecl);

  void ParseStructDeclaration(
      ParsingDeclSpec &DS,
      llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

  bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
  bool isTypeSpecifierQualifier();

  /// isKnownToBeTypeSpecifier - Return true if we know that the specified
  /// token is definitely a type-specifier.  Return false if it isn't part of
  /// a type specifier or if we're not sure.
  bool isKnownToBeTypeSpecifier(const Token &Tok) const;

  /// Return true if we know that we are definitely looking at a
  /// decl-specifier, and isn't part of an expression such as a function-style
  /// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() { if (getLangOpts().CPlusPlus) return isCXXDeclarationSpecifier() == TPResult::True; return isDeclarationSpecifier(true); } /// isDeclarationStatement - Disambiguates between a declaration or an /// expression statement, when parsing function bodies. /// Returns true for declaration, false for expression. bool isDeclarationStatement() { if (getLangOpts().CPlusPlus) return isCXXDeclarationStatement(); return isDeclarationSpecifier(true); } /// isForInitDeclaration - Disambiguates between a declaration or an /// expression in the context of the C 'clause-1' or the C++ // 'for-init-statement' part of a 'for' statement. /// Returns true for declaration, false for expression. bool isForInitDeclaration() { if (getLangOpts().OpenMP) Actions.startOpenMPLoop(); if (getLangOpts().CPlusPlus) return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true); return isDeclarationSpecifier(true); } /// Determine whether this is a C++1z for-range-identifier. bool isForRangeIdentifier(); /// Determine whether we are currently at the start of an Objective-C /// class message that appears to be missing the open bracket '['. bool isStartOfObjCClassMessageMissingOpenBracket(); /// Starting with a scope specifier, identifier, or /// template-id that refers to the current class, determine whether /// this is a constructor declarator. bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false); /// Specifies the context in which type-id/expression /// disambiguation will occur. enum TentativeCXXTypeIdContext { TypeIdInParens, TypeIdUnambiguous, TypeIdAsTemplateArgument }; /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know /// whether the parens contain an expression or a type-id. /// Returns true for a type-id and false for an expression. 
bool isTypeIdInParens(bool &isAmbiguous) {
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdInParens, isAmbiguous);
  // In C the grammar is unambiguous here.
  isAmbiguous = false;
  return isTypeSpecifierQualifier();
}
// Convenience overload that discards the ambiguity result.
bool isTypeIdInParens() {
  bool isAmbiguous;
  return isTypeIdInParens(isAmbiguous);
}

/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
  bool IsAmbiguous;
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
  return isTypeSpecifierQualifier();
}

/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();

/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

// Opaque state for the condition/init-statement disambiguation below;
// defined out of line.
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
  Expression,    ///< Disambiguated as an expression (either kind).
  ConditionDecl, ///< Disambiguated as the declaration form of condition.
  InitStmtDecl,  ///< Disambiguated as a simple-declaration init-statement.
  ForRangeDecl,  ///< Disambiguated as a for-range declaration.
  Error          ///< Can't be any of the above!
};

/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
                                         bool CanBeForRangeDecl);

// Tentatively parse to decide type-id vs. expression in the given context.
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
// Convenience overload that discards the ambiguity result.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool isAmbiguous;
  return isCXXTypeId(Context, isAmbiguous);
}

/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
  True, False, Ambiguous, Error
};

/// Determine whether we could have an enum-base.
///
/// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise
/// only consider this to be an enum-base if the next token is a '{'.
///
/// \return \c false if this cannot possibly be an enum base; \c true
/// otherwise.
bool isEnumBase(bool AllowSemi);

/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
                          bool *InvalidAsDeclSpec = nullptr);

/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType(); /// Determine whether the current token sequence might be /// '<' template-argument-list '>' /// rather than a less-than expression. TPResult isTemplateArgumentList(unsigned TokensToSkip); /// Determine whether an '(' after an 'explicit' keyword is part of a C++20 /// 'explicit(bool)' declaration, in earlier language modes where that is an /// extension. TPResult isExplicitBool(); /// Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl); TPResult TryParseTypeofSpecifier(); TPResult TryParseProtocolQualifiers(); TPResult TryParsePtrOperatorSeq(); TPResult TryParseOperatorId(); TPResult TryParseInitDeclaratorList(); TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true, bool mayHaveDirectInit = false); TPResult TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false); TPResult TryParseFunctionDeclarator(); TPResult TryParseBracketDeclarator(); TPResult TryConsumeDeclarationSpecifier(); /// Try to skip a possibly empty sequence of 'attribute-specifier's without /// full validation of the syntactic structure of attributes. 
bool TrySkipAttributes();

public:
TypeResult
ParseTypeName(SourceRange *Range = nullptr,
              DeclaratorContext Context = DeclaratorContext::TypeNameContext,
              AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
              ParsedAttributes *Attrs = nullptr);

private:
void ParseBlockId(SourceLocation CaretLoc);

/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
  const LangOptions &LO = getLangOpts();
  return LO.DoubleSquareBracketAttributes;
}

// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only '[[' can begin a standard attribute; anything else is fine here.
  if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
    return false;
  return DiagnoseProhibitedCXX11Attribute();
}

bool DiagnoseProhibitedCXX11Attribute();

// Diagnose an attribute (or 'alignas') that appears at the current
// location but belongs at CorrectLocation.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
      Tok.isNot(tok::kw_alignas))
    return;
  DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                     SourceLocation CorrectLocation);

void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                    DeclSpec &DS, Sema::TagUseKind TUK);

// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  Attrs.clear();
}

void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  Attrs.clearListOnly();
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                  SourceLocation FixItLoc);

// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which standard permits but we don't support yet, for example, attributes
// appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                             unsigned DiagID);

/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();

/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();

/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName,
                                  SourceLocation AttrNameLoc,
                                  ParsedAttributes &Attrs,
                                  SourceLocation *EndLoc,
                                  IdentifierInfo *ScopeName,
                                  SourceLocation ScopeLoc,
                                  ParsedAttr::Syntax Syntax);

// If the current token begins a GNU __attribute__, parse it and attach the
// result to the declarator.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.is(tok::kw___attribute)) {
    ParsedAttributes attrs(AttrFactory);
    SourceLocation endLoc;
    ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
    D.takeAttributes(attrs, endLoc);
  }
}

void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                             SourceLocation *endLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.is(tok::kw___attribute))
    ParseGNUAttributes(attrs, endLoc, LateAttrs);
}

void ParseGNUAttributes(ParsedAttributes &attrs,
                        SourceLocation *endLoc = nullptr,
                        LateParsedAttrList *LateAttrs = nullptr,
                        Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
                           SourceLocation AttrNameLoc,
                           ParsedAttributes &Attrs, SourceLocation *EndLoc,
                           IdentifierInfo *ScopeName,
                           SourceLocation ScopeLoc,
                           ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();

unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName,
                                 SourceLocation AttrNameLoc,
                                 ParsedAttributes &Attrs,
                                 SourceLocation *EndLoc,
                                 IdentifierInfo *ScopeName,
                                 SourceLocation ScopeLoc,
                                 ParsedAttr::Syntax Syntax);

// If a C++11 attribute-specifier-seq begins here, parse it and attach the
// result to the declarator.
void MaybeParseCXX11Attributes(Declarator &D) {
  if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
    ParsedAttributesWithRange attrs(AttrFactory);
    SourceLocation endLoc;
    ParseCXX11Attributes(attrs, &endLoc);
    D.takeAttributes(attrs, endLoc);
  }
}

// Returns true if any attributes were parsed.
bool MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
    ParsedAttributesWithRange attrsWithRange(AttrFactory);
    ParseCXX11Attributes(attrsWithRange, endLoc);
    attrs.takeAllFrom(attrsWithRange);
    return true;
  }
  return false;
}

void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (standardAttributesAllowed() &&
      isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
    ParseCXX11Attributes(attrs, endLoc);
}

void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
                                  SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                          SourceLocation *EndLoc = nullptr);

/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
                             SourceLocation AttrNameLoc,
                             ParsedAttributes &Attrs, SourceLocation *EndLoc,
                             IdentifierInfo *ScopeName,
                             SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);

// Microsoft-extension '[...]' attributes, only under -fms-extensions.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
    ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
                              SourceLocation *endLoc = nullptr);
// '__declspec(...)' specifiers, only when the keyword is enabled.
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  const auto &LO = getLangOpts();
  if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
    ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                             SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
                                SourceLocation AttrNameLoc,
                                ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);

/// Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
  if (getLangOpts().OpenCL)
    return ParseOpenCLUnrollHintAttribute(Attrs);
  return true;
}

/// Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);

void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);

VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
                                SourceLocation AvailabilityLoc,
                                ParsedAttributes &attrs,
                                SourceLocation *endLoc,
                                IdentifierInfo *ScopeName,
                                SourceLocation ScopeLoc,
                                ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);

void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
                                        SourceLocation Loc,
                                        ParsedAttributes &Attrs,
                                        SourceLocation *EndLoc,
                                        IdentifierInfo *ScopeName,
                                        SourceLocation ScopeLoc,
                                        ParsedAttr::Syntax Syntax);

void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
                                     SourceLocation ObjCBridgeRelatedLoc,
                                     ParsedAttributes &attrs,
                                     SourceLocation *endLoc,
                                     IdentifierInfo *ScopeName,
                                     SourceLocation ScopeLoc,
                                     ParsedAttr::Syntax Syntax);

void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
                                      SourceLocation AttrNameLoc,
                                      ParsedAttributes &Attrs,
                                      SourceLocation *EndLoc,
                                      IdentifierInfo *ScopeName,
                                      SourceLocation ScopeLoc,
                                      ParsedAttr::Syntax Syntax);

void ParseSwiftNewtypeAttribute(IdentifierInfo &SwiftNewtype,
                                SourceLocation SwiftNewtypeLoc,
                                ParsedAttributes &attrs,
                                SourceLocation *endLoc,
                                IdentifierInfo *ScopeName,
                                SourceLocation ScopeLoc,
                                ParsedAttr::Syntax Syntax);

void ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
                               SourceLocation AttrNameLoc,
                               ParsedAttributes &Attrs,
                               SourceLocation *EndLoc,
                               IdentifierInfo *ScopeName,
                               SourceLocation ScopeLoc,
                               ParsedAttr::Syntax Syntax);

void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);

ExprResult ParseAlignArgument(SourceLocation Start,
                              SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
                             SourceLocation *endLoc = nullptr);
ExprResult ParseExtIntegerArgument();

void ParsePtrauthQualifier(ParsedAttributes &Attrs);

VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
// Convenience overload operating on the current token.
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
  return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
                                        SourceLocation FriendLoc);

bool isCXX11FinalKeyword() const;

/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
  Parser &P;
  CXXScopeSpec &SS;
  bool EnteredScope;
  bool CreatedScope;

public:
  DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
      : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}

  void EnterDeclaratorScope() {
    assert(!EnteredScope && "Already entered the scope!");
    assert(SS.isSet() && "C++ scope was not set!");

    CreatedScope = true;
    P.EnterScope(0); // Not a decl scope.

    // Only record entry if Sema accepted the scope, so the destructor
    // exits exactly what was entered.
    if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
      EnteredScope = true;
  }

  ~DeclaratorScopeObj() {
    if (EnteredScope) {
      assert(SS.isSet() && "C++ scope was cleared ?");
      P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
    }
    if (CreatedScope)
      P.ExitScope();
  }
};

/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
                             DirectDeclParseFunction DirectDeclParser);

// Bitmask describing which attribute syntaxes ParseTypeQualifierListOpt
// accepts (or diagnoses) in a given context.
enum AttrRequirements {
  AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
  AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
  AR_GNUAttributesParsed = 1 << 1,
  AR_CXX11AttributesParsed = 1 << 2,
  AR_DeclspecAttributesParsed = 1 << 3,
  AR_AllAttributesParsed = AR_GNUAttributesParsed |
                           AR_CXX11AttributesParsed |
                           AR_DeclspecAttributesParsed,
  AR_VendorAttributesParsed = AR_GNUAttributesParsed |
                              AR_DeclspecAttributesParsed
};

void ParseTypeQualifierListOpt(
    DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
    bool AtomicAllowed = true, bool IdentifierRequired = false,
    Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs,
                             BalancedDelimiterTracker &Tracker,
                             bool IsAmbiguous, bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
    const Declarator &D, const DeclSpec &DS,
    llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
                       SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
    Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
    DeclaratorContext DeclaratorContext, ParsedAttributes &attrs,
    SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
    SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);

//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]

/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
  /// This is not an attribute specifier.
  CAK_NotAttributeSpecifier,
  /// This should be treated as an attribute-specifier.
  CAK_AttributeSpecifier,
  /// The next tokens are '[[', but this is not an attribute-specifier. This
  /// is ill-formed by C++11 [dcl.attr.grammar]p6.
  CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
                          bool OuterMightBeMessageSend = false);

void DiagnoseUnexpectedNamespace(NamedDecl *Context);

DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
                              SourceLocation &DeclEnd,
                              SourceLocation InlineLoc = SourceLocation());

// One level of a nested-namespace-definition ('namespace a::b::c { ... }').
struct InnerNamespaceInfo {
  SourceLocation NamespaceLoc;
  SourceLocation InlineLoc;
  SourceLocation IdentLoc;
  IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;

void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
                         unsigned int index, SourceLocation &InlineLoc,
                         ParsedAttributes &attrs,
                         BalancedDelimiterTracker &Tracker);

Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
    DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
    SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
                          SourceLocation UsingLoc,
                          SourceLocation &DeclEnd, ParsedAttributes &attrs);

// The parsed pieces of one declarator in a using-declaration.
struct UsingDeclarator {
  SourceLocation TypenameLoc;
  CXXScopeSpec SS;
  UnqualifiedId Name;
  SourceLocation EllipsisLoc;

  void clear() {
    TypenameLoc = EllipsisLoc = SourceLocation();
    SS.clear();
    Name.clear();
  }
};

bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
                                     const ParsedTemplateInfo &TemplateInfo,
                                     SourceLocation UsingLoc,
                                     SourceLocation &DeclEnd,
                                     AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
    const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
    UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
    ParsedAttributes &Attrs, Decl **OwnedType = nullptr);

Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
                          SourceLocation AliasLoc, IdentifierInfo *Alias,
                          SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.

bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
                         DeclSpec &DS,
                         const ParsedTemplateInfo &TemplateInfo,
                         AccessSpecifier AS, bool EnteringContext,
                         DeclSpecContext DSC,
                         ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
                                SourceLocation AttrFixitLoc,
                                unsigned TagType, Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
                                 SourceLocation AttrFixitLoc,
                                 ParsedAttributesWithRange &Attrs,
                                 unsigned TagType, Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
                                     SourceLocation &EqualLoc);
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
                                               VirtSpecifiers &VS,
                                               ExprResult &BitfieldSize,
                                               LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(
    Declarator &D, VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
    AccessSpecifier AS, ParsedAttributes &Attr,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
    AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
    DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
                                    Decl *ThisDecl);

//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
                                  SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;

bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, ParsedType ObjectType,
                                  bool ObjectHadErrors,
                                  SourceLocation TemplateKWLoc,
                                  IdentifierInfo *Name,
                                  SourceLocation NameLoc,
                                  bool EnteringContext, UnqualifiedId &Id,
                                  bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
                                ParsedType ObjectType,
                                UnqualifiedId &Result);

//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
                                          CachedTokens &Toks,
                                          SourceLocation Loc);

/// Parse a property kind into \p TIProperty for the selector set \p Set and
/// selector \p Selector.
void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty,
                               llvm::omp::TraitSet Set,
                               llvm::omp::TraitSelector Selector,
                               llvm::StringMap<SourceLocation> &Seen);

/// Parse a selector kind into \p TISelector for the selector set \p Set.
void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector,
                               llvm::omp::TraitSet Set,
                               llvm::StringMap<SourceLocation> &Seen);

/// Parse a selector set kind into \p TISet.
void parseOMPTraitSetKind(OMPTraitSet &TISet,
                          llvm::StringMap<SourceLocation> &Seen);

/// Parses an OpenMP context property.
void parseOMPContextProperty(OMPTraitSelector &TISelector,
                             llvm::omp::TraitSet Set,
                             llvm::StringMap<SourceLocation> &Seen);

/// Parses an OpenMP context selector.
void parseOMPContextSelector(OMPTraitSelector &TISelector,
                             llvm::omp::TraitSet Set,
                             llvm::StringMap<SourceLocation> &SeenSelectors);

/// Parses an OpenMP context selector set.
void parseOMPContextSelectorSet(OMPTraitSet &TISet,
                                llvm::StringMap<SourceLocation> &SeenSets);

/// Parses OpenMP context selectors.
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);

/// Parse a `match` clause for an '#pragma omp declare variant'. Return true
/// if there was an error.
bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI);

/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
                                   SourceLocation Loc);

/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();

/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
                                       SourceLocation Loc);

/// Skip tokens until a `annot_pragma_openmp_end` was found. Emit a warning
/// if it is not the current token.
void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind);

/// Check the \p FoundKind against the \p ExpectedKind, if not issue an error
/// that the "end" matching the "begin" directive of kind \p BeginKind was
/// not found. Finally, if the expected kind was found or if
/// \p SkipUntilOpenMPEnd is set, skip ahead using the helper
/// `skipUntilPragmaOpenMPEnd`.
void parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
                          OpenMPDirectiveKind ExpectedKind,
                          OpenMPDirectiveKind FoundKind,
                          SourceLocation MatchingLoc,
                          SourceLocation FoundLoc,
                          bool SkipUntilOpenMPEnd);

/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
    AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
    bool Delayed = false,
    DeclSpec::TST TagType = DeclSpec::TST_unspecified,
    Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the
/// reduction initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);

/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
                                           DeclarationName &Name,
                                           AccessSpecifier AS = AS_none);

/// Tries to parse cast part of OpenMP array shaping operation:
/// '[' expression ']' { '[' expression ']' } ')'.
bool tryParseOpenMPArrayShapingCastPart(); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. StmtResult ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param DKind Directive kind. /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause without any additional arguments. 
/// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses and creates OpenMP 5.0 iterators expression: /// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier = /// <range-specification> }+ ')' ExprResult ParseOpenMPIteratorsExpr(); /// Parses allocators and traits in the context of the uses_allocator clause. /// Expected format: /// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')' OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc, bool IsAddressOfOperand = false); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *DepModOrTailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or ///< lastprivate clause. SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers> MapTypeModifiers; SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers> MapTypeModifiersLoc; bool IsMapTypeImplicit = false; SourceLocation ExtraModifierLoc; }; /// Parses clauses with list. 
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); TPResult isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl 
*ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); bool isTypeConstraintAnnotation(); bool TryAnnotateTypeConstraint(); NamedDecl * ParseConstrainedTemplateTypeParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc, SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true, bool TypeConstraint = false); void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS, bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); // C++2a: Template, concept definition [temp] Decl * ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == 
tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); /// Parse the given string as a type. /// /// This is a dangerous utility function currently employed only by API notes. /// It is not a general entry-point for safely parsing types from strings. /// /// \param typeStr The string to be parsed as a type. /// \param context The name of the context in which this string is being /// parsed, which will be used in diagnostics. /// \param includeLoc The location at which this parse was triggered. TypeResult parseTypeFromString(StringRef typeStr, StringRef context, SourceLocation includeLoc); //===--------------------------------------------------------------------===// // Embarcadero: Arary and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); ExprResult ParseBuiltinPtrauthTypeDiscriminator(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override; void CodeCompleteNaturalLanguage() override; class GNUAsmQualifiers { unsigned Qualifiers = AQ_unspecified; public: enum AQ { AQ_unspecified = 0, AQ_volatile = 1, AQ_inline = 2, AQ_goto = 4, }; static const char *getQualifierName(AQ Qualifier); bool setAsmQualifier(AQ 
Qualifier); inline bool isVolatile() const { return Qualifiers & AQ_volatile; }; inline bool isInline() const { return Qualifiers & AQ_inline; }; inline bool isGoto() const { return Qualifiers & AQ_goto; } }; bool isGCCAsmStatement(const Token &TokAfterAsm) const; bool isGNUAsmQualifier(const Token &TokAfterAsm) const; GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const; bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ); }; } // end namespace clang #endif
GB_binop__bget_uint16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bget_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_08__bget_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__bget_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_04__bget_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bget_uint16)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bget_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__bget_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bget_uint16)
// C=scalar+B                       GB (_bind1st__bget_uint16)
// C=scalar+B'                      GB (_bind1st_tran__bget_uint16)
// C=A+scalar                       GB (_bind2nd__bget_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__bget_uint16)

// C type:   uint16_t
// A type:   uint16_t
// A pattern? 0
// B type:   uint16_t
// B pattern? 0

// BinaryOp: cij = GB_BITGET (aij, bij, uint16_t, 16)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITGET (x, y, uint16_t, 16) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (any of the GxB_NO_* control macros suppresses this specialized kernel)
#define GB_DISABLE \
    (GxB_NO_BGET || GxB_NO_UINT16 || GxB_NO_BGET_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (BGET is none of these, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bget_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bget_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bget_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable; the generator emits a duplicate return here.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bget_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion uses alpha/beta in place of implicit zeros
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bget_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bget_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bget_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bget_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bget_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITGET (x, bij, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bget_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITGET (aij, y, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_BITGET (x, aij, uint16_t, 16) ; \
}

GrB_Info GB (_bind1st_tran__bget_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_BITGET (aij, y, uint16_t, 16) ; \
}

GrB_Info GB (_bind2nd_tran__bget_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
fused_rowwise_nbitfake_conversion_ops.h
#pragma once #ifdef _OPENMP #include <omp.h> #endif #include "caffe2/core/context.h" #include "caffe2/core/logging.h" #include "caffe2/core/operator.h" #include "caffe2/operators/reducer_functors.h" #include "caffe2/utils/math.h" namespace caffe2 { namespace internal { inline bool is_little_endian() { constexpr std::int32_t kValue = 1; return reinterpret_cast<const std::uint8_t*>(&kValue)[0] == 1; } void convertfp32fp32(float* dst, const float* src, size_t N); void convertfp16fp32(float* dst, const at::Half* src, size_t N); /** * @params Xmin initial solution passed and potentiall better solution returns * @params Xmax initial solution passed and potentiall better solution returns */ void param_search_greedy( const float* X, int N, const int n_bins, // = 200, const float ratio, // = 0.16, float& Xmin, float& Xmax, int bit_rate); } // namespace internal // Fake 2/4 bit quantization // Creates a 2/4bit rowwise quantized blob with scales and biases in fp16 // The storage format is 8 bit rowwise with scales and biases in fp32 template < int BIT_RATE, typename T, void (*convert)(float* dst, const T* src, size_t N), bool GREEDY = false> class FloatToFusedNBitFakeRowwiseQuantizedOp final : public Operator<CPUContext> { public: FloatToFusedNBitFakeRowwiseQuantizedOp(const OperatorDef& def, Workspace* ws) : Operator<CPUContext>(def, ws) {} ~FloatToFusedNBitFakeRowwiseQuantizedOp() override {} bool RunOnDevice() override { CAFFE_ENFORCE(internal::is_little_endian(), "Unsupported endianness"); const auto& input = Input(DATA_FLOAT); const auto input_rows = input.size(0); const auto input_columns = input.size(1); CAFFE_ENFORCE_EQ(input.dim(), 2, "Expect input to be a matrix"); const std::vector<int64_t> output_dimensions = {input_rows, input_columns + 8}; auto* output = Output( DATA_FUSED_SCALE_BIAS_INT8, output_dimensions, at::dtype<uint8_t>()); const auto* input_data = input.template data<T>(); auto* output_data = output->template mutable_data<uint8_t>(); const auto 
output_columns = output->size(1); if (!std::is_same<T, float>::value && !std::is_same<T, at::Half>::value) { CAFFE_THROW("Unsupported data type"); } bool use_openmp = GREEDY; #ifdef _OPENMP vector<float> tmp_vec(input_columns * (GREEDY ? omp_get_max_threads() : 1)); #else vector<float> tmp_vec(input_columns); #endif #pragma omp parallel for if (GREEDY) for (int row = 0; row < input_rows; ++row) { float* tmp = tmp_vec.data(); #ifdef _OPENMP if (GREEDY) { tmp = &tmp_vec[omp_get_thread_num() * input_columns]; } #endif convert(tmp, input_data + row * input_columns, input_columns); uint8_t* output_row = output_data + row * output_columns; float* output_row_scale_bias = reinterpret_cast<float*>(output_row + input_columns); float minimum_element = *std::min_element(tmp, tmp + input_columns); float maximum_element = *std::max_element(tmp, tmp + input_columns); if (GREEDY) { internal::param_search_greedy( tmp, input_columns, 200, 0.16, minimum_element, maximum_element, BIT_RATE); } minimum_element = static_cast<at::Half>(minimum_element); const float range = maximum_element - minimum_element; const float scale = range == 0 ? 1.0f : static_cast<float>(static_cast<at::Half>( range / static_cast<float>((1 << BIT_RATE) - 1))); const float inverse_scale = 1.0f / scale; output_row_scale_bias[0] = scale; output_row_scale_bias[1] = minimum_element; // NOLINTNEXTLINE(clang-diagnostic-sign-compare) for (size_t col = 0; col < input_columns; ++col) { output_row[col] = std::max( 0, std::min<int>( std::lrintf((tmp[col] - minimum_element) * inverse_scale), (1 << BIT_RATE) - 1)); } } return true; } private: INPUT_TAGS(DATA_FLOAT); // INT8 suffix because this is a fake quantization operator whose output // type is always 8-bit regardless of BIT_RATE. OUTPUT_TAGS(DATA_FUSED_SCALE_BIAS_INT8); }; } // namespace caffe2
effect.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/blob.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/constitute.h" #include "magick/decorate.h" #include "magick/distort.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/effect.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/matrix.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/montage.h" #include "magick/morphology.h" #include "magick/morphology-private.h" #include "magick/opencl-private.h" #include "magick/paint.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resize.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/shear.h" #include "magick/signature-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/transform.h" #include "magick/threshold.h" #ifdef MAGICKCORE_CLPERFMARKER #include "CLPerfMarker.h" #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() adaptively blurs the image by blurring less % intensely near image edges and more intensely far from edges. 
We blur the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
%  The format of the AdaptiveBlurImage method is:
%
%      Image *AdaptiveBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%      Image *AdaptiveBlurImageChannel(const Image *image,
%        const ChannelType channel,double radius,const double sigma,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Convenience wrapper: adaptively blur all default channels.
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *blur_image;

  blur_image=AdaptiveBlurImageChannel(image,DefaultChannels,radius,sigma,
    exception);
  return(blur_image);
}

MagickExport Image *AdaptiveBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *blur_view,
    *edge_view,
    *image_view;

  double
    **kernel,
    normalize;

  Image
    *blur_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  /* A (near) zero sigma means no blur: return the clone unchanged. */
  if (fabs(sigma) <= MagickEpsilon)
    return(blur_image);
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brighness channel, level, blur, and level again.
    The leveled edge map later selects, per pixel, how wide a kernel to use.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.  Only the
    even-indexed slots of kernel[] are populated; each slot i holds a
    (width-i) x (width-i) normalized Gaussian.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /* Fold any normalization residue into the center tap. */
    kernel[i][(k-1)/2]+=(1.0-normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /* A kernel allocation failed: release the ones acquired so far. */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively blur image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict r;

    register IndexPacket
      *magick_restrict blur_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      double
        alpha,
        gamma;

      DoublePixelPacket
        pixel;

      /* NOTE: these i, k, u, v intentionally shadow the outer variables. */
      register const double
        *magick_restrict k;

      register ssize_t
        i,
        u,
        v;

      gamma=0.0;
      /*
        Select a kernel slot from the edge strength: strong edges pick a
        small (width-i) kernel, flat areas pick the full-width kernel.
      */
      i=(ssize_t) ceil((double) width*QuantumScale*
        GetPixelIntensity(edge_image,r)-0.5);
      if (i < 0)
        i=0;
      else
        if (i > (ssize_t) width)
          i=(ssize_t) width;
      if ((i & 0x01) != 0)
        i--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y-
        (ssize_t) ((width-i)/2L),width-i,width-i,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      pixel.red=bias.red;
      pixel.green=bias.green;
      pixel.blue=bias.blue;
      pixel.opacity=bias.opacity;
      pixel.index=bias.index;
      k=kernel[i];
      for (v=0; v < (ssize_t) (width-i); v++)
      {
        for (u=0; u < (ssize_t) (width-i); u++)
        {
          alpha=1.0;
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
          if ((channel & RedChannel) != 0)
            pixel.red+=(*k)*alpha*GetPixelRed(p);
          if ((channel & GreenChannel) != 0)
            pixel.green+=(*k)*alpha*GetPixelGreen(p);
          if ((channel & BlueChannel) != 0)
            pixel.blue+=(*k)*alpha*GetPixelBlue(p);
          if ((channel & OpacityChannel) != 0)
            pixel.opacity+=(*k)*GetPixelOpacity(p);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+(width-i)*v+u);
          gamma+=(*k)*alpha;
          k++;
          p++;
        }
      }
      /* Renormalize by the accumulated (alpha-weighted) kernel mass. */
      gamma=PerceptibleReciprocal(gamma);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index));
      q++;
      r++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d a p t i v e S h a r p e n I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
%  intensely near image edges and less intensely far from edges. We sharpen the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
%  The format of the AdaptiveSharpenImage method is:
%
%      Image *AdaptiveSharpenImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%      Image *AdaptiveSharpenImageChannel(const Image *image,
%        const ChannelType channel,double radius,const double sigma,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *sharp_image;

  /* Convenience wrapper: sharpen all default channels. */
  sharp_image=AdaptiveSharpenImageChannel(image,DefaultChannels,radius,sigma,
    exception);
  return(sharp_image);
}

MagickExport Image *AdaptiveSharpenImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    **kernel,
    normalize;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sharp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /* A near-zero sigma is a no-op: return the unmodified clone. */
  if (fabs(sigma) <= MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&sharp_image->exception);
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
    The (auto-leveled, blurred) edge image later drives per-pixel kernel
    selection.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      /* Best effort: keep the raw edge image if the blur failed. */
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.  Only the
    even slots are populated; slot i holds a (width-i)x(width-i) kernel.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        /* Negative Gaussian taps; the center tap below flips the sum. */
        kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /* Center tap turns this into a sharpening (unsharp-like) kernel. */
    kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /* Allocation failed part-way: release every kernel acquired so far. */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sharp_image,sharp_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict r;

    register IndexPacket
      *magick_restrict sharp_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    sharp_indexes=GetCacheViewAuthenticIndexQueue(sharp_view);
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      double
        alpha,
        gamma;

      DoublePixelPacket
        pixel;

      register const double
        *magick_restrict k;

      register ssize_t
        i,
        u,
        v;

      gamma=0.0;
      /*
        Select a kernel by edge intensity: strong edges give a large index i
        (small kernel, intense sharpening); flat regions give i near 0 (full
        width-x-width kernel).  i is forced even to match the allocated slots.
      */
      i=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (i < 0)
        i=0;
      else
        if (i > (ssize_t) width)
          i=(ssize_t) width;
      if ((i & 0x01) != 0)
        i--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y-
        (ssize_t) ((width-i)/2L),width-i,width-i,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      k=kernel[i];
      pixel.red=bias.red;
      pixel.green=bias.green;
      pixel.blue=bias.blue;
      pixel.opacity=bias.opacity;
      pixel.index=bias.index;
      for (v=0; v < (ssize_t) (width-i); v++)
      {
        for (u=0; u < (ssize_t) (width-i); u++)
        {
          /* Alpha-weighted convolution; gamma renormalizes at the end. */
          alpha=1.0;
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
          if ((channel & RedChannel) != 0)
            pixel.red+=(*k)*alpha*GetPixelRed(p);
          if ((channel & GreenChannel) != 0)
            pixel.green+=(*k)*alpha*GetPixelGreen(p);
          if ((channel & BlueChannel) != 0)
            pixel.blue+=(*k)*alpha*GetPixelBlue(p);
          if ((channel & OpacityChannel) != 0)
            pixel.opacity+=(*k)*GetPixelOpacity(p);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+(width-i)*v+u);
          gamma+=(*k)*alpha;
          k++;
          p++;
        }
      }
      gamma=PerceptibleReciprocal(gamma);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(sharp_indexes+x,ClampToQuantum(gamma*pixel.index));
      q++;
      r++;
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u r   I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlurImage() blurs an image.  We convolve the image with a Gaussian operator
%  of the given radius and standard deviation (sigma).  For reasonable results,
%  the radius should be larger than sigma.  Use a radius of 0 and BlurImage()
%  selects a suitable radius for you.
%
%  The format of the BlurImage method is:
%
%      Image *BlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%      Image *BlurImageChannel(const Image *image,const ChannelType channel,
%        const double radius,const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *BlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *blur_image;

  /* Convenience wrapper: blur all default channels. */
  blur_image=BlurImageChannel(image,DefaultChannels,radius,sigma,exception);
  return(blur_image);
}

MagickExport Image *BlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
  char
    geometry[MaxTextExtent];

  KernelInfo
    *kernel_info;

  Image
    *blur_image = NULL;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when available; fall through to the
     CPU morphology path if it returns NULL. */
  blur_image=AccelerateBlurImage(image,channel,radius,sigma,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  /* Separable blur: a 1-D Gaussian pass, then the same kernel rotated 90
     degrees for the second pass. */
  (void) FormatLocaleString(geometry,MaxTextExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1,
    kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n v o l v e   I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvolveImage() applies a custom convolution kernel to the image.
%
%  The format of the ConvolveImage method is:
%
%      Image *ConvolveImage(const Image *image,const size_t order,
%        const double *kernel,ExceptionInfo *exception)
%      Image *ConvolveImageChannel(const Image *image,const ChannelType channel,
%        const size_t order,const double *kernel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o order: the number of columns and rows in the filter kernel.
%
%    o kernel: An array of double representing the convolution kernel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *ConvolveImage(const Image *image,const size_t order,
  const double *kernel,ExceptionInfo *exception)
{
  Image
    *convolve_image;

#ifdef MAGICKCORE_CLPERFMARKER
  clBeginPerfMarkerAMD(__FUNCTION__,"");
#endif
  /* Convenience wrapper: convolve all default channels. */
  convolve_image=ConvolveImageChannel(image,DefaultChannels,order,kernel,
    exception);
#ifdef MAGICKCORE_CLPERFMARKER
  clEndPerfMarkerAMD();
#endif
  return(convolve_image);
}

MagickExport Image *ConvolveImageChannel(const Image *image,
  const ChannelType channel,const size_t order,const double *kernel,
  ExceptionInfo *exception)
{
  Image
    *convolve_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  kernel_info=AcquireKernelInfo((const char *) NULL);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /* Build an order-x-order KernelInfo centered on the middle tap. */
  kernel_info->width=order;
  kernel_info->height=order;
  kernel_info->x=(ssize_t) (order-1)/2;
  kernel_info->y=(ssize_t) (order-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel_info->width,kernel_info->width*sizeof(*kernel_info->values)));
  if (kernel_info->values == (double *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Copy the caller's taps; the KernelInfo owns (and later frees) the copy. */
  for (i=0; i < (ssize_t) (order*order); i++)
    kernel_info->values[i]=kernel[i];
  convolve_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* OpenCL fast path; NULL means fall back to the CPU morphology path. */
  convolve_image=AccelerateConvolveImageChannel(image,channel,kernel_info,
    exception);
#endif
  if (convolve_image == (Image *) NULL)
    convolve_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1,
      kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(convolve_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s p e c k l e   I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DespeckleImage() reduces the speckle noise in an image while preserving the
%
%  edges of the original image.  A speckle removing filter uses a complementary
%  hulling technique (raising pixels that are darker than their surrounding
%  neighbors, then complementarily lowering pixels that are brighter than their
%  surrounding neighbors) to reduce the speckle index of that image (reference
%  Crimmins speckle removal).
%
%  The format of the DespeckleImage method is:
%
%      Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Hull() performs one hull pass over the plane buffers f and g.  Both buffers
  are (columns+2) x (rows+2) with a one-pixel guard border; f is the input
  plane and g receives an intermediate result before f is updated in place.
  polarity > 0 raises pixels darker than the neighbor at (x_offset,y_offset);
  polarity <= 0 lowers pixels brighter than that neighbor.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  register Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /* Skip the top guard row; r points at the offset neighbor of p. */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    SignedQuantum
      v;

    /* (2*y+1)+y*columns skips the guard columns of every preceding row. */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(SignedQuantum) p[i];
        if ((SignedQuantum) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(SignedQuantum) p[i];
        if ((SignedQuantum) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /* Second pass: compare against both the offset and mirrored neighbors of
     the intermediate plane g, writing the final result back into f. */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*(columns+2)+x_offset);
  s=q-(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    SignedQuantum
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(SignedQuantum) q[i];
        if (((SignedQuantum) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((SignedQuantum) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(SignedQuantum) q[i];
        if (((SignedQuantum) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((SignedQuantum) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}

MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag  "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  register ssize_t
    i;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  size_t
    length,
    number_channels;

  /* The four hull directions (and their mirrors) used by Crimmins. */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* OpenCL fast path; NULL means fall back to the CPU implementation. */
  despeckle_image=AccelerateDespeckleImage(image, exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(despeckle_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&despeckle_image->exception);
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer.  Each plane carries a one-pixel guard border, hence
    the +2 on both dimensions.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, one channel plane at a time (R,G,B,opacity,
    and black for CMYK).
  */
  status=MagickTrue;
  number_channels=(size_t) (image->colorspace == CMYKColorspace ? 5 : 4);
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) number_channels; i++)
  {
    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    /* Skip the opacity plane when the image carries no matte channel. */
    if ((image->matte == MagickFalse) && (i == 3))
      continue;
    (void) memset(pixels,0,length*sizeof(*pixels));
    /* j walks the bordered plane; start past the top guard row. */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const IndexPacket
        *magick_restrict indexes;

      register const PixelPacket
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        switch (i)
        {
          case 0: pixels[j]=GetPixelRed(p); break;
          case 1: pixels[j]=GetPixelGreen(p); break;
          case 2: pixels[j]=GetPixelBlue(p); break;
          case 3: pixels[j]=GetPixelOpacity(p); break;
          case 4: pixels[j]=GetPixelBlack(indexes+x); break;
          default: break;
        }
        p++;
        j++;
      }
      j++;
    }
    (void) memset(buffer,0,length*sizeof(*buffer));
    /* Raise dark speckles then lower bright ones, along each direction and
       its mirror. */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /* Copy the despeckled plane back into the output image. */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register IndexPacket
        *magick_restrict indexes;

      register PixelPacket
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (PixelPacket *) NULL)
        break;
      indexes=GetCacheViewAuthenticIndexQueue(despeckle_view);
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        switch (i)
        {
          case 0: SetPixelRed(q,pixels[j]); break;
          case 1: SetPixelGreen(q,pixels[j]); break;
          case 2: SetPixelBlue(q,pixels[j]); break;
          case 3: SetPixelOpacity(q,pixels[j]); break;
          case 4: SetPixelIndex(indexes+x,pixels[j]); break;
          default: break;
        }
        q++;
        j++;
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        {
          status=MagickFalse;
          break;
        }
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          number_channels);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E d g e   I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EdgeImage() finds edges in an image.  Radius defines the radius of the
%  convolution filter.  Use a radius of 0 and EdgeImage() selects a suitable
%  radius for you.
%
%  The format of the EdgeImage method is:
%
%      Image *EdgeImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel_info->width,kernel_info->height*sizeof(*kernel_info->values)));
  if (kernel_info->values == (double *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Laplacian-style kernel: every tap is -1; after the loop i equals
    width*height, so values[i/2] is the center tap, set so the kernel sums
    to zero.
  */
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]=(-1.0);
  kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0;
  edge_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* OpenCL fast path; NULL means fall back to the CPU morphology path. */
  edge_image=AccelerateConvolveImageChannel(image,DefaultChannels,kernel_info,
    exception);
#endif
  if (edge_image == (Image *) NULL)
    edge_image=MorphologyImageChannel(image,DefaultChannels,ConvolveMorphology,
      1,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E m b o s s   I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EmbossImage() returns a grayscale image with a three-dimensional effect.
%  We convolve the image with a Gaussian operator of the given radius and
%  standard deviation (sigma).  For reasonable results, radius should be
%  larger than sigma.  Use a radius of 0 and Emboss() selects a suitable
%  radius for you.
%
%  The format of the EmbossImage method is:
%
%      Image *EmbossImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel_info->width,kernel_info->width*sizeof(*kernel_info->values)));
  if (kernel_info->values == (double *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Build the emboss kernel: Gaussian taps, positive on one side of the
    anti-diagonal and negative on the other.  The u != k test zeroes every
    tap off the anti-diagonal (k decreases once per row).
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(double) (((u < 0) || (v < 0) ? -8.0 : 8.0)*
        exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /* Normalize so the taps sum to 1 (PerceptibleReciprocal avoids 1/0). */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* OpenCL fast path; NULL means fall back to the CPU morphology path. */
  emboss_image=AccelerateConvolveImageChannel(image,DefaultChannels,kernel_info,
    exception);
#endif
  if (emboss_image == (Image *) NULL)
    emboss_image=MorphologyImageChannel(image,DefaultChannels,
      ConvolveMorphology,1,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  /* Equalize each channel independently for the classic grayscale relief. */
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImageChannel(emboss_image,(ChannelType) (AllChannels &~
      SyncChannels));
  return(emboss_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F i l t e r   I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FilterImage() applies a custom convolution kernel to the image.
%
%  The format of the FilterImage method is:
%
%      Image *FilterImage(const Image *image,const KernelInfo *kernel,
%        ExceptionInfo *exception)
%      Image *FilterImageChannel(const Image *image,const ChannelType channel,
%        const KernelInfo *kernel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o kernel: the filtering kernel.
%
%    o exception: return any errors or warnings in this structure.
% */

/*
  FilterImage() is a convenience wrapper: it convolves the default channels
  of the image with the given kernel by delegating to FilterImageChannel().
*/
MagickExport Image *FilterImage(const Image *image,const KernelInfo *kernel,
  ExceptionInfo *exception)
{
  Image
    *filter_image;

  filter_image=FilterImageChannel(image,DefaultChannels,kernel,exception);
  return(filter_image);
}

/*
  FilterImageChannel() convolves the selected channels of 'image' with the
  user-supplied kernel and returns a new image (caller owns it), or NULL on
  failure with the reason recorded in 'exception'.
*/
MagickExport Image *FilterImageChannel(const Image *image,
  const ChannelType channel,const KernelInfo *kernel,ExceptionInfo *exception)
{
#define FilterImageTag "Filter/Image"

  CacheView
    *filter_view,
    *image_view;

  Image
    *filter_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  MagickRealType
    *filter_kernel;

  register ssize_t
    i;

  ssize_t
    y;

#ifdef MAGICKCORE_CLPERFMARKER
  clBeginPerfMarkerAMD(__FUNCTION__,"");
#endif
  /*
    Initialize filter image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* an even-width kernel has no well-defined center pixel */
  if ((kernel->width % 2) == 0)
    ThrowImageException(OptionError,"KernelWidthMustBeAnOddNumber");
  if (image->debug != MagickFalse)
    {
      /*
        Dump the kernel values to the transform log, one row per line.
      */
      char
        format[MaxTextExtent],
        *message;

      register const double
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " FilterImage with %.20gx%.20g kernel:",(double) kernel->width,
        (double) kernel->height);
      message=AcquireString("");
      k=kernel->values;
      for (v=0; v < (ssize_t) kernel->height; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) kernel->width; u++)
        {
          (void) FormatLocaleString(format,MaxTextExtent,"%g ",*k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* try the GPU path first; fall through to the CPU loop on failure */
  filter_image=AccelerateConvolveImageChannel(image,channel,kernel,exception);
  if (filter_image != (Image *) NULL)
    {
#ifdef MAGICKCORE_CLPERFMARKER
      clEndPerfMarkerAMD();
#endif
      return(filter_image);
    }
#endif
  filter_image=CloneImage(image,0,0,MagickTrue,exception);
  if (filter_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(filter_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&filter_image->exception);
      filter_image=DestroyImage(filter_image);
      return((Image *) NULL);
    }
  /*
    Copy the kernel values into an aligned MagickRealType working buffer.
    NOTE(review): despite the historical "Normalize kernel" wording, no
    normalization is applied here -- the values are used verbatim.
  */
  filter_kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*filter_kernel)));
  if (filter_kernel == (MagickRealType *) NULL)
    {
      filter_image=DestroyImage(filter_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    filter_kernel[i]=(MagickRealType) kernel->values[i];
  /*
    Filter image: one row of output per loop iteration.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  filter_view=AcquireAuthenticCacheView(filter_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,filter_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict filter_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* fetch the source rows plus the kernel apron around row y */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (kernel->width-1)/2L),y-
      (ssize_t) ((kernel->height-1)/2L),image->columns+kernel->width,
      kernel->height,exception);
    q=GetCacheViewAuthenticPixels(filter_view,0,y,filter_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    filter_indexes=GetCacheViewAuthenticIndexQueue(filter_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        pixel;

      register const MagickRealType
        *magick_restrict k;

      register const PixelPacket
        *magick_restrict kernel_pixels;

      register ssize_t
        u;

      ssize_t
        v;

      pixel.red=bias.red;
      pixel.green=bias.green;
      pixel.blue=bias.blue;
      pixel.opacity=bias.opacity;
      pixel.index=bias.index;
      k=filter_kernel;
      kernel_pixels=p;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /*
            Opaque path: plain weighted sum of the kernel neighborhood.
            NOTE(review): loop bounds use width for the outer (row) loop and
            height for the inner loop; this matches the historical code and
            is only distinguishable for non-square kernels -- confirm before
            changing.
          */
          for (v=0; v < (ssize_t) kernel->width; v++)
          {
            for (u=0; u < (ssize_t) kernel->height; u++)
            {
              pixel.red+=(*k)*kernel_pixels[u].red;
              pixel.green+=(*k)*kernel_pixels[u].green;
              pixel.blue+=(*k)*kernel_pixels[u].blue;
              k++;
            }
            /* advance one padded source row (columns plus apron) */
            kernel_pixels+=image->columns+kernel->width;
          }
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(pixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(pixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(pixel.blue));
          if ((channel & OpacityChannel) != 0)
            {
              /* second pass over the neighborhood for the opacity channel */
              k=filter_kernel;
              kernel_pixels=p;
              for (v=0; v < (ssize_t) kernel->width; v++)
              {
                for (u=0; u < (ssize_t) kernel->height; u++)
                {
                  pixel.opacity+=(*k)*kernel_pixels[u].opacity;
                  k++;
                }
                kernel_pixels+=image->columns+kernel->width;
              }
              SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *magick_restrict kernel_indexes;

              /* convolve the black (index) channel for CMYK images */
              k=filter_kernel;
              kernel_indexes=indexes;
              for (v=0; v < (ssize_t) kernel->width; v++)
              {
                for (u=0; u < (ssize_t) kernel->height; u++)
                {
                  pixel.index+=(*k)*GetPixelIndex(kernel_indexes+u);
                  k++;
                }
                kernel_indexes+=image->columns+kernel->width;
              }
              SetPixelIndex(filter_indexes+x,ClampToQuantum(pixel.index));
            }
        }
      else
        {
          /*
            Matte path: alpha-weighted convolution; 'gamma' accumulates the
            total alpha weight and renormalizes the color channels.
          */
          double
            alpha,
            gamma;

          gamma=0.0;
          for (v=0; v < (ssize_t) kernel->width; v++)
          {
            for (u=0; u < (ssize_t) kernel->height; u++)
            {
              alpha=(MagickRealType) (QuantumScale*(QuantumRange-
                GetPixelOpacity(kernel_pixels+u)));
              pixel.red+=(*k)*alpha*GetPixelRed(kernel_pixels+u);
              pixel.green+=(*k)*alpha*GetPixelGreen(kernel_pixels+u);
              pixel.blue+=(*k)*alpha*GetPixelBlue(kernel_pixels+u);
              gamma+=(*k)*alpha;
              k++;
            }
            kernel_pixels+=image->columns+kernel->width;
          }
          gamma=PerceptibleReciprocal(gamma);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
          if ((channel & OpacityChannel) != 0)
            {
              /* opacity is convolved without alpha weighting */
              k=filter_kernel;
              kernel_pixels=p;
              for (v=0; v < (ssize_t) kernel->width; v++)
              {
                for (u=0; u < (ssize_t) kernel->height; u++)
                {
                  pixel.opacity+=(*k)*GetPixelOpacity(kernel_pixels+u);
                  k++;
                }
                kernel_pixels+=image->columns+kernel->width;
              }
              SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *magick_restrict kernel_indexes;

              k=filter_kernel;
              kernel_pixels=p;
              kernel_indexes=indexes;
              for (v=0; v < (ssize_t) kernel->width; v++)
              {
                for (u=0; u < (ssize_t) kernel->height; u++)
                {
                  alpha=(MagickRealType) (QuantumScale*(QuantumRange-
                    kernel_pixels[u].opacity));
                  pixel.index+=(*k)*alpha*GetPixelIndex(kernel_indexes+u);
                  k++;
                }
                kernel_pixels+=image->columns+kernel->width;
                kernel_indexes+=image->columns+kernel->width;
              }
              SetPixelIndex(filter_indexes+x,ClampToQuantum(gamma*
                pixel.index));
            }
        }
      indexes++;
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(filter_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FilterImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  filter_image->type=image->type;
  filter_view=DestroyCacheView(filter_view);
  image_view=DestroyCacheView(image_view);
  filter_kernel=(MagickRealType *) RelinquishAlignedMemory(filter_kernel);
  if (status == MagickFalse)
    filter_image=DestroyImage(filter_image);
#ifdef MAGICKCORE_CLPERFMARKER
  clEndPerfMarkerAMD();
#endif
  return(filter_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a u s s i a n B l u r I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GaussianBlurImage() blurs an image.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, the radius should be larger than sigma.  Use a
%  radius of 0 and GaussianBlurImage() selects a suitable radius for you
%
%  The format of the GaussianBlurImage method is:
%
%      Image *GaussianBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%      Image *GaussianBlurImageChannel(const Image *image,
%        const ChannelType channel,const double radius,const double sigma,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *GaussianBlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *blur_image; blur_image=GaussianBlurImageChannel(image,DefaultChannels,radius,sigma, exception); return(blur_image); } MagickExport Image *GaussianBlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { char geometry[MaxTextExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); (void) FormatLocaleString(geometry,MaxTextExtent,"gaussian:%.20gx%.20g", radius,sigma); kernel_info=AcquireKernelInfo(geometry); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=(Image *) NULL; #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateConvolveImageChannel(image,channel,kernel_info, exception); #endif if (blur_image == (Image *) NULL) blur_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1, kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o t i o n B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MotionBlurImage() simulates motion blur. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). % For reasonable results, radius should be larger than sigma. Use a % radius of 0 and MotionBlurImage() selects a suitable radius for you. % Angle gives the angle of the blurring motion. % % Andrew Protano contributed this effect. 
% % The format of the MotionBlurImage method is: % % Image *MotionBlurImage(const Image *image,const double radius, % const double sigma,const double angle,ExceptionInfo *exception) % Image *MotionBlurImageChannel(const Image *image,const ChannelType channel, % const double radius,const double sigma,const double angle, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o angle: Apply the effect along this angle. % % o exception: return any errors or warnings in this structure. % */ static double *GetMotionBlurKernel(const size_t width,const double sigma) { double *kernel, normalize; register ssize_t i; /* Generate a 1-D convolution kernel. */ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); kernel=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width, sizeof(*kernel))); if (kernel == (double *) NULL) return(kernel); normalize=0.0; for (i=0; i < (ssize_t) width; i++) { kernel[i]=(double) (exp((-((double) i*i)/(double) (2.0*MagickSigma* MagickSigma)))/(MagickSQ2PI*MagickSigma)); normalize+=kernel[i]; } for (i=0; i < (ssize_t) width; i++) kernel[i]/=normalize; return(kernel); } MagickExport Image *MotionBlurImage(const Image *image,const double radius, const double sigma,const double angle,ExceptionInfo *exception) { Image *motion_blur; motion_blur=MotionBlurImageChannel(image,DefaultChannels,radius,sigma,angle, exception); return(motion_blur); } MagickExport Image *MotionBlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, const double angle,ExceptionInfo *exception) { #define BlurImageTag "Blur/Image" CacheView *blur_view, *image_view; double *kernel; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; OffsetInfo *offset; 
PointInfo point; register ssize_t i; size_t width; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); width=GetOptimalKernelWidth1D(radius,sigma); kernel=GetMotionBlurKernel(width,sigma); if (kernel == (double *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset)); if (offset == (OffsetInfo *) NULL) { kernel=(double *) RelinquishAlignedMemory(kernel); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } point.x=(double) width*sin(DegreesToRadians(angle)); point.y=(double) width*cos(DegreesToRadians(angle)); for (i=0; i < (ssize_t) width; i++) { offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5); offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5); } /* Motion blur image. 
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateMotionBlurImage(image,channel,kernel,width,offset, exception); if (blur_image != (Image *) NULL) return blur_image; #endif blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) { kernel=(double *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return((Image *) NULL); } if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse) { kernel=(double *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); InheritException(exception,&blur_image->exception); blur_image=DestroyImage(blur_image); return((Image *) NULL); } status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); image_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict blur_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view); for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket qixel; PixelPacket pixel; register const IndexPacket *magick_restrict indexes; register double *magick_restrict k; register ssize_t i; k=kernel; qixel=bias; if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse)) { for (i=0; i < (ssize_t) width; i++) { (void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+ offset[i].y,&pixel,exception); qixel.red+=(*k)*pixel.red; qixel.green+=(*k)*pixel.green; qixel.blue+=(*k)*pixel.blue; qixel.opacity+=(*k)*pixel.opacity; if 
(image->colorspace == CMYKColorspace) { indexes=GetCacheViewVirtualIndexQueue(image_view); qixel.index+=(*k)*(*indexes); } k++; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(qixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(qixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(qixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(qixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(blur_indexes+x,ClampToQuantum(qixel.index)); } else { double alpha, gamma; alpha=0.0; gamma=0.0; for (i=0; i < (ssize_t) width; i++) { (void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+ offset[i].y,&pixel,exception); alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(&pixel)); qixel.red+=(*k)*alpha*pixel.red; qixel.green+=(*k)*alpha*pixel.green; qixel.blue+=(*k)*alpha*pixel.blue; qixel.opacity+=(*k)*pixel.opacity; if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewVirtualIndexQueue(image_view); qixel.index+=(*k)*alpha*GetPixelIndex(indexes); } gamma+=(*k)*alpha; k++; } gamma=PerceptibleReciprocal(gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*qixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*qixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*qixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(qixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*qixel.index)); } q++; } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,BlurImageTag,progress,image->rows); if 
(proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); kernel=(double *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % K u w a h a r a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % KuwaharaImage() is an edge preserving noise reduction filter. % % The format of the KuwaharaImage method is: % % Image *KuwaharaImage(const Image *image,const double width, % const double sigma,ExceptionInfo *exception) % Image *KuwaharaImageChannel(const Image *image,const ChannelType channel, % const double width,const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the square window radius. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *KuwaharaImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *kuwahara_image; kuwahara_image=KuwaharaImageChannel(image,DefaultChannels,radius,sigma, exception); return(kuwahara_image); } MagickExport Image *KuwaharaImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { #define KuwaharaImageTag "Kiwahara/Image" CacheView *image_view, *kuwahara_view; Image *gaussian_image, *kuwahara_image; MagickBooleanType status; MagickOffsetType progress; size_t width; ssize_t y; /* Initialize Kuwahara image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); (void) channel; width=(size_t) radius+1; gaussian_image=BlurImage(image,radius,sigma,exception); if (gaussian_image == (Image *) NULL) return((Image *) NULL); kuwahara_image=CloneImage(image,0,0,MagickTrue,exception); if (kuwahara_image == (Image *) NULL) { gaussian_image=DestroyImage(gaussian_image); return((Image *) NULL); } if (SetImageStorageClass(kuwahara_image,DirectClass) == MagickFalse) { InheritException(exception,&kuwahara_image->exception); gaussian_image=DestroyImage(gaussian_image); kuwahara_image=DestroyImage(kuwahara_image); return((Image *) NULL); } /* Edge preserving noise reduction filter. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(gaussian_image,exception); kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,kuwahara_image,kuwahara_image->rows,1) #endif for (y=0; y < (ssize_t) kuwahara_image->rows; y++) { register IndexPacket *magick_restrict kuwahara_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } kuwahara_indexes=GetCacheViewAuthenticIndexQueue(kuwahara_view); for (x=0; x < (ssize_t) kuwahara_image->columns; x++) { double min_variance; MagickPixelPacket pixel; RectangleInfo quadrant, target; register ssize_t i; min_variance=MagickMaximumValue; SetGeometry(gaussian_image,&target); quadrant.width=width; quadrant.height=width; for (i=0; i < 4; i++) { const PixelPacket 
*magick_restrict p; double variance; MagickPixelPacket mean; register const PixelPacket *magick_restrict k; register ssize_t n; quadrant.x=x; quadrant.y=y; switch (i) { case 0: { quadrant.x=x-(ssize_t) (width-1); quadrant.y=y-(ssize_t) (width-1); break; } case 1: { quadrant.y=y-(ssize_t) (width-1); break; } case 2: { quadrant.x=x-(ssize_t) (width-1); break; } default: break; } p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y, quadrant.width,quadrant.height,exception); if (p == (const PixelPacket *) NULL) break; GetMagickPixelPacket(image,&mean); k=p; for (n=0; n < (ssize_t) (width*width); n++) { mean.red+=(double) k->red; mean.green+=(double) k->green; mean.blue+=(double) k->blue; k++; } mean.red/=(double) (width*width); mean.green/=(double) (width*width); mean.blue/=(double) (width*width); k=p; variance=0.0; for (n=0; n < (ssize_t) (width*width); n++) { double luma; luma=GetPixelLuma(image,k); variance+=(luma-MagickPixelLuma(&mean))*(luma-MagickPixelLuma(&mean)); k++; } if (variance < min_variance) { min_variance=variance; target=quadrant; } } if (i < 4) { status=MagickFalse; break; } status=InterpolateMagickPixelPacket(gaussian_image,image_view, UndefinedInterpolatePixel,(double) target.x+target.width/2.0, (double) target.y+target.height/2.0,&pixel,exception); if (status == MagickFalse) break; SetPixelPacket(kuwahara_image,&pixel,q,kuwahara_indexes+x); q++; } if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,KuwaharaImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } kuwahara_view=DestroyCacheView(kuwahara_view); image_view=DestroyCacheView(image_view); gaussian_image=DestroyImage(gaussian_image); if (status == MagickFalse) kuwahara_image=DestroyImage(kuwahara_image); 
return(kuwahara_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L o c a l C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LocalContrastImage() attempts to increase the appearance of large-scale % light-dark transitions. Local contrast enhancement works similarly to % sharpening with an unsharp mask, however the mask is instead created using % an image with a greater blur distance. % % The format of the LocalContrastImage method is: % % Image *LocalContrastImage(const Image *image, const double radius, % const double strength, ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian blur, in percentage with 100% % resulting in a blur radius of 20% of largest dimension. % % o strength: the strength of the blur mask in percentage. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *LocalContrastImage(const Image *image,const double radius, const double strength,ExceptionInfo *exception) { #define LocalContrastImageTag "LocalContrast/Image" CacheView *image_view, *contrast_view; float *interImage, *scanLinePixels, totalWeight; Image *contrast_image; MagickBooleanType status; MemoryInfo *scanLinePixels_info, *interImage_info; ssize_t scanLineSize, width; /* Initialize contrast image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception); if (contrast_image != (Image *) NULL) return(contrast_image); #endif contrast_image=CloneImage(image,0,0,MagickTrue,exception); if (contrast_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(contrast_image,DirectClass) == MagickFalse) { InheritException(exception,&contrast_image->exception); contrast_image=DestroyImage(contrast_image); return((Image *) NULL); } image_view=AcquireVirtualCacheView(image,exception); contrast_view=AcquireAuthenticCacheView(contrast_image,exception); scanLineSize=(ssize_t) MagickMax(image->columns,image->rows); width=(ssize_t) scanLineSize*0.002f*fabs(radius); scanLineSize+=(2*width); scanLinePixels_info=AcquireVirtualMemory(GetOpenMPMaximumThreads()* scanLineSize,sizeof(*scanLinePixels)); if (scanLinePixels_info == (MemoryInfo *) NULL) { contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); contrast_image=DestroyImage(contrast_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } scanLinePixels=(float *) GetVirtualMemoryBlob(scanLinePixels_info); /* Create intermediate buffer. 
*/ interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)), sizeof(*interImage)); if (interImage_info == (MemoryInfo *) NULL) { scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info); contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); contrast_image=DestroyImage(contrast_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } interImage=(float *) GetVirtualMemoryBlob(interImage_info); totalWeight=(width+1)*(width+1); /* Vertical pass. */ status=MagickTrue; { ssize_t x; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); const PixelPacket *magick_restrict p; float *out, *pix, *pixels; register ssize_t y; ssize_t i; if (status == MagickFalse) continue; pixels=scanLinePixels; pixels+=id*scanLineSize; pix=pixels; p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width), exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } for (y=0; y < (ssize_t) image->rows+(2*width); y++) { *pix++=(float)GetPixelLuma(image,p); p++; } out=interImage+x+width; for (y=0; y < (ssize_t) image->rows; y++) { float sum, weight; weight=1.0f; sum=0; pix=pixels+y; for (i=0; i < width; i++) { sum+=weight*(*pix++); weight+=1.0f; } for (i=width+1; i < (2*width); i++) { sum+=weight*(*pix++); weight-=1.0f; } /* write to output */ *out=sum/totalWeight; /* mirror into padding */ if (x <= width && x != 0) *(out-(x*2))=*out; if ((x > (ssize_t) image->columns-width-2) && (x != (ssize_t) image->columns-1)) *(out+((image->columns-x-1)*2))=*out; out+=image->columns+(width*2); } } } /* Horizontal pass. 
*/ { ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); const PixelPacket *magick_restrict p; float *pix, *pixels; register PixelPacket *magick_restrict q; register ssize_t x; ssize_t i; if (status == MagickFalse) continue; pixels=scanLinePixels; pixels+=id*scanLineSize; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1, exception); q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+ (2*width))*sizeof(float)); for (x=0; x < (ssize_t) image->columns; x++) { float mult, srcVal, sum, weight; weight=1.0f; sum=0; pix=pixels+x; for (i=0; i < width; i++) { sum+=weight*(*pix++); weight+=1.0f; } for (i=width+1; i < (2*width); i++) { sum+=weight*(*pix++); weight-=1.0f; } /* Apply and write */ srcVal=(float) GetPixelLuma(image,p); mult=(srcVal-(sum/totalWeight))*(strength/100.0f); mult=(srcVal+mult)/srcVal; SetPixelRed(q,ClampToQuantum(GetPixelRed(p)*mult)); SetPixelGreen(q,ClampToQuantum(GetPixelGreen(p)*mult)); SetPixelBlue(q,ClampToQuantum(GetPixelBlue(p)*mult)); p++; q++; } if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse) status=MagickFalse; } } scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info); interImage_info=RelinquishVirtualMemory(interImage_info); contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) contrast_image=DestroyImage(contrast_image); return(contrast_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r e v i e w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % 
% PreviewImage() tiles 9 thumbnails of the specified image with an image % processing operation applied with varying parameters. This may be helpful % pin-pointing an appropriate parameter for a particular image processing % operation. % % The format of the PreviewImages method is: % % Image *PreviewImages(const Image *image,const PreviewType preview, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o preview: the image processing operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *PreviewImage(const Image *image,const PreviewType preview, ExceptionInfo *exception) { #define NumberTiles 9 #define PreviewImageTag "Preview/Image" #define DefaultPreviewGeometry "204x204+10+10" char factor[MaxTextExtent], label[MaxTextExtent]; double degrees, gamma, percentage, radius, sigma, threshold; Image *images, *montage_image, *preview_image, *thumbnail; ImageInfo *preview_info; MagickBooleanType proceed; MontageInfo *montage_info; QuantizeInfo quantize_info; RectangleInfo geometry; register ssize_t i, x; size_t colors; ssize_t y; /* Open output image file. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); colors=2; degrees=0.0; gamma=(-0.2f); preview_info=AcquireImageInfo(); SetGeometry(image,&geometry); (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y, &geometry.width,&geometry.height); images=NewImageList(); percentage=12.5; GetQuantizeInfo(&quantize_info); radius=0.0; sigma=1.0; threshold=0.0; x=0; y=0; for (i=0; i < NumberTiles; i++) { thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception); if (thumbnail == (Image *) NULL) break; (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL, (void *) NULL); (void) SetImageProperty(thumbnail,"label",DefaultTileLabel); if (i == (NumberTiles/2)) { (void) QueryColorDatabase("#dfdfdf",&thumbnail->matte_color,exception); AppendImageToList(&images,thumbnail); continue; } switch (preview) { case RotatePreview: { degrees+=45.0; preview_image=RotateImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"rotate %g",degrees); break; } case ShearPreview: { degrees+=5.0; preview_image=ShearImage(thumbnail,degrees,degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"shear %gx%g", degrees,2.0*degrees); break; } case RollPreview: { x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles; y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles; preview_image=RollImage(thumbnail,x,y,exception); (void) FormatLocaleString(label,MaxTextExtent,"roll %+.20gx%+.20g", (double) x,(double) y); break; } case HuePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MaxTextExtent,"100,100,%g", 2.0*percentage); (void) ModulateImage(preview_image,factor); (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor); break; } case SaturationPreview: { 
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MaxTextExtent,"100,%g",2.0*percentage); (void) ModulateImage(preview_image,factor); (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor); break; } case BrightnessPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MaxTextExtent,"%g",2.0*percentage); (void) ModulateImage(preview_image,factor); (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor); break; } case GammaPreview: default: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; gamma+=0.4f; (void) GammaImageChannel(preview_image,DefaultChannels,gamma); (void) FormatLocaleString(label,MaxTextExtent,"gamma %g",gamma); break; } case SpiffPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image != (Image *) NULL) for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickTrue); (void) FormatLocaleString(label,MaxTextExtent,"contrast (%.20g)", (double) i+1); break; } case DullPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickFalse); (void) FormatLocaleString(label,MaxTextExtent,"+contrast (%.20g)", (double) i+1); break; } case GrayscalePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; quantize_info.colorspace=GRAYColorspace; (void) QuantizeImage(&quantize_info,preview_image); (void) FormatLocaleString(label,MaxTextExtent, "-colorspace gray -colors %.20g",(double) colors); break; } case QuantizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; 
quantize_info.number_colors=colors; (void) QuantizeImage(&quantize_info,preview_image); (void) FormatLocaleString(label,MaxTextExtent,"colors %.20g",(double) colors); break; } case DespecklePreview: { for (x=0; x < (i-1); x++) { preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; thumbnail=DestroyImage(thumbnail); thumbnail=preview_image; } preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(label,MaxTextExtent,"despeckle (%.20g)", (double) i+1); break; } case ReduceNoisePreview: { preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius, (size_t) radius,exception); (void) FormatLocaleString(label,MaxTextExtent,"noise %g",radius); break; } case AddNoisePreview: { switch ((int) i) { case 0: { (void) CopyMagickString(factor,"uniform",MaxTextExtent); break; } case 1: { (void) CopyMagickString(factor,"gaussian",MaxTextExtent); break; } case 2: { (void) CopyMagickString(factor,"multiplicative",MaxTextExtent); break; } case 3: { (void) CopyMagickString(factor,"impulse",MaxTextExtent); break; } case 5: { (void) CopyMagickString(factor,"laplacian",MaxTextExtent); break; } case 6: { (void) CopyMagickString(factor,"poisson",MaxTextExtent); break; } default: { (void) CopyMagickString(thumbnail->magick,"NULL",MaxTextExtent); break; } } preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i, (size_t) i,exception); (void) FormatLocaleString(label,MaxTextExtent,"+noise %s",factor); break; } case SharpenPreview: { preview_image=SharpenImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MaxTextExtent,"sharpen %gx%g", radius,sigma); break; } case BlurPreview: { preview_image=BlurImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MaxTextExtent,"blur %gx%g",radius, sigma); break; } case ThresholdPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) 
break; (void) BilevelImage(thumbnail, (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0); (void) FormatLocaleString(label,MaxTextExtent,"threshold %g", (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0); break; } case EdgeDetectPreview: { preview_image=EdgeImage(thumbnail,radius,exception); (void) FormatLocaleString(label,MaxTextExtent,"edge %g",radius); break; } case SpreadPreview: { preview_image=SpreadImage(thumbnail,radius,exception); (void) FormatLocaleString(label,MaxTextExtent,"spread %g", radius+0.5); break; } case SolarizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) SolarizeImage(preview_image,(double) QuantumRange* percentage/100.0); (void) FormatLocaleString(label,MaxTextExtent,"solarize %g", (QuantumRange*percentage)/100.0); break; } case ShadePreview: { degrees+=10.0; preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees, exception); (void) FormatLocaleString(label,MaxTextExtent,"shade %gx%g", degrees,degrees); break; } case RaisePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; geometry.width=(size_t) (2*i+2); geometry.height=(size_t) (2*i+2); geometry.x=(i-1)/2; geometry.y=(i-1)/2; (void) RaiseImage(preview_image,&geometry,MagickTrue); (void) FormatLocaleString(label,MaxTextExtent, "raise %.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double) geometry.height,(double) geometry.x,(double) geometry.y); break; } case SegmentPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; threshold+=0.4f; (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold, threshold); (void) FormatLocaleString(label,MaxTextExtent,"segment %gx%g", threshold,threshold); break; } case SwirlPreview: { preview_image=SwirlImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"swirl %g",degrees); 
degrees+=45.0; break; } case ImplodePreview: { degrees+=0.1f; preview_image=ImplodeImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"implode %g",degrees); break; } case WavePreview: { degrees+=5.0f; preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"wave %gx%g", 0.5*degrees,2.0*degrees); break; } case OilPaintPreview: { preview_image=OilPaintImage(thumbnail,(double) radius,exception); (void) FormatLocaleString(label,MaxTextExtent,"paint %g",radius); break; } case CharcoalDrawingPreview: { preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MaxTextExtent,"charcoal %gx%g", radius,sigma); break; } case JPEGPreview: { char filename[MaxTextExtent]; int file; MagickBooleanType status; preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; preview_info->quality=(size_t) percentage; (void) FormatLocaleString(factor,MaxTextExtent,"%.20g",(double) preview_info->quality); file=AcquireUniqueFileResource(filename); if (file != -1) file=close(file)-1; (void) FormatLocaleString(preview_image->filename,MaxTextExtent, "jpeg:%s",filename); status=WriteImage(preview_info,preview_image); if (status != MagickFalse) { Image *quality_image; (void) CopyMagickString(preview_info->filename, preview_image->filename,MaxTextExtent); quality_image=ReadImage(preview_info,exception); if (quality_image != (Image *) NULL) { preview_image=DestroyImage(preview_image); preview_image=quality_image; } } (void) RelinquishUniqueFileResource(preview_image->filename); if ((GetBlobSize(preview_image)/1024) >= 1024) (void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%gmb ", factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/ 1024.0/1024.0); else if (GetBlobSize(preview_image) >= 1024) (void) FormatLocaleString(label,MaxTextExtent, "quality %s\n%gkb ",factor,(double) 
((MagickOffsetType) GetBlobSize(preview_image))/1024.0); else (void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%.20gb ", factor,(double) ((MagickOffsetType) GetBlobSize(thumbnail))); break; } } thumbnail=DestroyImage(thumbnail); percentage+=12.5; radius+=0.5; sigma+=0.25; if (preview_image == (Image *) NULL) break; (void) DeleteImageProperty(preview_image,"label"); (void) SetImageProperty(preview_image,"label",label); AppendImageToList(&images,preview_image); proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i, NumberTiles); if (proceed == MagickFalse) break; } if (images == (Image *) NULL) { preview_info=DestroyImageInfo(preview_info); return((Image *) NULL); } /* Create the montage. */ montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL); (void) CopyMagickString(montage_info->filename,image->filename,MaxTextExtent); montage_info->shadow=MagickTrue; (void) CloneString(&montage_info->tile,"3x3"); (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry); (void) CloneString(&montage_info->frame,DefaultTileFrame); montage_image=MontageImages(images,montage_info,exception); montage_info=DestroyMontageInfo(montage_info); images=DestroyImageList(images); if (montage_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (montage_image->montage != (char *) NULL) { /* Free image directory. */ montage_image->montage=(char *) RelinquishMagickMemory( montage_image->montage); if (image->directory != (char *) NULL) montage_image->directory=(char *) RelinquishMagickMemory( montage_image->directory); } preview_info=DestroyImageInfo(preview_info); return(montage_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R o t a t i o n a l B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RotationalBlurImage() applies a rotational blur to the image. 
%
%  Andrew Protano contributed this effect.
%
%  The format of the RotationalBlurImage method is:
%
%      Image *RotationalBlurImage(const Image *image,const double angle,
%        ExceptionInfo *exception)
%      Image *RotationalBlurImageChannel(const Image *image,
%        const ChannelType channel,const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o angle: the angle of the rotational blur.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  Image
    *blur_image;

  /*
    Convenience wrapper: blur all default channels.
  */
  blur_image=RotationalBlurImageChannel(image,DefaultChannels,angle,exception);
  return(blur_image);
}

MagickExport Image *RotationalBlurImageChannel(const Image *image,
  const ChannelType channel,const double angle,ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  MagickRealType
    blur_radius,
    *cos_theta,
    offset,
    *sin_theta,
    theta;

  PointInfo
    blur_center;

  register ssize_t
    i;

  size_t
    n;

  ssize_t
    y;

  /*
    Allocate blur image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Try the OpenCL-accelerated path first; fall through on failure.
  */
  blur_image=AccelerateRadialBlurImage(image,channel,angle,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    The blur rotates every pixel about the image center; precompute a table
    of sines/cosines for n sample angles spanning the requested arc.  n grows
    with both the angle and the image's corner radius.
  */
  blur_center.x=(double) (image->columns-1)/2.0;
  blur_center.y=(double) (image->rows-1)/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(MagickRealType) (n-1);
  cos_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n,
    sizeof(*cos_theta));
  sin_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n,
    sizeof(*sin_theta));
  if ((cos_theta == (MagickRealType *) NULL) ||
      (sin_theta == (MagickRealType *) NULL))
    {
      /*
        NOTE(review): these frees cast through (double *); this is benign only
        while MagickRealType is double -- TODO confirm and align the casts
        with the MagickRealType allocations above.
      */
      if (cos_theta != (double *) NULL)
        cos_theta=(double *) RelinquishMagickMemory(cos_theta);
      if (sin_theta != (double *) NULL)
        sin_theta=(double *) RelinquishMagickMemory(sin_theta);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Center the sampled arc on the source position (offset = half the arc).
  */
  offset=theta*(MagickRealType) (n-1)/2.0;
  for (i=0; i < (ssize_t) n; i++)
  {
    cos_theta[i]=cos((double) (theta*i-offset));
    sin_theta[i]=sin((double) (theta*i-offset));
  }
  /*
    Radial blur image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register IndexPacket
      *magick_restrict blur_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      MagickPixelPacket
        qixel;

      MagickRealType
        normalize,
        radius;

      PixelPacket
        pixel;

      PointInfo
        center;

      register ssize_t
        i;

      size_t
        step;

      /*
        Pixels close to the center traverse a shorter arc, so sample the
        angle table more coarsely there (step = blur_radius/radius).
      */
      center.x=(double) x-blur_center.x;
      center.y=(double) y-blur_center.y;
      radius=hypot((double) center.x,center.y);
      if (radius == 0)
        step=1;
      else
        {
          step=(size_t) (blur_radius/radius);
          if (step == 0)
            step=1;
          else
            if (step >= n)
              step=n-1;
        }
      normalize=0.0;
      qixel=bias;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /*
            Opaque path: average the rotated samples with equal weights.
          */
          for (i=0; i < (ssize_t) n; i+=(ssize_t) step)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
              (blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5),
              (ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y*
              cos_theta[i]+0.5),&pixel,exception);
            qixel.red+=pixel.red;
            qixel.green+=pixel.green;
            qixel.blue+=pixel.blue;
            qixel.opacity+=pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=(*indexes);
              }
            normalize+=1.0;
          }
          normalize=PerceptibleReciprocal(normalize);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(normalize*qixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(normalize*qixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(normalize*qixel.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(normalize*qixel.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(blur_indexes+x,ClampToQuantum(normalize*qixel.index));
        }
      else
        {
          double
            alpha,
            gamma;

          /*
            Matte path: color channels are alpha-weighted (normalized by
            gamma, the accumulated alpha); opacity keeps the plain sample
            average (normalized by normalize).
          */
          alpha=1.0;
          gamma=0.0;
          for (i=0; i < (ssize_t) n; i+=(ssize_t) step)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
              (blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5),
              (ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y*
              cos_theta[i]+0.5),&pixel,exception);
            alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(&pixel));
            qixel.red+=alpha*pixel.red;
            qixel.green+=alpha*pixel.green;
            qixel.blue+=alpha*pixel.blue;
            qixel.opacity+=pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=alpha*(*indexes);
              }
            gamma+=alpha;
            normalize+=1.0;
          }
          gamma=PerceptibleReciprocal(gamma);
          normalize=PerceptibleReciprocal(normalize);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(gamma*qixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(gamma*qixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(gamma*qixel.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(normalize*qixel.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*qixel.index));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  cos_theta=(MagickRealType *) RelinquishMagickMemory(cos_theta);
  sin_theta=(MagickRealType *) RelinquishMagickMemory(sin_theta);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e l e c t i v e B l u r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SelectiveBlurImage() selectively blur pixels within a contrast threshold.
%  It is similar to the unsharpen mask that sharpens everything with contrast
%  above a certain threshold.
%
%  The format of the SelectiveBlurImage method is:
%
%      Image *SelectiveBlurImage(const Image *image,const double radius,
%        const double sigma,const double threshold,ExceptionInfo *exception)
%      Image *SelectiveBlurImageChannel(const Image *image,
%        const ChannelType channel,const double radius,const double sigma,
%        const double threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o threshold: only pixels within this contrast threshold are included
%      in the blur operation.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
  Image
    *blur_image;

  /*
    Convenience wrapper: selectively blur all default channels.
  */
  blur_image=SelectiveBlurImageChannel(image,DefaultChannels,radius,sigma,
    threshold,exception);
  return(blur_image);
}

MagickExport Image *SelectiveBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag  "SelectiveBlur/Image"

  CacheView
    *blur_view,
    *image_view,
    *luminance_view;

  double
    *kernel;

  Image
    *blur_image,
    *luminance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    center,
    j,
    u,
    v,
    y;

  /*
    Initialize blur image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a width x width Gaussian kernel (row-major, written via i).
  */
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    width*sizeof(*kernel)));
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  j=(ssize_t) (width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
      kernel[i++]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
        MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
  }
  if (image->debug != MagickFalse)
    {
      /*
        Dump the kernel to the log, one formatted row per line.
      */
      char
        format[MaxTextExtent],
        *message;

      register const double
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatLocaleString(format,MaxTextExtent,"%+f ",*k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(double *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      kernel=(double *) RelinquishAlignedMemory(kernel);
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    The contrast test below is evaluated on a grayscale copy of the image.
  */
  luminance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (luminance_image == (Image *) NULL)
    {
      kernel=(double *) RelinquishAlignedMemory(kernel);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  status=TransformImageColorspace(luminance_image,GRAYColorspace);
  if (status == MagickFalse)
    {
      InheritException(exception,&luminance_image->exception);
      kernel=(double *) RelinquishAlignedMemory(kernel);
      blur_image=DestroyImage(blur_image);
      luminance_image=DestroyImage(luminance_image);
      return((Image *) NULL);
    }
  /*
    Threshold blur image.
  */
  status=MagickTrue;
  progress=0;
  /*
    center: offset of the kernel's center pixel inside the virtual
    (columns+width) x width neighborhood fetched per row below.
  */
  center=(ssize_t) ((image->columns+width)*((width-1)/2L)+((width-1)/2L));
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  luminance_view=AcquireVirtualCacheView(luminance_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      gamma;

    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict l,
      *magick_restrict p;

    register IndexPacket
      *magick_restrict blur_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
      ((width-1)/2L),image->columns+width,width,exception);
    l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
      (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
    q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (l == (const PixelPacket *) NULL) ||
        (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        contrast;

      DoublePixelPacket
        pixel;

      MagickRealType
        intensity;

      register const double
        *magick_restrict k;

      register ssize_t
        u;

      ssize_t
        j,
        v;

      /*
        Only neighbors whose luminance differs from the center pixel by less
        than the threshold contribute; gamma accumulates the weights that
        actually contributed and renormalizes the result.
      */
      pixel.red=bias.red;
      pixel.green=bias.green;
      pixel.blue=bias.blue;
      pixel.opacity=bias.opacity;
      pixel.index=bias.index;
      k=kernel;
      intensity=GetPixelIntensity(image,p+center);
      gamma=0.0;
      j=0;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          for (v=0; v < (ssize_t) width; v++)
          {
            for (u=0; u < (ssize_t) width; u++)
            {
              contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
              if (fabs(contrast) < threshold)
                {
                  pixel.red+=(*k)*GetPixelRed(p+u+j);
                  pixel.green+=(*k)*GetPixelGreen(p+u+j);
                  pixel.blue+=(*k)*GetPixelBlue(p+u+j);
                  gamma+=(*k);
                }
              k++;
            }
            j+=(ssize_t) (image->columns+width);
          }
          if (gamma != 0.0)
            {
              gamma=PerceptibleReciprocal(gamma);
              if ((channel & RedChannel) != 0)
                SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
              if ((channel & GreenChannel) != 0)
                SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
              if ((channel & BlueChannel) != 0)
                SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
            }
          if ((channel & OpacityChannel) != 0)
            {
              /*
                Second pass over the same kernel for the opacity channel.
              */
              gamma=0.0;
              j=0;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
                  if (fabs(contrast) < threshold)
                    {
                      pixel.opacity+=(*k)*(p+u+j)->opacity;
                      gamma+=(*k);
                    }
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              gamma=PerceptibleReciprocal(gamma);
              SetPixelOpacity(q,ClampToQuantum(gamma*pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              gamma=0.0;
              j=0;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
                  if (fabs(contrast) < threshold)
                    {
                      pixel.index+=(*k)*GetPixelIndex(indexes+x+u+j);
                      gamma+=(*k);
                    }
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              gamma=PerceptibleReciprocal(gamma);
              SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index));
            }
        }
      else
        {
          MagickRealType
            alpha;

          /*
            Matte path: color contributions are weighted by the neighbor's
            alpha as well as the kernel weight.
          */
          for (v=0; v < (ssize_t) width; v++)
          {
            for (u=0; u < (ssize_t) width; u++)
            {
              contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
              if (fabs(contrast) < threshold)
                {
                  alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p+u+j));
                  pixel.red+=(*k)*alpha*GetPixelRed(p+u+j);
                  pixel.green+=(*k)*alpha*GetPixelGreen(p+u+j);
                  pixel.blue+=(*k)*alpha*GetPixelBlue(p+u+j);
                  pixel.opacity+=(*k)*GetPixelOpacity(p+u+j);
                  gamma+=(*k)*alpha;
                }
              k++;
            }
            j+=(ssize_t) (image->columns+width);
          }
          if (gamma != 0.0)
            {
              gamma=PerceptibleReciprocal(gamma);
              if ((channel & RedChannel) != 0)
                SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
              if ((channel & GreenChannel) != 0)
                SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
              if ((channel & BlueChannel) != 0)
                SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
            }
          if ((channel & OpacityChannel) != 0)
            {
              /*
                Opacity is accumulated without renormalization here.
              */
              j=0;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
                  if (fabs(contrast) < threshold)
                    pixel.opacity+=(*k)*GetPixelOpacity(p+u+j);
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              gamma=0.0;
              j=0;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
                  if (fabs(contrast) < threshold)
                    {
                      alpha=(MagickRealType) (QuantumScale*
                        GetPixelAlpha(p+u+j));
                      pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+u+j);
                      gamma+=(*k);
                    }
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              gamma=PerceptibleReciprocal(gamma);
              SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index));
            }
        }
      p++;
      l++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(blur_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SelectiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  luminance_view=DestroyCacheView(luminance_view);
  image_view=DestroyCacheView(image_view);
  luminance_image=DestroyImage(luminance_image);
  kernel=(double *) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a d e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShadeImage() shines a distant light on an image to create a
%  three-dimensional effect.  You control the positioning of the light with
%  azimuth and elevation; azimuth is measured in degrees off the x axis
%  and elevation is measured in pixels above the Z axis.
%
%  The format of the ShadeImage method is:
%
%      Image *ShadeImage(const Image *image,const MagickBooleanType gray,
%        const double azimuth,const double elevation,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o gray: A value other than zero shades the intensity of each pixel.
%
%    o azimuth, elevation:  Define the light source direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define ShadeImageTag  "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&shade_image->exception);
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    MagickRealType
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict s0,
      *magick_restrict s1,
      *magick_restrict s2;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a 3-row virtual window (with a 1-pixel border) so each pixel's
      3x3 neighborhood is addressable; s0/s1/s2 below point at its rows.
    */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      /*
        Determine the surface normal and compute shading.
      */
      s0=p+1;
      s1=s0+image->columns+2;  /* window row stride is columns+2 */
      s2=s1+image->columns+2;
      /*
        normal.x/normal.y: left-minus-right and bottom-minus-top intensity
        differences over the 3x3 window.
      */
      normal.x=(double) (GetPixelIntensity(linear_image,s0-1)+
        GetPixelIntensity(linear_image,s1-1)+
        GetPixelIntensity(linear_image,s2-1)-
        GetPixelIntensity(linear_image,s0+1)-
        GetPixelIntensity(linear_image,s1+1)-
        GetPixelIntensity(linear_image,s2+1));
      normal.y=(double) (GetPixelIntensity(linear_image,s2-1)+
        GetPixelIntensity(linear_image,s2)+
        GetPixelIntensity(linear_image,s2+1)-
        GetPixelIntensity(linear_image,s0-1)-
        GetPixelIntensity(linear_image,s0)-
        GetPixelIntensity(linear_image,s0+1));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;  /* flat surface: lit by the light's Z component only */
      else
        {
          /*
            shade = (light . normal)/|normal|, clamped to zero for surfaces
            facing away from the light.
          */
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+normal.z*
                normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      if (gray != MagickFalse)
        {
          SetPixelRed(q,shade);
          SetPixelGreen(q,shade);
          SetPixelBlue(q,shade);
        }
      else
        {
          SetPixelRed(q,ClampToQuantum(QuantumScale*shade*GetPixelRed(s1)));
          SetPixelGreen(q,ClampToQuantum(QuantumScale*shade*GetPixelGreen(s1)));
          SetPixelBlue(q,ClampToQuantum(QuantumScale*shade*GetPixelBlue(s1)));
        }
      q->opacity=s1->opacity;
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ShadeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a r p e n I m a g e                                                 %
%                                                                             %
% % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SharpenImage() sharpens the image. We convolve the image with a Gaussian % operator of the given radius and standard deviation (sigma). For % reasonable results, radius should be larger than sigma. Use a radius of 0 % and SharpenImage() selects a suitable radius for you. % % Using a separable kernel would be faster, but the negative weights cancel % out on the corners of the kernel producing often undesirable ringing in the % filtered result; this can be avoided by using a 2D gaussian shaped image % sharpening kernel instead. % % The format of the SharpenImage method is: % % Image *SharpenImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % Image *SharpenImageChannel(const Image *image,const ChannelType channel, % const double radius,const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. 
%
*/

MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *sharp_image;

  /* Convenience wrapper: sharpen all default channels. */
  sharp_image=SharpenImageChannel(image,DefaultChannels,radius,sigma,exception);
  return(sharp_image);
}

MagickExport Image *SharpenImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a 2D sharpening kernel: a negated Gaussian with the center weight
    overwritten below so the kernel sum is positive (classic unsharp/Laplacian
    shape).  Width is derived from radius/sigma by the project helper.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel_info->width,kernel_info->height*sizeof(*kernel_info->values)));
  if (kernel_info->values == (double *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  normalize=0.0;
  j=(ssize_t) (kernel_info->width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      /* Negated 2D Gaussian weight at offset (u,v); MagickSigma is a
         project macro (presumably sigma clamped away from zero -- confirm). */
      kernel_info->values[i]=(double) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel_info->values[i];
      i++;
    }
  }
  /* After the loops i == width*height, so i/2 indexes the center tap;
     forcing it to -2*sum makes the overall kernel sum equal -sum (> 0). */
  kernel_info->values[i/2]=(double) ((-2.0)*normalize);
  /* Normalize so the kernel sums to 1 (PerceptibleReciprocal guards /0). */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  /* Apply the kernel via the generic convolution path. */
  sharp_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1,
    kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S p r e a d I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpreadImage() is a special effects method that randomly displaces each
%  pixel in a block defined by the radius parameter.
%
%  The format of the SpreadImage method is:
%
%      Image *SpreadImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius:  Choose a random pixel in a neighborhood of this extent.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag  "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,0,0,MagickTrue,exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&spread_image->exception);
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image: each output pixel is interpolated from a pseudo-random
    source location within a width-sized neighborhood of its own position.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(spread_image,&bias);
  width=GetOptimalKernelWidth1D(radius,0.5);
  /* One RandomInfo per thread so random streams are not shared. */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,spread_image,spread_image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) spread_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(spread_view);
    pixel=bias;
    for (x=0; x < (ssize_t) spread_image->columns; x++)
    {
      PointInfo
        point;

      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      /* (point-0.5) maps the [0,1) random value to a +/- width/2 offset. */
      status=InterpolateMagickPixelPacket(image,image_view,image->interpolate,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),&pixel,
        exception);
      if (status == MagickFalse)
        break;
      SetPixelPacket(spread_image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpreadImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     U n s h a r p M a s k I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnsharpMaskImage() sharpens one or more image channels.  We convolve the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
%  The format of the UnsharpMaskImage method is:
%
%      Image *UnsharpMaskImage(const Image *image,const double radius,
%        const double sigma,const double amount,const double threshold,
%        ExceptionInfo *exception)
%      Image *UnsharpMaskImageChannel(const Image *image,
%        const ChannelType channel,const double radius,const double sigma,
%        const double gain,const double threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o gain: the percentage of the difference between the original and the
%      blur image that is added back into the original.
%
%    o threshold: the threshold in pixels needed to apply the difference gain.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double gain,const double threshold,
  ExceptionInfo *exception)
{
  Image
    *sharp_image;

  /* Convenience wrapper: unsharp-mask all default channels. */
  sharp_image=UnsharpMaskImageChannel(image,DefaultChannels,radius,sigma,gain,
    threshold,exception);
  return(sharp_image);
}

MagickExport Image *UnsharpMaskImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  const double gain,const double threshold,ExceptionInfo *exception)
{
#define SharpenImageTag  "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  MagickRealType
    quantum_threshold;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it is available and succeeds. */
  unsharp_image=AccelerateUnsharpMaskImage(image,channel,radius,sigma,gain,
    threshold,exception);
  if (unsharp_image != (Image *) NULL)
    return(unsharp_image);
#endif
  /* Start from a Gaussian-blurred copy; the loop below adds back the
     (original - blur) difference scaled by gain where it exceeds threshold. */
  unsharp_image=BlurImageChannel(image,(ChannelType) (channel &~ SyncChannels),
    radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  quantum_threshold=(MagickRealType) QuantumRange*threshold;
  /*
    Unsharp-mask image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,unsharp_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    DoublePixelPacket
      pixel;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict unsharp_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* p reads the original row; q holds the blurred row, updated in place. */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    unsharp_indexes=GetCacheViewAuthenticIndexQueue(unsharp_view);
    pixel.red=bias.red;
    pixel.green=bias.green;
    pixel.blue=bias.blue;
    pixel.opacity=bias.opacity;
    pixel.index=bias.index;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Per channel: diff = original - blurred; if |2*diff| is below the
         threshold keep the original value, else amplify the diff by gain.
         NOTE(review): red uses GetPixelRed(q) while green/blue/opacity read
         q->green/q->blue/q->opacity directly -- equivalent but inconsistent;
         also the opacity branch lacks the (MagickRealType) cast the other
         channels apply before adding the gained difference. */
      if ((channel & RedChannel) != 0)
        {
          pixel.red=GetPixelRed(p)-(MagickRealType) GetPixelRed(q);
          if (fabs(2.0*pixel.red) < quantum_threshold)
            pixel.red=(MagickRealType) GetPixelRed(p);
          else
            pixel.red=(MagickRealType) GetPixelRed(p)+(pixel.red*gain);
          SetPixelRed(q,ClampToQuantum(pixel.red));
        }
      if ((channel & GreenChannel) != 0)
        {
          pixel.green=GetPixelGreen(p)-(MagickRealType) q->green;
          if (fabs(2.0*pixel.green) < quantum_threshold)
            pixel.green=(MagickRealType) GetPixelGreen(p);
          else
            pixel.green=(MagickRealType) GetPixelGreen(p)+(pixel.green*gain);
          SetPixelGreen(q,ClampToQuantum(pixel.green));
        }
      if ((channel & BlueChannel) != 0)
        {
          pixel.blue=GetPixelBlue(p)-(MagickRealType) q->blue;
          if (fabs(2.0*pixel.blue) < quantum_threshold)
            pixel.blue=(MagickRealType) GetPixelBlue(p);
          else
            pixel.blue=(MagickRealType) GetPixelBlue(p)+(pixel.blue*gain);
          SetPixelBlue(q,ClampToQuantum(pixel.blue));
        }
      if ((channel & OpacityChannel) != 0)
        {
          pixel.opacity=GetPixelOpacity(p)-(MagickRealType) q->opacity;
          if (fabs(2.0*pixel.opacity) < quantum_threshold)
            pixel.opacity=(MagickRealType) GetPixelOpacity(p);
          else
            pixel.opacity=GetPixelOpacity(p)+(pixel.opacity*gain);
          SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          pixel.index=GetPixelIndex(indexes+x)-(MagickRealType)
            GetPixelIndex(unsharp_indexes+x);
          if (fabs(2.0*pixel.index) < quantum_threshold)
            pixel.index=(MagickRealType) GetPixelIndex(indexes+x);
          else
            pixel.index=(MagickRealType) GetPixelIndex(indexes+x)+
              (pixel.index*gain);
          SetPixelIndex(unsharp_indexes+x,ClampToQuantum(pixel.index));
        }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SharpenImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
main_seqval.c
/* Copyright (C) 2010 The Trustees of Indiana University.                  */
/*                                                                         */
/* Use, modification and distribution is subject to the Boost Software     */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt)                                   */
/*                                                                         */
/*  Authors: Jeremiah Willcock                                             */
/*           Andrew Lumsdaine                                              */

/* These need to be before any possible inclusions of stdint.h or inttypes.h.
 * */
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

#include "../generator/make_graph.h"
#include "../generator/utils.h"
#include "common.h"
#include <math.h>
#include <mpi.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdio.h>
#include <limits.h>
#include <stdint.h>
#include <inttypes.h>
#ifdef SHOWCPUAFF
#include <sys/types.h>
#include <unistd.h>
#endif

/* qsort comparator for doubles (ascending). */
static int compare_doubles(const void* a, const void* b) {
  double aa = *(const double*)a;
  double bb = *(const double*)b;
  return (aa < bb) ? -1 : (aa == bb) ? 0 : 1;
}

/* Indices into the result array filled by get_statistics(). */
enum {s_minimum, s_firstquartile, s_median, s_thirdquartile, s_maximum, s_mean, s_std, s_LAST};

/* Compute mean, sample std. dev. and order statistics of x[0..n-1] into r.
 * NOTE(review): assumes n >= 2 -- the std. dev. divides by n - 1, so n == 1
 * divides by zero; confirm callers always pass multiple samples. */
static void get_statistics(const double x[], int n, double r[s_LAST]) {
  double temp;
  int i;
  /* Compute mean. */
  temp = 0;
  for (i = 0; i < n; ++i) temp += x[i];
  temp /= n;
  r[s_mean] = temp;
  /* Compute std. dev. (sample variance, n - 1 denominator). */
  temp = 0;
  for (i = 0; i < n; ++i) temp += (x[i] - r[s_mean]) * (x[i] - r[s_mean]);
  temp /= n - 1;
  r[s_std] = sqrt(temp);
  /* Sort a copy of x (input is left untouched). */
  double* xx = (double*)xmalloc(n * sizeof(double));
  memcpy(xx, x, n * sizeof(double));
  qsort(xx, n, sizeof(double), compare_doubles);
  /* Get order statistics; quartiles/median average two neighbors. */
  r[s_minimum] = xx[0];
  r[s_firstquartile] = (xx[(n - 1) / 4] + xx[n / 4]) * .5;
  r[s_median] = (xx[(n - 1) / 2] + xx[n / 2]) * .5;
  r[s_thirdquartile] = (xx[n - 1 - (n - 1) / 4] + xx[n - 1 - n / 4]) * .5;
  r[s_maximum] = xx[n - 1];
  /* Clean up. */
  free(xx);
}

/* Extract the predecessor vertex from a packed pred entry: the low 48 bits
 * hold the predecessor, the high 16 bits hold the BFS level.  The left/right
 * shift sign-extends bit 47 (relies on arithmetic right shift of a negative
 * value, which is implementation-defined but universal on supported
 * platforms). */
static inline int64_t get_pred_from_pred_entry(int64_t val) {
  return (val << 16) >> 16;
}

/* Returns true if all values are in range. */
//static
int check_value_ranges(const int64_t nglobalverts, const size_t nlocalverts, const int64_t* const pred) {
  int any_range_errors = 0;
  {
    size_t ii;
    /* Walk the local pred array in CHUNKSIZE pieces; each chunk is scanned
     * in parallel.  A valid parent is -1 (unvisited) .. nglobalverts-1. */
    for (ii = 0; ii < nlocalverts; ii += CHUNKSIZE) {
      ptrdiff_t i_start = ii;
      ptrdiff_t i_end = ptrdiff_min(ii + CHUNKSIZE, nlocalverts);
      ptrdiff_t i;
      assert (i_start >= 0 && i_start <= (ptrdiff_t)nlocalverts);
      assert (i_end >= 0 && i_end <= (ptrdiff_t)nlocalverts);
#pragma omp parallel for reduction(||:any_range_errors)
      for (i = i_start; i < i_end; ++i) {
        int64_t p = get_pred_from_pred_entry(pred[i]);
        if (p < -1 || p >= nglobalverts) {
          fprintf(stdout, "%d: Validation error: parent of vertex %" PRId64 " is out-of-range value %" PRId64 ".\n", rank, vertex_to_global_for_pred(rank, i), p);
          any_range_errors = 1;
        }
      }
    }
  }
  /* Agree on the result across all ranks so every rank returns the same. */
  MPI_Allreduce(MPI_IN_PLACE, &any_range_errors, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD);
  return !any_range_errors;
}

/* Returns true if result is valid.  Also, updates high 16 bits of each element
 * of pred to contain the BFS level number (or -1 if not visited) of each
 * vertex; this is based on the predecessor map if the user didn't provide it.
* */ int validate_bfs_result_seq(const tuple_graph* const tg, const int64_t nglobalverts, const size_t nlocalverts, const int64_t root, int64_t* const pred, int64_t* const edge_visit_count_ptr, int64_t const max_used_vertex) { assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges); assert (pred); *edge_visit_count_ptr = 0; /* Ensure it is a valid pointer */ int ranges_ok = check_value_ranges(nglobalverts, nlocalverts, pred); if (root < 0 || root >= nglobalverts) { fprintf(stderr, "%d: Validation error: root vertex %" PRId64 " is invalid.\n", rank, root); ranges_ok = 0; } if (!ranges_ok) return 0; /* Fail */ int validation_passed = 1; int root_owner; size_t root_local; get_vertex_distribution_for_pred(1, &root, &root_owner, &root_local); int root_is_mine = (root_owner == rank); /* Get maximum values so loop counts are consistent across ranks. */ uint64_t maxlocalverts_ui = nlocalverts; MPI_Allreduce(MPI_IN_PLACE, &maxlocalverts_ui, 1, MPI_UINT64_T, MPI_MAX, MPI_COMM_WORLD); size_t maxlocalverts = (size_t)maxlocalverts_ui; ptrdiff_t max_bufsize = tuple_graph_max_bufsize(tg); ptrdiff_t edge_chunk_size = ptrdiff_min(HALF_CHUNKSIZE, max_bufsize); assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges); assert (pred); /* combine results from all processes */ int64_t* restrict pred_vtx = NULL; { int irank; uint64_t i; int64_t nlocalvertsMax=nlocalverts; MPI_Allreduce(MPI_IN_PLACE, &nlocalvertsMax, 1, MPI_UINT64_T, MPI_MAX, MPI_COMM_WORLD); if(rank==0) { pred_vtx = (int64_t*)xmalloc(nglobalverts * sizeof(int64_t)); int64_t* pred_tmp; int64_t nlocalvertsRemote; pred_tmp=pred; nlocalvertsRemote=nlocalverts; for(irank=0;irank<size;irank++) { MPI_Barrier(MPI_COMM_WORLD); if(irank!=0) { MPI_Recv(&nlocalvertsRemote, 1, MPI_UINT64_T, irank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); MPI_Recv(pred_tmp, nlocalvertsRemote, MPI_UINT64_T, 
irank, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE); //printf("%d %" PRId64 " \n",rank,nlocalvertsRemote); } for(i=0;i<nlocalvertsRemote ;i++) { pred_vtx[vertex_to_global_for_pred(irank,i)]=get_pred_from_pred_entry(pred_tmp[i]); } if(irank==0) pred_tmp = (int64_t*)xmalloc(nlocalvertsMax * sizeof(int64_t)); } xfree(pred_tmp); } else { for(irank=0;irank<size;irank++) { MPI_Barrier(MPI_COMM_WORLD); if(rank==irank) { MPI_Send(&nlocalverts, 1, MPI_UINT64_T, 0, 0, MPI_COMM_WORLD); MPI_Send(pred, nlocalverts, MPI_UINT64_T, 0, 1, MPI_COMM_WORLD); } } } { int irank; uint64_t i; for(irank=0;irank<size;irank++) { MPI_Barrier(MPI_COMM_WORLD); //if(rank==irank) // for(i=0;i<nlocalverts ;i++) // fprintf(stderr, "%d %" PRId64 " %" PRId64 " %" PRId64 "\n", rank,i,get_pred_from_pred_entry(pred[i]),vertex_to_global_for_pred(rank,i)); } } } int64_t nedge_traversed=0; if(rank==0) { uint64_t i, max_bfsvtx=0; /*for(i=0;i<tg->edgememory_size ;i++) { if(tg->edgememory[i].v0>max_bfsvtx) max_bfsvtx=tg->edgememory[i].v0; if(tg->edgememory[i].v1>max_bfsvtx) max_bfsvtx=tg->edgememory[i].v1; }*/ /*int64_t* restrict pred_vtx = (int64_t*)xmalloc((max_used_vertex+1) * sizeof(int64_t)); for(i=0;i<=max_used_vertex ;i++) { pred_vtx[i]=get_pred_from_pred_entry(pred[i]); }*/ nedge_traversed=verify_bfs_tree (pred_vtx, max_used_vertex, root, tg->edgememory, tg->nglobaledges); fprintf(stderr, "verify_bfs_tree nedge_traversed %" PRId64 ".\n", nedge_traversed); if(nedge_traversed<0) { fprintf(stderr, "Validation error: code %" PRId64 ".\n", nedge_traversed); validation_passed=0; } } if(rank==0) { xfree(pred_vtx); } MPI_Allreduce(MPI_IN_PLACE, &nedge_traversed, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); *edge_visit_count_ptr=nedge_traversed; /* Collect the global validation result. 
*/ MPI_Allreduce(MPI_IN_PLACE, &validation_passed, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD); return validation_passed; } int validate_bfs_result_seq2(const tuple_graph* const tg, const int64_t nglobalverts, const size_t nlocalverts, const int64_t root, int64_t* const pred, int64_t* const edge_visit_count_ptr, int64_t const max_used_vertex) { assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges); assert (pred); *edge_visit_count_ptr = 0; /* Ensure it is a valid pointer */ int ranges_ok = check_value_ranges(nglobalverts, nlocalverts, pred); if (root < 0 || root >= nglobalverts) { fprintf(stdout, "%d: Validation error: root vertex %" PRId64 " is invalid.\n", rank, root); ranges_ok = 0; } if (!ranges_ok) return 0; /* Fail */ int validation_passed = 1; int root_owner; size_t root_local; get_vertex_distribution_for_pred(1, &root, &root_owner, &root_local); int root_is_mine = (root_owner == rank); /* Get maximum values so loop counts are consistent across ranks. 
*/ uint64_t maxlocalverts_ui = nlocalverts; MPI_Allreduce(MPI_IN_PLACE, &maxlocalverts_ui, 1, MPI_UINT64_T, MPI_MAX, MPI_COMM_WORLD); size_t maxlocalverts = (size_t)maxlocalverts_ui; ptrdiff_t max_bufsize = tuple_graph_max_bufsize(tg); ptrdiff_t edge_chunk_size = ptrdiff_min(HALF_CHUNKSIZE, max_bufsize); assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges); assert (pred); fprintf(stdout, "%d: validate_bfs_result_seq1\n", rank); /* combine results from all processes */ int64_t* restrict pred_vtx = NULL; { int irank; uint64_t i; int64_t nlocalvertsMax=nlocalverts; MPI_Allreduce(MPI_IN_PLACE, &nlocalvertsMax, 1, MPI_UINT64_T, MPI_MAX, MPI_COMM_WORLD); printf("rank,nlocalverts %d %" PRId64 " max %" PRId64 " \n",rank,nlocalverts,nlocalvertsMax); fprintf(stdout, "%d: validate_bfs_result_seq2\n", rank); if(rank==0) { fprintf(stdout, "%d: validate_bfs_result_seq2.1\n", rank); pred_vtx = (int64_t*)xmalloc(nglobalverts * sizeof(int64_t)); for(i=0;i<nlocalverts ;i++) { pred_vtx[vertex_to_global_for_pred(irank,i)]=get_pred_from_pred_entry(pred[i]); } fprintf(stdout, "%d: validate_bfs_result_seq2.2\n", rank); int64_t* pred_tmp = (int64_t*)xmalloc(nlocalvertsMax * sizeof(int64_t)); int64_t nlocalvertsRemote; fprintf(stdout, "%d: validate_bfs_result_seq2.3\n", rank); MPI_Barrier(MPI_COMM_WORLD); fprintf(stdout, "%d: validate_bfs_result_seq2.4\n", rank); for(irank=1;irank<size;irank++) { MPI_Barrier(MPI_COMM_WORLD); MPI_Recv(&nlocalvertsRemote, 1, MPI_UINT64_T, irank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); MPI_Recv(pred_tmp, nlocalvertsRemote, MPI_UINT64_T, irank, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE); printf("rank,nlocalvertsRemote %d %" PRId64 " \n",rank,nlocalvertsRemote); for(i=0;i<nlocalvertsRemote ;i++) { pred_vtx[vertex_to_global_for_pred(irank,i)]=get_pred_from_pred_entry(pred_tmp[i]); } } xfree(pred_tmp); fprintf(stdout, "%d: validate_bfs_result_seq2.5\n", rank); } else { 
MPI_Barrier(MPI_COMM_WORLD); for(irank=1;irank<size;irank++) { MPI_Barrier(MPI_COMM_WORLD); if(rank==irank) { MPI_Send(&nlocalverts, 1, MPI_UINT64_T, 0, 0, MPI_COMM_WORLD); printf("Srank,nlocalvertsRemote %d %" PRId64 " \n",rank,nlocalverts); MPI_Send(pred, nlocalverts, MPI_UINT64_T, 0, 1, MPI_COMM_WORLD); } } } fprintf(stdout, "%d: validate_bfs_result_seq3\n", rank); /*{ int irank; uint64_t i; for(irank=0;irank<size;irank++) { MPI_Barrier(MPI_COMM_WORLD); //if(rank==irank) // for(i=0;i<nlocalverts ;i++) // fprintf(stdout, "%d %" PRId64 " %" PRId64 " %" PRId64 "\n", rank,i,get_pred_from_pred_entry(pred[i]),vertex_to_global_for_pred(rank,i)); } }*/ } MPI_Barrier(MPI_COMM_WORLD); int64_t nedge_traversed; if(rank==0) { uint64_t i, max_bfsvtx=0; /*for(i=0;i<tg->edgememory_size ;i++) { if(tg->edgememory[i].v0>max_bfsvtx) max_bfsvtx=tg->edgememory[i].v0; if(tg->edgememory[i].v1>max_bfsvtx) max_bfsvtx=tg->edgememory[i].v1; }*/ /*int64_t* restrict pred_vtx = (int64_t*)xmalloc((max_used_vertex+1) * sizeof(int64_t)); for(i=0;i<=max_used_vertex ;i++) { pred_vtx[i]=get_pred_from_pred_entry(pred[i]); }*/ nedge_traversed=verify_bfs_tree (pred_vtx, max_used_vertex, root, tg->edgememory, tg->nglobaledges); if(nedge_traversed<0) { fprintf(stdout, "Validation error: code %" PRId64 ".\n", nedge_traversed); validation_passed=0; } } if(rank==0) { xfree(pred_vtx); } fprintf(stdout, "%d: validate_bfs_result_seqF\n", rank); MPI_Barrier(MPI_COMM_WORLD); MPI_Allreduce(MPI_IN_PLACE, &nedge_traversed, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); *edge_visit_count_ptr=nedge_traversed; /* Collect the global validation result. */ MPI_Allreduce(MPI_IN_PLACE, &validation_passed, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD); return validation_passed; } int main(int argc, char** argv) { MPI_Init(&argc, &argv); setup_globals(); /* Parse arguments. */ int SCALE = 16; int edgefactor = 16; /* nedges / nvertices, i.e., 2*avg. 
degree */ int num_bfs_roots = 64; int bDoOneNodePureOpenMP=1; int bRunPerf=1; int bRunVal=1; int bUsePerfForTEPS=1; float timeForPerf=300.0; int numberOfCyclesForPerf=300; //uint8_t refMD5[16]; int64_t* refEdgeCounts = NULL; int64_t* refBFS_Roots = NULL; if ( !(argc == 2 || argc == 3)){ if (rank == 0) fprintf(stdout, "Usage: %s input_file [number of threads]\n", argv[0]); //fprintf(stdout, "Usage: %s SCALE edgefactor\n SCALE = log_2(# vertices) [integer, required]\n edgefactor = (# edges) / (# vertices) = .5 * (average vertex degree) [integer, defaults to 16]\n(Random number seed and Kronecker initiator are in main.c)\n", argv[0]); MPI_Abort(MPI_COMM_WORLD, 1); } if ( argc == 3){ int threads=atoi(argv[2]); #ifdef _OPENMP omp_set_num_threads(threads); #else if(threads!=1) fprintf(stdout, "ERROR: %s compiled without OpenMP\n", argv[0]); #endif } if (rank == 0) fprintf(stdout, "Graph500 version: 2.1.4 replicated csc\n"); { int iRead=0; int i; FILE *input_file; char cbuf[256]; if (rank == 0) fprintf(stdout, "Reading input from %s\n",argv[1]); input_file=fopen(argv[1],"r"); if(input_file==NULL){ if (rank == 0) fprintf(stdout, "Error : can no open %s file\n",argv[1]); MPI_Barrier(MPI_COMM_WORLD); MPI_Abort(MPI_COMM_WORLD, 1); } fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&SCALE); fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&edgefactor); fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&num_bfs_roots); fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&bDoOneNodePureOpenMP); fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&bRunPerf); fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&bUsePerfForTEPS); fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&bRunVal); fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%f",&timeForPerf); fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&numberOfCyclesForPerf); if(numberOfCyclesForPerf==0) bRunPerf=0; if(timeForPerf==0.0) bRunPerf=0; if(bRunPerf==0) bUsePerfForTEPS=0; //fgets(cbuf,256,input_file); //for (i = 0; 
i < 16; i++) // iRead+=sscanf(cbuf+i*2,"%2x",&refMD5[i]); //refMD5[i]=cbuf[i]; refEdgeCounts = (int64_t*)xmalloc(num_bfs_roots * sizeof(int64_t)); refBFS_Roots = (int64_t*)xmalloc(num_bfs_roots * sizeof(int64_t)); for (i = 0; i < num_bfs_roots; i++){ fgets(cbuf,256,input_file); iRead+=sscanf(cbuf,"%lu %lu ",refBFS_Roots+i,refEdgeCounts+i); } //printf("%d %d\n",rank,iRead); //printf("%d %d\n",rank,SCALE); //printf("%d %d\n",rank,edgefactor); fprintf(stdout, "rank %d/%d\n",rank,size); if (rank == 0){ fprintf(stdout, "\tScale: %d\n",SCALE); fprintf(stdout, "\tEdgefactor %d\n",edgefactor); fprintf(stdout, "\tNumber of BFS roots: %d\n",num_bfs_roots); fprintf(stdout, "\tUse pure OpenMP Implementation for single node: %d\n",bDoOneNodePureOpenMP); fprintf(stdout, "\tRun performance section: %d\n",bRunPerf); fprintf(stdout, "\tRun validation: %d\n",bRunVal); fprintf(stdout, "\tTime for performance section in seconds: %f\n",timeForPerf); fprintf(stdout, "\tUse only performance section for TEPS calculation: %d\n",bUsePerfForTEPS); fprintf(stdout, "\tMax number of cycles: %d\n",numberOfCyclesForPerf); fprintf(stdout, "\tNumber of MPI processes: %d\n",size); #ifdef _OPENMP fprintf(stdout, "\tMax number of threads per MPI process: %d\n",omp_get_max_threads()); #else fprintf(stdout, "\tMax number of threads per MPI process: compiled without OpenMP\n"); #endif //fprintf(stdout, "\tReffrence md5 on initial edge list: "); //for (i = 0; i < 16; i++) // fprintf(stdout, "%2.2x", refMD5[i]); //fprintf(stdout, "\n"); } fclose(input_file); #ifdef SHOWCPUAFF pid_t pid=getpid(); for (i = 0; i < size; i++){ if(i==rank){ fprintf(stdout, "MPI Process %d:\n",rank); sprintf(cbuf,"grep -i cpus_allowed /proc/%d/status",pid); system(cbuf); fprintf(stdout, "\n"); } MPI_Barrier(MPI_COMM_WORLD); } #endif MPI_Barrier(MPI_COMM_WORLD); //MPI_Barrier(MPI_COMM_WORLD); //MPI_Abort(MPI_COMM_WORLD, 1); } // int SCALE = 16; // int edgefactor = 16; /* nedges / nvertices, i.e., 2*avg. 
degree */ // if (argc >= 2) SCALE = atoi(argv[1]); // if (argc >= 3) edgefactor = atoi(argv[2]); // if (argc <= 1 || argc >= 4 || SCALE == 0 || edgefactor == 0) { // if (rank == 0) { // fprintf(stdout, "Usage: %s SCALE edgefactor\n SCALE = log_2(# vertices) [integer, required]\n edgefactor = (# edges) / (# vertices) = .5 * (average vertex degree) [integer, defaults to 16]\n(Random number seed and Kronecker initiator are in main.c)\n", argv[0]); // } // MPI_Abort(MPI_COMM_WORLD, 1); // } uint64_t seed1 = 2, seed2 = 3; const char* filename = getenv("TMPFILE"); /* If filename is NULL, store data in memory */ tuple_graph tg; tg.nglobaledges = (int64_t)(edgefactor) << SCALE; int64_t nglobalverts = (int64_t)(1) << SCALE; tg.data_in_file = (filename != NULL); if (tg.data_in_file) { MPI_File_set_errhandler(MPI_FILE_NULL, MPI_ERRORS_ARE_FATAL); MPI_File_open(MPI_COMM_WORLD, (char*)filename, MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_EXCL | MPI_MODE_DELETE_ON_CLOSE | MPI_MODE_UNIQUE_OPEN, MPI_INFO_NULL, &tg.edgefile); MPI_File_set_size(tg.edgefile, tg.nglobaledges * sizeof(packed_edge)); MPI_File_set_view(tg.edgefile, 0, packed_edge_mpi_type, packed_edge_mpi_type, "native", MPI_INFO_NULL); MPI_File_set_atomicity(tg.edgefile, 0); } /* Make the raw graph edges. */ /* Get roots for BFS runs, plus maximum vertex with non-zero degree (used by * validator). */ //int num_bfs_roots = 64; int64_t* bfs_roots = (int64_t*)xmalloc(num_bfs_roots * sizeof(int64_t)); int64_t max_used_vertex = 0; MPI_Barrier(MPI_COMM_WORLD); double make_graph_start = MPI_Wtime(); { /* Spread the two 64-bit numbers into five nonzero values in the correct * range. */ uint_fast32_t seed[5]; make_mrg_seed(seed1, seed2, seed); /* As the graph is being generated, also keep a bitmap of vertices with * incident edges. We keep a grid of processes, each row of which has a * separate copy of the bitmap (distributed among the processes in the * row), and then do an allreduce at the end. 
This scheme is used to avoid * non-local communication and reading the file separately just to find BFS * roots. */ MPI_Offset nchunks_in_file = (tg.nglobaledges + FILE_CHUNKSIZE - 1) / FILE_CHUNKSIZE; int64_t bitmap_size_in_bytes = int64_min(BITMAPSIZE, (nglobalverts + CHAR_BIT - 1) / CHAR_BIT); if (bitmap_size_in_bytes * size * CHAR_BIT < nglobalverts) { bitmap_size_in_bytes = (nglobalverts + size * CHAR_BIT - 1) / (size * CHAR_BIT); } int ranks_per_row = ((nglobalverts + CHAR_BIT - 1) / CHAR_BIT + bitmap_size_in_bytes - 1) / bitmap_size_in_bytes; int nrows = size / ranks_per_row; int my_row = -1, my_col = -1; unsigned char* restrict has_edge = NULL; MPI_Comm cart_comm; { int dims[2] = {size / ranks_per_row, ranks_per_row}; int periods[2] = {0, 0}; MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cart_comm); } int in_generating_rectangle = 0; if (cart_comm != MPI_COMM_NULL) { in_generating_rectangle = 1; { int dims[2], periods[2], coords[2]; MPI_Cart_get(cart_comm, 2, dims, periods, coords); my_row = coords[0]; my_col = coords[1]; } MPI_Comm this_col; MPI_Comm_split(cart_comm, my_col, my_row, &this_col); MPI_Comm_free(&cart_comm); has_edge = (unsigned char*)xMPI_Alloc_mem(bitmap_size_in_bytes); memset(has_edge, 0, bitmap_size_in_bytes); /* Every rank in a given row creates the same vertices (for updating the * bitmap); only one writes them to the file (or final memory buffer). */ packed_edge* buf = (packed_edge*)xmalloc(FILE_CHUNKSIZE * sizeof(packed_edge)); MPI_Offset block_limit = (nchunks_in_file + nrows - 1) / nrows; /* fprintf(stdout, "%d: nchunks_in_file = %" PRId64 ", block_limit = %" PRId64 " in grid of %d rows, %d cols\n", rank, (int64_t)nchunks_in_file, (int64_t)block_limit, nrows, ranks_per_row); */ if (tg.data_in_file) { tg.edgememory_size = 0; tg.edgememory = NULL; } else { int my_pos = my_row + my_col * nrows; int last_pos = (tg.nglobaledges % ((int64_t)FILE_CHUNKSIZE * nrows * ranks_per_row) != 0) ? 
(tg.nglobaledges / FILE_CHUNKSIZE) % (nrows * ranks_per_row) : -1; int64_t edges_left = tg.nglobaledges % FILE_CHUNKSIZE; int64_t nedges = FILE_CHUNKSIZE * (tg.nglobaledges / ((int64_t)FILE_CHUNKSIZE * nrows * ranks_per_row)) + FILE_CHUNKSIZE * (my_pos < (tg.nglobaledges / FILE_CHUNKSIZE) % (nrows * ranks_per_row)) + (my_pos == last_pos ? edges_left : 0); /* fprintf(stdout, "%d: nedges = %" PRId64 " of %" PRId64 "\n", rank, (int64_t)nedges, (int64_t)tg.nglobaledges); */ tg.edgememory_size = nedges; tg.edgememory = (packed_edge*)xmalloc(nedges * sizeof(packed_edge)); } MPI_Offset block_idx; for (block_idx = 0; block_idx < block_limit; ++block_idx) { /* fprintf(stdout, "%d: On block %d of %d\n", rank, (int)block_idx, (int)block_limit); */ MPI_Offset start_edge_index = int64_min(FILE_CHUNKSIZE * (block_idx * nrows + my_row), tg.nglobaledges); MPI_Offset edge_count = int64_min(tg.nglobaledges - start_edge_index, FILE_CHUNKSIZE); packed_edge* actual_buf = (!tg.data_in_file && block_idx % ranks_per_row == my_col) ? tg.edgememory + FILE_CHUNKSIZE * (block_idx / ranks_per_row) : buf; /* fprintf(stdout, "%d: My range is [%" PRId64 ", %" PRId64 ") %swriting into index %" PRId64 "\n", rank, (int64_t)start_edge_index, (int64_t)(start_edge_index + edge_count), (my_col == (block_idx % ranks_per_row)) ? 
"" : "not ", (int64_t)(FILE_CHUNKSIZE * (block_idx / ranks_per_row))); */ if (!tg.data_in_file && block_idx % ranks_per_row == my_col) { assert (FILE_CHUNKSIZE * (block_idx / ranks_per_row) + edge_count <= tg.edgememory_size); } generate_kronecker_range(seed, SCALE, start_edge_index, start_edge_index + edge_count, actual_buf); if (tg.data_in_file && my_col == (block_idx % ranks_per_row)) { /* Try to spread writes among ranks */ MPI_File_write_at(tg.edgefile, start_edge_index, actual_buf, edge_count, packed_edge_mpi_type, MPI_STATUS_IGNORE); } ptrdiff_t i; #ifdef _OPENMP #pragma omp parallel for #endif for (i = 0; i < edge_count; ++i) { int64_t src = get_v0_from_edge(&actual_buf[i]); int64_t tgt = get_v1_from_edge(&actual_buf[i]); if (src == tgt) continue; if (src / bitmap_size_in_bytes / CHAR_BIT == my_col) { #ifdef _OPENMP #pragma omp atomic #endif has_edge[(src / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (src % CHAR_BIT)); } if (tgt / bitmap_size_in_bytes / CHAR_BIT == my_col) { #ifdef _OPENMP #pragma omp atomic #endif has_edge[(tgt / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (tgt % CHAR_BIT)); } } } free(buf); #if 0 /* The allreduce for each root acts like we did this: */ MPI_Allreduce(MPI_IN_PLACE, has_edge, bitmap_size_in_bytes, MPI_UNSIGNED_CHAR, MPI_BOR, this_col); #endif MPI_Comm_free(&this_col); } else { tg.edgememory = NULL; tg.edgememory_size = 0; } MPI_Allreduce(&tg.edgememory_size, &tg.max_edgememory_size, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD); /* Find roots and max used vertex */ { uint64_t counter = 0; int bfs_root_idx; for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) { int64_t root; while (1) { double d[2]; make_random_numbers(2, seed1, seed2, counter, d); root = (int64_t)((d[0] + d[1]) * nglobalverts) % nglobalverts; counter += 2; if (counter > 2 * nglobalverts) break; int is_duplicate = 0; int i; for (i = 0; i < bfs_root_idx; ++i) { if (root == bfs_roots[i]) { is_duplicate = 1; break; } } if (is_duplicate) continue; /* 
Everyone takes the same path here */ int root_ok = 0; if (in_generating_rectangle && (root / CHAR_BIT / bitmap_size_in_bytes) == my_col) { root_ok = (has_edge[(root / CHAR_BIT) % bitmap_size_in_bytes] & (1 << (root % CHAR_BIT))) != 0; } MPI_Allreduce(MPI_IN_PLACE, &root_ok, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD); if (root_ok) break; } bfs_roots[bfs_root_idx] = root; if((refBFS_Roots!=NULL) && (rank==0)){ if(refBFS_Roots[bfs_root_idx] != bfs_roots[bfs_root_idx]) fprintf(stdout,"ERROR: BFS roots do not match reffrence (Ref: %lu Here: %lu)\n",refBFS_Roots[bfs_root_idx], bfs_roots[bfs_root_idx]); } } num_bfs_roots = bfs_root_idx; /* Find maximum non-zero-degree vertex. */ { int64_t i; max_used_vertex = 0; if (in_generating_rectangle) { for (i = bitmap_size_in_bytes * CHAR_BIT; i > 0; --i) { if (i > nglobalverts) continue; if (has_edge[(i - 1) / CHAR_BIT] & (1 << ((i - 1) % CHAR_BIT))) { max_used_vertex = (i - 1) + my_col * CHAR_BIT * bitmap_size_in_bytes; break; } } } MPI_Allreduce(MPI_IN_PLACE, &max_used_vertex, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD); } } if (in_generating_rectangle) { MPI_Free_mem(has_edge); } if (tg.data_in_file) { MPI_File_sync(tg.edgefile); } } MPI_Barrier(MPI_COMM_WORLD); double make_graph_stop = MPI_Wtime(); double make_graph_time = make_graph_stop - make_graph_start; if (rank == 0) { /* Not an official part of the results */ fprintf(stdout, "graph_generation: %f s\n", make_graph_time); } /* Make user's graph data structure. 
*/ MPI_Barrier(MPI_COMM_WORLD); double data_struct_start = MPI_Wtime(); #ifdef DOONENODEOMPPURE if((size==1)&&(bDoOneNodePureOpenMP==1)){ create_graph_from_edgelist (tg.edgememory, tg.nglobaledges); } else make_graph_data_structure(&tg); #else make_graph_data_structure(&tg); #endif MPI_Barrier(MPI_COMM_WORLD); double data_struct_stop = MPI_Wtime(); double data_struct_time = data_struct_stop - data_struct_start; if (rank == 0) { /* Not an official part of the results */ fprintf(stdout, "construction_time: %f s\n", data_struct_time); } /* Number of edges visited in each BFS; a double so get_statistics can be * used directly. */ double* edge_counts = (double*)xmalloc(num_bfs_roots * sizeof(double)); int64_t* edge_counts_ul = (int64_t*)xmalloc(num_bfs_roots * sizeof(int64_t)); /* Run BFS. */ int validation_passed = 1; double* bfs_times = (double*)xmalloc(num_bfs_roots * sizeof(double)); double* validate_times = (double*)xmalloc(num_bfs_roots * sizeof(double)); uint64_t nlocalverts = get_nlocalverts_for_pred(); int64_t* pred = (int64_t*)xMPI_Alloc_mem(nlocalverts * sizeof(int64_t)); #ifdef DOONENODEOMPPURE if((size==1)&&(bDoOneNodePureOpenMP==1)){ nlocalverts = tg.nglobaledges; pred = (int64_t*)xMPI_Alloc_mem(nlocalverts * sizeof(int64_t)); } else{ nlocalverts = get_nlocalverts_for_pred(); pred = (int64_t*)xMPI_Alloc_mem(nlocalverts * sizeof(int64_t)); } #else nlocalverts = get_nlocalverts_for_pred(); pred = (int64_t*)xMPI_Alloc_mem(nlocalverts * sizeof(int64_t)); #endif int bfs_root_idx; int CyclesPassed=0; int ValidationStep=0; if(bRunPerf==0) { ValidationStep=1; numberOfCyclesForPerf=1; } for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) bfs_times[bfs_root_idx]=0.0; double performance_start = MPI_Wtime(); int count=0; while(1){ MPI_Barrier(MPI_COMM_WORLD); if (rank == 0)fprintf(stdout, "Starting cycle %d.\n", CyclesPassed); for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) { int64_t root = bfs_roots[bfs_root_idx]; if ((rank == 
0)&&(ValidationStep)) fprintf(stdout, "Running BFS %d\n", bfs_root_idx); /* Clear the pred array. */ memset(pred, 0, nlocalverts * sizeof(int64_t)); /* Do the actual BFS. */ MPI_Barrier(MPI_COMM_WORLD); double bfs_start = MPI_Wtime(); #ifdef DOONENODEOMPPURE int64_t max_bfsvtx; if((size==1)&&(bDoOneNodePureOpenMP==1)){ make_bfs_tree (&pred[0], &max_bfsvtx, root); } else run_bfs(root, &pred[0]); #else run_bfs(root, &pred[0]); #endif MPI_Barrier(MPI_COMM_WORLD); double bfs_stop = MPI_Wtime(); if( (!ValidationStep) || (bUsePerfForTEPS==0)){ bfs_times[bfs_root_idx] += bfs_stop - bfs_start; count+=1; } //&&(ValidationStep) if ((rank == 0)) fprintf(stdout, "Time for BFS %d is %f\n", bfs_root_idx, bfs_stop - bfs_start); /* Validate result. */ //if (!getenv("SKIP_VALIDATION")) { if (ValidationStep) { if (rank == 0) fprintf(stdout, "Validating BFS %d\n", bfs_root_idx); MPI_Barrier(MPI_COMM_WORLD); double validate_start = MPI_Wtime(); int64_t edge_visit_count; int validation_passed_one; #ifdef DOONENODEOMPPURE if((size==1)&&(bDoOneNodePureOpenMP==1)){ int64_t result; result=verify_bfs_tree (&pred[0], max_bfsvtx, root, tg.edgememory, tg.nglobaledges); if (result < 0){ fprintf(stdout, "Validation error: code %" PRId64 ".\n", result); validation_passed_one=0; edge_visit_count=1; } else { edge_visit_count=result; validation_passed_one=1; } } else{ if(bRunVal==1){ //fprintf(stdout, "validate_bfs_result_seq\n"); validation_passed_one = validate_bfs_result_seq(&tg, nglobalverts, nlocalverts, root, pred, &edge_visit_count,max_used_vertex); } else{ //fprintf(stdout, "validate_bfs_result\n"); validation_passed_one = validate_bfs_result(&tg, max_used_vertex + 1, nlocalverts, root, pred, &edge_visit_count); } } #else if(bRunVal==1) validation_passed_one = validate_bfs_result_seq(&tg, nglobalverts, nlocalverts, root, pred, &edge_visit_count,max_used_vertex); else validation_passed_one = validate_bfs_result(&tg, max_used_vertex + 1, nlocalverts, root, pred, &edge_visit_count); #endif 
//int validation_passed_one = validate_bfs_result(&tg, max_used_vertex + 1, nlocalverts, root, pred, &edge_visit_count); MPI_Barrier(MPI_COMM_WORLD); double validate_stop = MPI_Wtime(); validate_times[bfs_root_idx] = validate_stop - validate_start; if (rank == 0) fprintf(stdout, "Validate time for BFS %d is %f\n", bfs_root_idx, validate_times[bfs_root_idx]); edge_counts[bfs_root_idx] = (double)edge_visit_count; edge_counts_ul[bfs_root_idx] = edge_visit_count; if (rank == 0) fprintf(stdout, "TEPS for BFS %d is %g\n", bfs_root_idx, edge_visit_count / (bfs_stop - bfs_start)); if((refEdgeCounts!=NULL) && (rank==0)){ if(refEdgeCounts[bfs_root_idx]!=edge_counts_ul[bfs_root_idx]) fprintf(stdout,"ERROR: Edge count do not match reference (Ref: %lu Here: %lu)\n",refEdgeCounts[bfs_root_idx], edge_counts_ul[bfs_root_idx]); } if (!validation_passed_one) { validation_passed = 0; if (rank == 0) fprintf(stdout, "Validation failed for this BFS root; skipping rest.\n"); break; } } } CyclesPassed++; if((MPI_Wtime()-performance_start>=timeForPerf)||(CyclesPassed>=numberOfCyclesForPerf)){ if(bRunVal){ if(ValidationStep==0) ValidationStep=1; else break; } else break; } if (validation_passed==0) break; } if (rank == 0) fprintf(stdout,"Completed %d cycles\n", CyclesPassed); for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) { bfs_times[bfs_root_idx]/=(count/num_bfs_roots); } MPI_Barrier(MPI_COMM_WORLD); /* Print results. 
*/ if (rank == 0) { int i; for (i = 0; i < num_bfs_roots; ++i) fprintf(stdout, "%lu %lu # [%2d] bfs_roots edge_visit_count\n",bfs_roots[i],edge_counts_ul[i],i); if (!validation_passed) { fprintf(stdout, "No results printed for invalid run.\n"); } else { int i; fprintf(stdout, "SCALE: %d\n", SCALE); fprintf(stdout, "edgefactor: %d\n", edgefactor); fprintf(stdout, "NBFS: %d\n", num_bfs_roots); fprintf(stdout, "graph_generation: %g\n", make_graph_time); fprintf(stdout, "num_mpi_processes: %d\n", size); fprintf(stdout, "construction_time: %g\n", data_struct_time); double stats[s_LAST]; get_statistics(bfs_times, num_bfs_roots, stats); fprintf(stdout, "min_time: %g\n", stats[s_minimum]); fprintf(stdout, "firstquartile_time: %g\n", stats[s_firstquartile]); fprintf(stdout, "median_time: %g\n", stats[s_median]); fprintf(stdout, "thirdquartile_time: %g\n", stats[s_thirdquartile]); fprintf(stdout, "max_time: %g\n", stats[s_maximum]); fprintf(stdout, "mean_time: %g\n", stats[s_mean]); fprintf(stdout, "stddev_time: %g\n", stats[s_std]); get_statistics(edge_counts, num_bfs_roots, stats); fprintf(stdout, "min_nedge: %.11g\n", stats[s_minimum]); fprintf(stdout, "firstquartile_nedge: %.11g\n", stats[s_firstquartile]); fprintf(stdout, "median_nedge: %.11g\n", stats[s_median]); fprintf(stdout, "thirdquartile_nedge: %.11g\n", stats[s_thirdquartile]); fprintf(stdout, "max_nedge: %.11g\n", stats[s_maximum]); fprintf(stdout, "mean_nedge: %.11g\n", stats[s_mean]); fprintf(stdout, "stddev_nedge: %.11g\n", stats[s_std]); double* secs_per_edge = (double*)xmalloc(num_bfs_roots * sizeof(double)); for (i = 0; i < num_bfs_roots; ++i) secs_per_edge[i] = bfs_times[i] / edge_counts[i]; get_statistics(secs_per_edge, num_bfs_roots, stats); fprintf(stdout, "min_TEPS: %g\n", 1. / stats[s_maximum]); fprintf(stdout, "firstquartile_TEPS: %g\n", 1. / stats[s_thirdquartile]); fprintf(stdout, "median_TEPS: %g\n", 1. / stats[s_median]); fprintf(stdout, "thirdquartile_TEPS: %g\n", 1. 
/ stats[s_firstquartile]); fprintf(stdout, "max_TEPS: %g\n", 1. / stats[s_minimum]); fprintf(stdout, "harmonic_mean_TEPS: %g\n", 1. / stats[s_mean]); /* Formula from: * Title: The Standard Errors of the Geometric and Harmonic Means and * Their Application to Index Numbers * Author(s): Nilan Norris * Source: The Annals of Mathematical Statistics, Vol. 11, No. 4 (Dec., 1940), pp. 445-448 * Publisher(s): Institute of Mathematical Statistics * Stable URL: http://www.jstor.org/stable/2235723 * (same source as in specification). */ fprintf(stdout, "harmonic_stddev_TEPS: %g\n", stats[s_std] / (stats[s_mean] * stats[s_mean] * sqrt(num_bfs_roots - 1))); free(secs_per_edge); secs_per_edge = NULL; free(edge_counts); edge_counts = NULL; get_statistics(validate_times, num_bfs_roots, stats); fprintf(stdout, "min_validate: %g\n", stats[s_minimum]); fprintf(stdout, "firstquartile_validate: %g\n", stats[s_firstquartile]); fprintf(stdout, "median_validate: %g\n", stats[s_median]); fprintf(stdout, "thirdquartile_validate: %g\n", stats[s_thirdquartile]); fprintf(stdout, "max_validate: %g\n", stats[s_maximum]); fprintf(stdout, "mean_validate: %g\n", stats[s_mean]); fprintf(stdout, "stddev_validate: %g\n", stats[s_std]); #if 0 for (i = 0; i < num_bfs_roots; ++i) { fprintf(stdout, "Run %3d: %g s, validation %g s\n", i + 1, bfs_times[i], validate_times[i]); } #endif } } #ifdef SHOWCPUAFF { int i; char cbuf[256]; MPI_Barrier(MPI_COMM_WORLD); pid_t pid=getpid(); for (i = 0; i < size; i++){ if(i==rank){ fprintf(stdout, "MPI Process %d, memory usage:\n",rank); sprintf(cbuf,"grep -i Vm /proc/%d/status",pid); system(cbuf); fprintf(stdout, "\n"); } MPI_Barrier(MPI_COMM_WORLD); } MPI_Barrier(MPI_COMM_WORLD); } #endif MPI_Free_mem(pred); free(bfs_roots); free_graph_data_structure(); if (tg.data_in_file) { MPI_File_close(&tg.edgefile); } else { free(tg.edgememory); tg.edgememory = NULL; } free(bfs_times); free(validate_times); cleanup_globals(); MPI_Finalize(); return 0; }
GB_binop__band_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).  Change the Generator sources and
// re-run the code generator instead.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__band_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_08__band_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_02__band_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_04__band_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__band_uint32)
// A*D function (colscale):         GB (_AxD__band_uint32)
// D*A function (rowscale):         GB (_DxB__band_uint32)
// C+=B function (dense accum):     GB (_Cdense_accumB__band_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__band_uint32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__band_uint32)
// C=scalar+B                       GB (_bind1st__band_uint32)
// C=scalar+B'                      GB (_bind1st_tran__band_uint32)
// C=A+scalar                       GB (_bind2nd__band_uint32)
// C=A'+scalar                      GB (_bind2nd_tran__band_uint32)

// C type:    uint32_t
// A type:    uint32_t
// A pattern? 0
// B type:    uint32_t
// B pattern? 0

// BinaryOp: cij = (aij) & (bij)

// The macros below are consumed by the template files #include'd further on;
// together they specialize the generic kernels for the BAND operator on
// uint32_t.

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x) & (y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* symbols come from GB_control.h)
#define GB_DISABLE \
    (GxB_NO_BAND || GxB_NO_UINT32 || GxB_NO_BAND_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (excluded from the build: BAND is not one of the accumulable ops above)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__band_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__band_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    // returns GrB_NO_VALUE when the operator is compiled out (GB_DISABLE),
    // telling the caller to fall back to the generic kernel
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__band_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__band_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is referenced by the template below
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__band_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is referenced by the template below
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__band_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces are freed by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // alpha/beta are only defined for eWiseUnion; the template reads
        // them only in that case
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__band_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__band_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__band_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__band_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__band_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x) & (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__band_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij) & (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x) & (aij) ;                     \
}

GrB_Info GB (_bind1st_tran__band_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij) & (y) ;                     \
}

GrB_Info GB (_bind2nd_tran__band_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ParallelOpenMP.h
#pragma once #include <ATen/ATen.h> #include <cstddef> #include <exception> #ifdef _OPENMP #define INTRA_OP_PARALLEL #include <omp.h> #endif namespace at { template <class F> inline void parallel_for( const int64_t begin, const int64_t end, const int64_t grain_size, const F& f) { TORCH_CHECK(grain_size >= 0); if (begin >= end) { return; } #ifdef _OPENMP std::atomic_flag err_flag = ATOMIC_FLAG_INIT; std::exception_ptr eptr; // choose number of tasks based on grain size and number of threads int64_t num_threads = omp_in_parallel() ? 1 : omp_get_max_threads(); if (grain_size > 0) { num_threads = std::min(num_threads, divup((end - begin), grain_size)); } #pragma omp parallel num_threads(num_threads) { int64_t num_threads = omp_get_num_threads(); int64_t tid = omp_get_thread_num(); int64_t chunk_size = divup((end - begin), num_threads); int64_t begin_tid = begin + tid * chunk_size; if (begin_tid < end) { try { f(begin_tid, std::min(end, chunk_size + begin_tid)); } catch (...) { if (!err_flag.test_and_set()) { eptr = std::current_exception(); } } } } if (eptr) { std::rethrow_exception(eptr); } #else f(begin, end); #endif } template <class scalar_t, class F, class SF> inline scalar_t parallel_reduce( const int64_t begin, const int64_t end, const int64_t grain_size, const scalar_t ident, const F& f, const SF& sf) { TORCH_CHECK(grain_size >= 0); if (begin >= end) { return ident; } else if (in_parallel_region() || get_num_threads() == 1) { return f(begin, end, ident); } else { const int64_t num_results = divup((end - begin), grain_size); std::vector<scalar_t> results(num_results); scalar_t* results_data = results.data(); std::atomic_flag err_flag = ATOMIC_FLAG_INIT; std::exception_ptr eptr; #pragma omp parallel for if ((end - begin) >= grain_size) for (int64_t id = 0; id < num_results; id++) { int64_t i = begin + id * grain_size; try { results_data[id] = f(i, i + std::min(end - i, grain_size), ident); } catch (...) 
{ if (!err_flag.test_and_set()) { eptr = std::current_exception(); } } } if (eptr) { std::rethrow_exception(eptr); } scalar_t result = ident; for (auto partial_result : results) { result = sf(result, partial_result); } return result; } } } // namespace at
reduction_gomp.c
/****************************************************************************** * FILE: omp_reduction.c * DESCRIPTION: * OpenMP Example - Combined Parallel Loop Reduction - C/C++ Version * This example demonstrates a sum reduction within a combined parallel loop * construct. Notice that default data element scoping is assumed - there * are no clauses specifying shared or private variables. OpenMP will * automatically make loop index variables private within team threads, and * global variables shared. * AUTHOR: Blaise Barney 5/99 * LAST REVISED: 04/06/05 ******************************************************************************/ #include <omp.h> #include <stdio.h> #include <stdlib.h> int main (int argc, char *argv[]) { int i, n; float a[100], b[100], sum; /* Some initializations */ n = 100; for (i=0; i < n; i++) a[i] = b[i] = i * 1.0; sum = 0.0; #pragma omp parallel for reduction(+:sum) for (i=0; i < n; i++) sum = sum + (a[i] * b[i]); printf(" Sum = %f\n",sum); }
distribute_parallel_for_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized

// Parse/sema diagnostic tests for the OpenMP
// '#pragma omp distribute parallel for simd' directive and its clauses;
// the 'expected-*'/'omp45-*'/'omp50-*' comments are -verify assertions.

void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp distribute parallel for simd
  for (int i = 0; i < 10; ++i)
    argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd foo

void test_no_clause() {
  int i;
#pragma omp distribute parallel for simd
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{statement after '#pragma omp distribute parallel for simd' must be a for loop}}
#pragma omp distribute parallel for simd
  ++i;
}

void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

void test_invalid_clause() {
  int i;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd foo bar
  for (i = 0; i < 16; ++i)
    ;
}

void test_non_identifiers() {
  int i, x;

#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd;
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd firstprivate(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd private(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}

extern int foo();

void test_safelen() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd safelen
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd safelen()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd safelen 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd safelen(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd safelen(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd safelen(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd safelen(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd safelen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}

void test_simdlen() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd simdlen
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd simdlen()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd simdlen 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd simdlen(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd simdlen(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd simdlen(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd simdlen(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd simdlen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}

void test_safelen_simdlen() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp distribute parallel for simd simdlen(6) safelen(5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp distribute parallel for simd safelen(5) simdlen(6)
  for (i = 0; i < 16; ++i)
    ;
}

void test_collapse() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd collapse
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd collapse(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd collapse()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd collapse(2)
  for (i = 0; i < 16; ++i)
    for (int j = 0; j < 16; ++j)
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp distribute parallel for simd reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}

void test_linear() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd linear(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd linear(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd linear(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp distribute parallel for simd linear(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp distribute parallel for simd linear(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp distribute parallel for simd linear(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

void test_aligned() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd aligned(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp distribute parallel for simd aligned(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp distribute parallel for simd aligned(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp distribute parallel for simd aligned(x, y, z)
  for (i = 0; i < 16; ++i)
    ;

  int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(z)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned(x :)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(x :, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(x : 1)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(x : 2 * 2)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(x : 1, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(x : 1, y, z : 1)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute parallel for simd aligned(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute parallel for simd aligned(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-note@+2 {{defined as aligned}}
// expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp distribute parallel for simd aligned(x) aligned(z, x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-note@+3 {{defined as aligned}}
// expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
// expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute parallel for simd aligned(x, y, z) aligned(y, z)
  for (i = 0; i < 16; ++i)
    ;
}

void test_private() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd private(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd private(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd private(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd private()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd private(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd private(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}

void test_lastprivate() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

void test_firstprivate() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
  // expected-error@+3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 {{defined as lastprivate}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+3 2 {{lastprivate variable cannot be firstprivate}} expected-note@+3 2 {{defined as lastprivate}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+3 3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 3 {{defined as lastprivate}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

void test_loop_messages() {
  float a[100], b[100], c[100];
#pragma omp target
#pragma omp teams
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp distribute parallel for simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
#pragma omp target
#pragma omp teams
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp distribute parallel for simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}

void test_nontemporal() {
  int i;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd nontemporal(
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd nontemporal(,
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd nontemporal(, )
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd nontemporal()
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd nontemporal(int)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} omp50-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd nontemporal(0)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp distribute parallel for simd nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp distribute parallel for simd nontemporal(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp distribute parallel for simd nontemporal(x, y, z)
  for (i = 0; i < 16; ++i)
    ;

  int x, y;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd nontemporal(x :)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}}
#pragma omp distribute parallel for simd nontemporal(x :, )
  for (i = 0; i < 16; ++i)
    ;
// omp50-note@+2 {{defined as nontemporal}}
// omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}}
#pragma omp distribute parallel for simd nontemporal(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd private(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd nontemporal(x) private(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}}
#pragma omp distribute parallel for simd nontemporal(x, y : 0)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd nontemporal(x) lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd lastprivate(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp distribute parallel for simd order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}} expected-error {{expected '(' after 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp distribute parallel for simd order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp distribute parallel for simd order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp distribute parallel for simd order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp distribute parallel for simd order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute parallel for simd'}}
  for (int i = 0; i < 10; ++i)
    ;
}
reduction_plus_2.c
// PASS: *
// RUN: ${CATO_ROOT}/src/scripts/cexecute_pass.py %s -o %t
// RUN: diff <(mpirun -np 4 %t) %s.reference_output

#include <stdlib.h>
#include <stdio.h>
#include <omp.h>

/*
 * Smoke test for an OpenMP '+' reduction.
 *
 * 'result' starts at 100; inside the parallel region each thread gets a
 * private copy (initialized to the '+' identity, 0) and adds its own
 * thread id to it. At the end of the region the reduction combines the
 * partial sums with the original value, so the printed result is
 * 100 + sum of all thread ids. The RUN lines above diff the output of a
 * 4-rank MPI execution against a stored reference, so the printf format
 * below must not change.
 */
int main() {
    int result = 100;

#pragma omp parallel reduction(+:result)
    {
        /* Each thread contributes its own id to the reduction. */
        int rank = omp_get_thread_num();
        result += rank;
    }

    printf("Result: %d\n", result);

    /* Explicit success exit (was previously implicit via C99 main). */
    return 0;
}
GB_binop__isgt_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isgt_uint16 // A.*B function (eWiseMult): GB_AemultB__isgt_uint16 // A*D function (colscale): GB_AxD__isgt_uint16 // D*A function (rowscale): GB_DxB__isgt_uint16 // C+=B function (dense accum): GB_Cdense_accumB__isgt_uint16 // C+=b function (dense accum): GB_Cdense_accumb__isgt_uint16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isgt_uint16 // C=scalar+B GB_bind1st__isgt_uint16 // C=scalar+B' GB_bind1st_tran__isgt_uint16 // C=A+scalar GB_bind2nd__isgt_uint16 // C=A'+scalar GB_bind2nd_tran__isgt_uint16 // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // 
cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x > y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGT || GxB_NO_UINT16 || GxB_NO_ISGT_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isgt_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isgt_uint16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isgt_uint16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isgt_uint16 ( GrB_Matrix C, const GrB_Matrix 
A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isgt_uint16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__isgt_uint16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = 
NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isgt_uint16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isgt_uint16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with 
scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isgt_uint16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB_bind1st_tran__isgt_uint16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB_bind2nd_tran__isgt_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
omp_smithW-v2-ifClause.c
/********************************************************************************* * Smith–Waterman algorithm * Purpose: Local alignment of nucleotide or protein sequences * Authors: Daniel Holanda, Hanoch Griner, Taynara Pinheiro * * Compilation: gcc omp_smithW.c -o omp_smithW -fopenmp -DDEBUG // debugging mode * gcc omp_smithW.c -O3 -o omp_smithW -fopenmp // production run * Execution: ./omp_smithW <number_of_col> <number_of_rows> * * Updated by C. Liao, Jan 2nd, 2019 *********************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include <time.h> #include <assert.h> #include <stdbool.h> // C99 does not support the boolean data type #include "parameters.h" /*-------------------------------------------------------------------- * Text Tweaks */ #define RESET "\033[0m" #define BOLDRED "\033[1m\033[31m" /* Bold Red */ /* End of text tweaks */ /*-------------------------------------------------------------------- * Constants */ #define PATH -1 #define NONE 0 #define UP 1 #define LEFT 2 #define DIAGONAL 3 /* End of constants */ /*-------------------------------------------------------------------- * Helpers */ #define min(x, y) (((x) < (y)) ? (x) : (y)) #define max(a,b) ((a) > (b) ? 
a : b) // #define DEBUG /* End of Helpers */ /*-------------------------------------------------------------------- * Functions Prototypes */ void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos); int matchMissmatchScore(long long int i, long long int j); void backtrack(int* P, long long int maxPos); void printMatrix(int* matrix); void printPredecessorMatrix(int* matrix); void generate(void); long long int nElement(long long int i); void calcFirstDiagElement(long long int i, long long int *si, long long int *sj); /* End of prototypes */ /*-------------------------------------------------------------------- * Global Variables */ bool useBuiltInData=true; //Defines size of strings to be compared long long int m = 8 ; //Columns - Size of string a long long int n = 9; //Lines - Size of string b // the generated scoring matrix's size is m++ and n++ later to have the first row/column as 0s. //Defines scores int matchScore = 3; int missmatchScore = -3; int gapScore = -2; //Strings over the Alphabet Sigma char *a, *b; /* End of global variables */ /*-------------------------------------------------------------------- * Function: main */ int main(int argc, char* argv[]) { // thread_count is no longer used int thread_count; if (argc==3) { m = strtoll(argv[1], NULL, 10); n = strtoll(argv[2], NULL, 10); useBuiltInData = false; } //#ifdef DEBUG if (useBuiltInData) printf ("Using built-in data for testing ..\n"); printf("Problem size: Matrix[%lld][%lld], FACTOR=%d CUTOFF=%d\n", n, m, FACTOR, CUTOFF); //#endif //Allocates a and b a = (char*) malloc(m * sizeof(char)); b = (char*) malloc(n * sizeof(char)); //Because now we have zeros m++; n++; //Allocates similarity matrix H int *H; H = (int *) calloc(m * n, sizeof(int)); //Allocates predecessor matrix P int *P; P = (int *)calloc(m * n, sizeof(int)); if (useBuiltInData) { //Uncomment this to test the sequence available at //http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1 // OBS: m=11 
n=7 // a[0] = 'C'; // a[1] = 'G'; // a[2] = 'T'; // a[3] = 'G'; // a[4] = 'A'; // a[5] = 'A'; // a[6] = 'T'; // a[7] = 'T'; // a[8] = 'C'; // a[9] = 'A'; // a[10] = 'T'; // b[0] = 'G'; // b[1] = 'A'; // b[2] = 'C'; // b[3] = 'T'; // b[4] = 'T'; // b[5] = 'A'; // b[6] = 'C'; // https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm#Example // Using the wiki example to verify the results b[0] = 'G'; b[1] = 'G'; b[2] = 'T'; b[3] = 'T'; b[4] = 'G'; b[5] = 'A'; b[6] = 'C'; b[7] = 'T'; b[8] = 'A'; a[0] = 'T'; a[1] = 'G'; a[2] = 'T'; a[3] = 'T'; a[4] = 'A'; a[5] = 'C'; a[6] = 'G'; a[7] = 'G'; } else { //Gen random arrays a and b generate(); } //Start position for backtrack long long int maxPos = 0; //Calculates the similarity matrix long long int i, j; // The way to generate all wavefront is to go through the top edge elements // starting from the left top of the matrix, go to the bottom top -> down, then left->right // total top edge element count = dim1_size + dim2_size -1 //Because now we have zeros ((m-1) + (n-1) - 1) long long int nDiag = m + n - 3; #ifdef DEBUG printf("nDiag=%d\n", nDiag); printf("Number of wavefront lines and their first element positions:\n"); #endif #pragma omp parallel { #pragma omp master { thread_count = omp_get_num_threads(); printf ("if-clause() Using %d out of max %d threads...", thread_count, omp_get_max_threads()); } } //Gets Initial time double initialTime = omp_get_wtime(); // #pragma omp parallel default(none) shared(H, P, maxPos, nDiag, j) private(i) { for (i = 1; i <= nDiag; ++i) // start from 1 since 0 is the boundary padding { long long int nEle, si, sj; nEle = nElement(i); calcFirstDiagElement(i, &si, &sj); #pragma omp parallel for private(j) shared (nEle, si, sj, H, P, maxPos) if (nEle>=CUTOFF) for (j = 0; j < nEle; ++j) { // going upwards : anti-diagnol direction long long int ai = si - j ; // going up vertically long long int aj = sj + j; // going right in horizontal similarityScore(ai, aj, H, P, &maxPos); // a critical 
section is used inside } } // for end nDiag } // end omp parallel double finalTime = omp_get_wtime(); printf("\nElapsed time for scoring matrix computation: %f\n", finalTime - initialTime); initialTime = omp_get_wtime(); backtrack(P, maxPos); finalTime = omp_get_wtime(); //Gets backtrack time finalTime = omp_get_wtime(); printf("Elapsed time for backtracking: %f\n", finalTime - initialTime); if (useBuiltInData) { printf ("Verifying results using the builtinIn data: %s\n", (H[n*m-1]==7)?"true":"false"); assert (H[n*m-1]==7); } #ifdef DEBUG printf("\nSimilarity Matrix:\n"); printMatrix(H); printf("\nPredecessor Matrix:\n"); printPredecessorMatrix(P); #endif //Frees similarity matrixes free(H); free(P); //Frees input arrays free(a); free(b); return 0; } /* End of main */ /*-------------------------------------------------------------------- * Function: nElement * Purpose: Calculate the number of i-diagonal's elements * i value range 1 to nDiag. we inclulde the upper bound value. 0 is for the padded wavefront, which is ignored. */ long long int nElement(long long int i) { if (i < m && i < n) { // smaller than both directions //Number of elements in the diagonal is increasing return i; } else if (i < max(m, n)) { // smaller than only one direction //Number of elements in the diagonal is stable long int min = min(m, n); // the longer direction has the edge elements, the number is the smaller direction's size return min - 1; } else { //Number of elements in the diagonal is decreasing long int min = min(m, n); return 2 * min - i + llabs(m - n) - 2; } } /*-------------------------------------------------------------------- * Function: calcElement: expect valid i value is from 1 to nDiag. 
since the first one is 0 padding * Purpose: Calculate the position of (si, sj)-element * n rows, m columns: we sweep the matrix on the left edge then bottom edge to get the wavefront */ void calcFirstDiagElement(long long int i, long long int *si, long long int *sj) { // Calculate the first element of diagonal if (i < n) { // smaller than row count *si = i; *sj = 1; // start from the j==1 since j==0 is the padding } else { // now we sweep horizontally at the bottom of the matrix *si = n - 1; // i is fixed *sj = i - n + 2; // j position is the nDiag (id -n) +1 +1 // first +1 } } /* // understanding the calculation by an example n =6 // row m =2 // col padded scoring matrix n=7 m=3 0 1 2 ------- 0 x x x 1 x x x 2 x x x 3 x x x 4 x x x 5 x x x 6 x x x We should peel off top row and left column since they are the padding the remaining 6x2 sub matrix is what is interesting for us Now find the number of wavefront lines and their first element's position in the scoring matrix total diagnol frontwave = (n-1) + (m-1) -1 // submatrix row+column -1 We use the left most element in each wavefront line as its first element. Then we have the first elements like (1,1), (2,1) (3,1) .. (6,1) (6,2) */ /*-------------------------------------------------------------------- * Function: SimilarityScore * Purpose: Calculate value of scoring matrix element H(i,j) : the maximum Similarity-Score H(i,j) * int *P; the predecessor array,storing which of the three elements is picked with max value */ void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos) { int up, left, diag; //Stores index of element long long int index = m * i + j; //Get element above up = H[index - m] + gapScore; //Get element on the left left = H[index - 1] + gapScore; //Get element on the diagonal diag = H[index - m - 1] + matchMissmatchScore(i, j); //Calculates the maximum int max = NONE; int pred = NONE; /* === Matrix === * a[0] ... a[n] * b[0] * ... 
* b[n] * * generate 'a' from 'b', if '←' insert e '↑' remove * a=GAATTCA * b=GACTT-A * * generate 'b' from 'a', if '←' insert e '↑' remove * b=GACTT-A * a=GAATTCA */ if (diag > max) { //same letter ↖ max = diag; pred = DIAGONAL; } if (up > max) { //remove letter ↑ max = up; pred = UP; } if (left > max) { //insert letter ← max = left; pred = LEFT; } //Inserts the value in the similarity and predecessor matrixes H[index] = max; P[index] = pred; //Updates maximum score to be used as seed on backtrack if (max > H[*maxPos]) { #pragma omp critical *maxPos = index; } } /* End of similarityScore */ /*-------------------------------------------------------------------- * Function: matchMissmatchScore * Purpose: Similarity function on the alphabet for match/missmatch */ int matchMissmatchScore(long long int i, long long int j) { if (a[j - 1] == b[i - 1]) return matchScore; else return missmatchScore; } /* End of matchMissmatchScore */ /*-------------------------------------------------------------------- * Function: backtrack * Purpose: Modify matrix to print, path change from value to PATH */ void backtrack(int* P, long long int maxPos) { //hold maxPos value long long int predPos; //backtrack from maxPos to startPos = 0 do { if (P[maxPos] == DIAGONAL) predPos = maxPos - m - 1; else if (P[maxPos] == UP) predPos = maxPos - m; else if (P[maxPos] == LEFT) predPos = maxPos - 1; P[maxPos] *= PATH; maxPos = predPos; } while (P[maxPos] != NONE); } /* End of backtrack */ /*-------------------------------------------------------------------- * Function: printMatrix * Purpose: Print Matrix */ void printMatrix(int* matrix) { long long int i, j; printf("-\t-\t"); for (j = 0; j < m-1; j++) { printf("%c\t", a[j]); } printf("\n-\t"); for (i = 0; i < n; i++) { //Lines for (j = 0; j < m; j++) { if (j==0 && i>0) printf("%c\t", b[i-1]); printf("%d\t", matrix[m * i + j]); } printf("\n"); } } /* End of printMatrix */ /*-------------------------------------------------------------------- * 
Function: printPredecessorMatrix * Purpose: Print predecessor matrix */ void printPredecessorMatrix(int* matrix) { long long int i, j, index; printf(" "); for (j = 0; j < m-1; j++) { printf("%c ", a[j]); } printf("\n "); for (i = 0; i < n; i++) { //Lines for (j = 0; j < m; j++) { if (j==0 && i>0) printf("%c ", b[i-1]); index = m * i + j; if (matrix[index] < 0) { printf(BOLDRED); if (matrix[index] == -UP) printf("↑ "); else if (matrix[index] == -LEFT) printf("← "); else if (matrix[index] == -DIAGONAL) printf("↖ "); else printf("- "); printf(RESET); } else { if (matrix[index] == UP) printf("↑ "); else if (matrix[index] == LEFT) printf("← "); else if (matrix[index] == DIAGONAL) printf("↖ "); else printf("- "); } } printf("\n"); } } /* End of printPredecessorMatrix */ /*-------------------------------------------------------------------- * Function: generate * Purpose: Generate arrays a and b */ void generate() { //Random seed srand(time(NULL)); //Generates the values of a long long int i; for (i = 0; i < m; i++) { int aux = rand() % 4; if (aux == 0) a[i] = 'A'; else if (aux == 2) a[i] = 'C'; else if (aux == 3) a[i] = 'G'; else a[i] = 'T'; } //Generates the values of b for (i = 0; i < n; i++) { int aux = rand() % 4; if (aux == 0) b[i] = 'A'; else if (aux == 2) b[i] = 'C'; else if (aux == 3) b[i] = 'G'; else b[i] = 'T'; } } /* End of generate */ /*-------------------------------------------------------------------- * External References: * http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1 * http://pt.slideshare.net/avrilcoghlan/the-smith-waterman-algorithm * http://baba.sourceforge.net/ */
Example_teams.4.c
/*
 * @@name: teams.4c
 * @@type: C
 * @@compilable: yes
 * @@linkable: no
 * @@expect: success
 * @@version: omp_4.0
 */

/* Number of elements in each input vector. Parenthesized so the macro
 * expands safely inside any surrounding expression (e.g. 'x % N'). */
#define N (1024*1024)

/*
 * Compute the dot product of two length-N float vectors on a target
 * device: the loop is distributed across 8 teams with at most 16
 * threads each, and the partial sums are combined with a '+' reduction
 * on 'sum' at both the teams and the parallel-for level.
 *
 * B, C:    input arrays of at least N floats (mapped 'to' the device).
 * returns: sum over i of B[i] * C[i].
 */
float dotprod(float B[], float C[])
{
    float sum = 0.0;
    int i;

    #pragma omp target map(to: B[0:N], C[0:N]) map(tofrom: sum)
    #pragma omp teams num_teams(8) thread_limit(16) reduction(+:sum)
    #pragma omp distribute parallel for reduction(+:sum) \
                dist_schedule(static, 1024) schedule(static, 64)
    for (i=0; i<N; i++)
        sum += B[i] * C[i];

    return sum;
}

/* Note: The variable sum is now mapped with tofrom, for correct
 * execution with 4.5 (and pre-4.5) compliant compilers. See Devices
 * Intro. */
pcpaes_ecbdecrypt.c
/******************************************************************************* * Copyright 2013-2019 Intel Corporation * All Rights Reserved. * * If this software was obtained under the Intel Simplified Software License, * the following terms apply: * * The source code, information and material ("Material") contained herein is * owned by Intel Corporation or its suppliers or licensors, and title to such * Material remains with Intel Corporation or its suppliers or licensors. The * Material contains proprietary information of Intel or its suppliers and * licensors. The Material is protected by worldwide copyright laws and treaty * provisions. No part of the Material may be used, copied, reproduced, * modified, published, uploaded, posted, transmitted, distributed or disclosed * in any way without Intel's prior express written permission. No license under * any patent, copyright or other intellectual property rights in the Material * is granted to or conferred upon you, either expressly, by implication, * inducement, estoppel or otherwise. Any license under such intellectual * property rights must be express and approved by Intel in writing. * * Unless otherwise agreed by Intel in writing, you may not remove or alter this * notice or any other notice embedded in Materials by Intel or Intel's * suppliers or licensors in any way. * * * If this software was obtained under the Apache License, Version 2.0 (the * "License"), the following terms apply: * * You may not use this file except in compliance with the License. You may * obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 * * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. 
*******************************************************************************/ /* // // Purpose: // Cryptography Primitive. // AES encryption/decryption (ECB mode) // // Contents: // ippsAESDecryptECB() // */ #include "owndefs.h" #include "owncp.h" #include "pcpaesm.h" #include "pcptool.h" #if defined( _OPENMP ) #include <omp.h> #endif #if (_ALG_AES_SAFE_==_ALG_AES_SAFE_COMPOSITE_GF_) #elif (_ALG_AES_SAFE_==_ALG_AES_SAFE_COMPACT_SBOX_) # include "pcprijtables.h" #else #endif /* // AES-ECB denryption // // Parameters: // pSrc pointer to the source data buffer // pDst pointer to the target data buffer // nBlocks number of decrypted data blocks // pCtx pointer to the AES context */ static void cpDecryptAES_ecb(const Ipp8u* pSrc, Ipp8u* pDst, int nBlocks, const IppsAESSpec* pCtx) { #if (_IPP>=_IPP_P8) || (_IPP32E>=_IPP32E_Y8) /* use pipelined version is possible */ if(AES_NI_ENABLED==RIJ_AESNI(pCtx)) { DecryptECB_RIJ128pipe_AES_NI(pSrc, pDst, RIJ_NR(pCtx), RIJ_DKEYS(pCtx), nBlocks*MBS_RIJ128); } else #endif { /* block-by-block decryption */ RijnCipher decoder = RIJ_DECODER(pCtx); while(nBlocks) { //decoder((const Ipp32u*)pSrc, (Ipp32u*)pDst, RIJ_NR(pCtx), RIJ_DKEYS(pCtx), (const Ipp32u (*)[256])RIJ_DEC_SBOX(pCtx)); #if (_ALG_AES_SAFE_==_ALG_AES_SAFE_COMPACT_SBOX_) decoder(pSrc, pDst, RIJ_NR(pCtx), RIJ_EKEYS(pCtx), RijDecSbox/*NULL*/); #else decoder(pSrc, pDst, RIJ_NR(pCtx), RIJ_DKEYS(pCtx), NULL); #endif pSrc += MBS_RIJ128; pDst += MBS_RIJ128; nBlocks--; } } } /*F* // Name: ippsAESDecryptECB // // Purpose: AES-ECB decryption. 
// // Returns: Reason: // ippStsNullPtrErr pCtx == NULL // pSrc == NULL // pDst == NULL // ippStsContextMatchErr !VALID_AES_ID() // ippStsLengthErr dataLen <1 // ippStsUnderRunErr 0!=(dataLen%MBS_RIJ128) // ippStsNoErr no errors // // Parameters: // pSrc pointer to the source data buffer // pDst pointer to the target data buffer // len input/output buffer length (in bytes) // pCtx pointer to the AES context // *F*/ IPPFUN(IppStatus, ippsAESDecryptECB,(const Ipp8u* pSrc, Ipp8u* pDst, int len, const IppsAESSpec* pCtx)) { /* test context */ IPP_BAD_PTR1_RET(pCtx); /* use aligned AES context */ pCtx = (IppsAESSpec*)( IPP_ALIGNED_PTR(pCtx, AES_ALIGNMENT) ); /* test the context ID */ IPP_BADARG_RET(!VALID_AES_ID(pCtx), ippStsContextMatchErr); /* test source and target buffer pointers */ IPP_BAD_PTR2_RET(pSrc, pDst); /* test stream length */ IPP_BADARG_RET((len<1), ippStsLengthErr); /* test stream integrity */ IPP_BADARG_RET((len&(MBS_RIJ128-1)), ippStsUnderRunErr); /* do encryption */ { int nBlocks = len / MBS_RIJ128; #if !defined(_OPENMP) #if(_IPP32E>=_IPP32E_K0) if (IsFeatureEnabled(ippCPUID_AVX512VAES)) DecryptECB_RIJ128pipe_VAES_NI(pSrc, pDst, len, pCtx); else #endif cpDecryptAES_ecb(pSrc, pDst, nBlocks, pCtx); #else int blk_per_thread = AES_NI_ENABLED==RIJ_AESNI(pCtx)? AESNI128_MIN_BLK_PER_THREAD : RIJ128_MIN_BLK_PER_THREAD; int nThreads = IPP_MIN(IPPCP_GET_NUM_THREADS(), IPP_MAX(nBlocks/blk_per_thread, 1)); if(1==nThreads) cpDecryptAES_ecb(pSrc, pDst, nBlocks, pCtx); else { int blksThreadReg; int blksThreadTail; #pragma omp parallel IPPCP_OMP_LIMIT_MAX_NUM_THREADS(nThreads) { #pragma omp master { nThreads = omp_get_num_threads(); blksThreadReg = nBlocks / nThreads; blksThreadTail = blksThreadReg + nBlocks % nThreads; } #pragma omp barrier { int id = omp_get_thread_num(); Ipp8u* pThreadSrc = (Ipp8u*)pSrc + id*blksThreadReg * MBS_RIJ128; Ipp8u* pThreadDst = (Ipp8u*)pDst + id*blksThreadReg * MBS_RIJ128; int blkThread = (id==(nThreads-1))? 
blksThreadTail : blksThreadReg; cpDecryptAES_ecb(pThreadSrc, pThreadDst, blkThread, pCtx); } } } #endif /* _OPENMP version */ return ippStsNoErr; } }
ast-dump-openmp-task.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test() { #pragma omp task ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-task.c:3:1, line:6:1> line:3:6 test 'void ()' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:6:1> // CHECK-NEXT: `-OMPTaskDirective {{.*}} <line:4:1, col:17> // CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: |-NullStmt {{.*}} <col:3> // CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-task.c:4:1) *const restrict'
3d7pt.lbpar.c
#include <omp.h>
#include <math.h>
/* Integer ceiling/floor division helpers used by the tiled (CLooG) loop bounds. */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 *
 * NOTE: *y is modified in place by the carry normalization below, so the
 * caller must not rely on *y keeping its value across this call.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;

    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;

    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Driver: parses the grid/time sizes from argv, allocates a double-buffered
 * Nz x Ny x Nx grid, runs the tiled 7-point stencil TESTS times and reports
 * the best wall-clock time.
 *
 * Usage: prog Nx Ny Nz Nt  (interior sizes; +2 is added for the halo layer)
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* NOTE(review): Nx/Ny/Nz (and Nt) are left uninitialized when fewer than
   * 3 (resp. 4) command-line arguments are supplied; the allocations and
   * loops below then read garbage sizes. The +2 adds one boundary (halo)
   * cell on each side of every axis. */
  if (argc > 3)
  {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Double buffer: A[t%2] is read and A[(t+1)%2] is written each time step. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;   /* these match the hard-coded tile constants in the generated loops below */
  tile_size[1] = 32;
  tile_size[2] = 24;
  tile_size[3] = 64;   /* innermost (vectorized) tile width */
  tile_size[4] = -1;   /* sentinel terminating the list */

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;   /* stencil center weight */
  const double beta = 0.0765;    /* stencil neighbor weight */

  // initialize variables
  //
  srand(42);
  /* Fill the grid (excluding the lower halo face) with pseudo-random values. */
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  #pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
    #pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

/* The following license / feature-test comments are residue of glibc headers
   inlined by the source-to-source (PLUTO/CLooG) tool chain; they carry no
   code and do not affect the program. */
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it.  */
/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default.  */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
   Unicode 6.0.  */
/* We do not support C11 <threads.h>.  */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

/* Start of CLooG code */
/* Machine-generated time-skewed, tiled loop nest -- do not hand-edit.
   t1 iterates over time tiles; t2 (parallelized across threads) over the
   skewed z-tiles; t3/t4 over y/x tiles; t5 is the time step; t6/t7/t8 are
   the (skewed) z/y/x point coordinates. The innermost t8 loop is the
   vectorizable one (see the ivdep/vector pragmas). */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,16);t1++) {
    lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
    ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(2*t1-2,3)),ceild(32*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(16*t1+Ny+29,24)),floord(32*t2+Ny+28,24)),floord(32*t1-32*t2+Nz+Ny+27,24));t3++) {
        for (t4=max(max(max(0,ceild(t1-3,4)),ceild(32*t2-Nz-60,64)),ceild(24*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(16*t1+Nx+29,64)),floord(32*t2+Nx+28,64)),floord(24*t3+Nx+20,64)),floord(32*t1-32*t2+Nz+Nx+27,64));t4++) {
          for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),24*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),24*t3+22),64*t4+62),32*t1-32*t2+Nz+29);t5++) {
            for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
                lbv=max(64*t4,t5+1);
                ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  /* 7-point update: center weighted by alpha, the six face
                     neighbors summed and weighted by beta; written to the
                     other half of the double buffer. */
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
  #pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (causes performance degradation, so left disabled)
  /*
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
cp-tree.h
/* Definitions for C++ parsing and type checking. Copyright (C) 1987, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_CP_TREE_H #define GCC_CP_TREE_H #include "ggc.h" #include "function.h" #include "hashtab.h" #include "vec.h" /* In order for the format checking to accept the C++ front end diagnostic framework extensions, you must include this file before diagnostic-core.h, not after. We override the definition of GCC_DIAG_STYLE in c-common.h. */ #undef GCC_DIAG_STYLE #define GCC_DIAG_STYLE __gcc_cxxdiag__ #if defined(GCC_DIAGNOSTIC_CORE_H) || defined (GCC_C_COMMON_H) #error \ In order for the format checking to accept the C++ front end diagnostic \ framework extensions, you must include this file before diagnostic-core.h and \ c-common.h, not after. #endif #include "c-family/c-common.h" #include "diagnostic.h" #include "name-lookup.h" /* Usage of TREE_LANG_FLAG_?: 0: IDENTIFIER_MARKED (IDENTIFIER_NODEs) NEW_EXPR_USE_GLOBAL (in NEW_EXPR). DELETE_EXPR_USE_GLOBAL (in DELETE_EXPR). COMPOUND_EXPR_OVERLOADED (in COMPOUND_EXPR). TREE_INDIRECT_USING (in NAMESPACE_DECL). 
      CLEANUP_P (in TRY_BLOCK)
      AGGR_INIT_VIA_CTOR_P (in AGGR_INIT_EXPR)
      PTRMEM_OK_P (in ADDR_EXPR, OFFSET_REF)
      PAREN_STRING_LITERAL (in STRING_CST)
      DECL_PRETTY_FUNCTION_P (in VAR_DECL)
      KOENIG_LOOKUP_P (in CALL_EXPR)
      STATEMENT_LIST_NO_SCOPE (in STATEMENT_LIST).
      EXPR_STMT_STMT_EXPR_RESULT (in EXPR_STMT)
      STMT_EXPR_NO_SCOPE (in STMT_EXPR)
      BIND_EXPR_TRY_BLOCK (in BIND_EXPR)
      TYPENAME_IS_ENUM_P (in TYPENAME_TYPE)
      REFERENCE_REF_P (in INDIRECT_REF)
      QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF)
      OMP_FOR_GIMPLIFYING_P (in OMP_FOR)
      BASELINK_QUALIFIED_P (in BASELINK)
      TARGET_EXPR_IMPLICIT_P (in TARGET_EXPR)
      TEMPLATE_PARM_PARAMETER_PACK (in TEMPLATE_PARM_INDEX)
      ATTR_IS_DEPENDENT (in the TREE_LIST for an attribute)
      CONSTRUCTOR_IS_DIRECT_INIT (in CONSTRUCTOR)
      LAMBDA_EXPR_CAPTURES_THIS_P (in LAMBDA_EXPR)
      DECLTYPE_FOR_LAMBDA_CAPTURE (in DECLTYPE_TYPE)
      VEC_INIT_EXPR_IS_CONSTEXPR (in VEC_INIT_EXPR)
   1: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE)
      TI_PENDING_TEMPLATE_FLAG.
      TEMPLATE_PARMS_FOR_INLINE.
      DELETE_EXPR_USE_VEC (in DELETE_EXPR).
      (TREE_CALLS_NEW) (in _EXPR or _REF) (commented-out).
      ICS_ELLIPSIS_FLAG (in _CONV)
      DECL_INITIALIZED_P (in VAR_DECL)
      TYPENAME_IS_CLASS_P (in TYPENAME_TYPE)
      STMT_IS_FULL_EXPR_P (in _STMT)
      TARGET_EXPR_LIST_INIT_P (in TARGET_EXPR)
      LAMBDA_EXPR_MUTABLE_P (in LAMBDA_EXPR)
      DECLTYPE_FOR_LAMBDA_RETURN (in DECLTYPE_TYPE)
   2: IDENTIFIER_OPNAME_P (in IDENTIFIER_NODE)
      ICS_THIS_FLAG (in _CONV)
      DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL)
      STATEMENT_LIST_TRY_BLOCK (in STATEMENT_LIST)
      TYPENAME_IS_RESOLVING_P (in TYPENAME_TYPE)
      LAMBDA_EXPR_DEDUCE_RETURN_TYPE_P (in LAMBDA_EXPR)
      TARGET_EXPR_DIRECT_INIT_P (in TARGET_EXPR)
   3: (TREE_REFERENCE_EXPR) (in NON_LVALUE_EXPR) (commented-out).
      ICS_BAD_FLAG (in _CONV)
      FN_TRY_BLOCK_P (in TRY_BLOCK)
      IDENTIFIER_CTOR_OR_DTOR_P (in IDENTIFIER_NODE)
      BIND_EXPR_BODY_BLOCK (in BIND_EXPR)
      DECL_NON_TRIVIALLY_INITIALIZED_P (in VAR_DECL)
   4: TREE_HAS_CONSTRUCTOR (in INDIRECT_REF, SAVE_EXPR, CONSTRUCTOR,
          or FIELD_DECL).
IDENTIFIER_TYPENAME_P (in IDENTIFIER_NODE) DECL_TINFO_P (in VAR_DECL) 5: C_IS_RESERVED_WORD (in IDENTIFIER_NODE) DECL_VTABLE_OR_VTT_P (in VAR_DECL) 6: IDENTIFIER_REPO_CHOSEN (in IDENTIFIER_NODE) DECL_CONSTRUCTION_VTABLE_P (in VAR_DECL) TYPE_MARKED_P (in _TYPE) Usage of TYPE_LANG_FLAG_?: 0: TYPE_DEPENDENT_P 1: TYPE_HAS_USER_CONSTRUCTOR. 2: unused 3: TYPE_FOR_JAVA. 4: TYPE_HAS_NONTRIVIAL_DESTRUCTOR 5: CLASS_TYPE_P (in RECORD_TYPE and UNION_TYPE) ENUM_FIXED_UNDERLYING_TYPE_P (in ENUMERAL_TYPE) 6: TYPE_DEPENDENT_P_VALID Usage of DECL_LANG_FLAG_?: 0: DECL_ERROR_REPORTED (in VAR_DECL). DECL_TEMPLATE_PARM_P (in PARM_DECL, CONST_DECL, TYPE_DECL, or TEMPLATE_DECL) DECL_LOCAL_FUNCTION_P (in FUNCTION_DECL) DECL_MUTABLE_P (in FIELD_DECL) DECL_DEPENDENT_P (in USING_DECL) 1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL). DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL) DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL) FUNCTION_PARAMETER_PACK_P (in PARM_DECL) 2: DECL_THIS_EXTERN (in VAR_DECL or FUNCTION_DECL). DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL) 3: DECL_IN_AGGR_P. 4: DECL_C_BIT_FIELD (in a FIELD_DECL) DECL_ANON_UNION_VAR_P (in a VAR_DECL) DECL_SELF_REFERENCE_P (in a TYPE_DECL) DECL_INVALID_OVERRIDER_P (in a FUNCTION_DECL) 5: DECL_INTERFACE_KNOWN. 6: DECL_THIS_STATIC (in VAR_DECL or FUNCTION_DECL). DECL_FIELD_IS_BASE (in FIELD_DECL) 7: DECL_DEAD_FOR_LOCAL (in VAR_DECL). DECL_THUNK_P (in a member FUNCTION_DECL) DECL_NORMAL_CAPTURE_P (in FIELD_DECL) 8: DECL_DECLARED_CONSTEXPR_P (in VAR_DECL, FUNCTION_DECL) Usage of language-independent fields in a language-dependent manner: TYPE_ALIAS_SET This field is used by TYPENAME_TYPEs, TEMPLATE_TYPE_PARMs, and so forth as a substitute for the mark bits provided in `lang_type'. At present, only the six low-order bits are used. TYPE_LANG_SLOT_1 For an ENUMERAL_TYPE, this is ENUM_TEMPLATE_INFO. For a FUNCTION_TYPE or METHOD_TYPE, this is TYPE_RAISES_EXCEPTIONS BINFO_VIRTUALS For a binfo, this is a TREE_LIST. 
There is an entry for each virtual function declared either in BINFO or its direct and indirect primary bases. The BV_DELTA of each node gives the amount by which to adjust the `this' pointer when calling the function. If the method is an overridden version of a base class method, then it is assumed that, prior to adjustment, the this pointer points to an object of the base class. The BV_VCALL_INDEX of each node, if non-NULL, gives the vtable index of the vcall offset for this entry. The BV_FN is the declaration for the virtual function itself. If BV_LOST_PRIMARY is set, it means that this entry is for a lost primary virtual base and can be left null in the vtable. BINFO_VTABLE This is an expression with POINTER_TYPE that gives the value to which the vptr should be initialized. Use get_vtbl_decl_for_binfo to extract the VAR_DECL for the complete vtable. DECL_VINDEX This field is NULL for a non-virtual function. For a virtual function, it is eventually set to an INTEGER_CST indicating the index in the vtable at which this function can be found. When a virtual function is declared, but before it is known what function is overridden, this field is the error_mark_node. Temporarily, it may be set to a TREE_LIST whose TREE_VALUE is the virtual function this one overrides, and whose TREE_CHAIN is the old DECL_VINDEX. */ /* Language-specific tree checkers. 
*/ #define VAR_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK2(NODE,VAR_DECL,FUNCTION_DECL) #define VAR_FUNCTION_OR_PARM_DECL_CHECK(NODE) \ TREE_CHECK3(NODE,VAR_DECL,FUNCTION_DECL,PARM_DECL) #define VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK4(NODE,VAR_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL) #define BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK(NODE) \ TREE_CHECK(NODE,BOUND_TEMPLATE_TEMPLATE_PARM) #if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007) #define THUNK_FUNCTION_CHECK(NODE) __extension__ \ ({ __typeof (NODE) const __t = (NODE); \ if (TREE_CODE (__t) != FUNCTION_DECL || !__t->decl_common.lang_specific \ || !__t->decl_common.lang_specific->u.fn.thunk_p) \ tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \ __t; }) #else #define THUNK_FUNCTION_CHECK(NODE) (NODE) #endif /* Language-dependent contents of an identifier. */ struct GTY(()) lang_identifier { struct c_common_identifier c_common; cxx_binding *namespace_bindings; cxx_binding *bindings; tree class_template_info; tree label_value; }; /* In an IDENTIFIER_NODE, nonzero if this identifier is actually a keyword. C_RID_CODE (node) is then the RID_* value of the keyword, and C_RID_YYCODE is the token number wanted by Yacc. 
*/ #define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_5 (ID) #define LANG_IDENTIFIER_CAST(NODE) \ ((struct lang_identifier*)IDENTIFIER_NODE_CHECK (NODE)) struct GTY(()) template_parm_index_s { struct tree_common common; int index; int level; int orig_level; int num_siblings; tree decl; }; typedef struct template_parm_index_s template_parm_index; struct GTY(()) ptrmem_cst { struct tree_common common; tree member; }; typedef struct ptrmem_cst * ptrmem_cst_t; #define IDENTIFIER_GLOBAL_VALUE(NODE) \ namespace_binding ((NODE), global_namespace) #define SET_IDENTIFIER_GLOBAL_VALUE(NODE, VAL) \ set_namespace_binding ((NODE), global_namespace, (VAL)) #define IDENTIFIER_NAMESPACE_VALUE(NODE) \ namespace_binding ((NODE), current_namespace) #define SET_IDENTIFIER_NAMESPACE_VALUE(NODE, VAL) \ set_namespace_binding ((NODE), current_namespace, (VAL)) #define CLEANUP_P(NODE) TREE_LANG_FLAG_0 (TRY_BLOCK_CHECK (NODE)) #define BIND_EXPR_TRY_BLOCK(NODE) \ TREE_LANG_FLAG_0 (BIND_EXPR_CHECK (NODE)) /* Used to mark the block around the member initializers and cleanups. */ #define BIND_EXPR_BODY_BLOCK(NODE) \ TREE_LANG_FLAG_3 (BIND_EXPR_CHECK (NODE)) #define FUNCTION_NEEDS_BODY_BLOCK(NODE) \ (DECL_CONSTRUCTOR_P (NODE) || DECL_DESTRUCTOR_P (NODE)) #define STATEMENT_LIST_NO_SCOPE(NODE) \ TREE_LANG_FLAG_0 (STATEMENT_LIST_CHECK (NODE)) #define STATEMENT_LIST_TRY_BLOCK(NODE) \ TREE_LANG_FLAG_2 (STATEMENT_LIST_CHECK (NODE)) /* Nonzero if this statement should be considered a full-expression, i.e., if temporaries created during this statement should have their destructors run at the end of this statement. */ #define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE)) /* Marks the result of a statement expression. */ #define EXPR_STMT_STMT_EXPR_RESULT(NODE) \ TREE_LANG_FLAG_0 (EXPR_STMT_CHECK (NODE)) /* Nonzero if this statement-expression does not have an associated scope. 
*/ #define STMT_EXPR_NO_SCOPE(NODE) \ TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE)) /* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual sense of `same'. */ #define same_type_p(TYPE1, TYPE2) \ comptypes ((TYPE1), (TYPE2), COMPARE_STRICT) /* Nonzero if we are presently building a statement tree, rather than expanding each statement as we encounter it. */ #define building_stmt_tree() (cur_stmt_list != NULL_TREE) /* Returns nonzero iff NODE is a declaration for the global function `main'. */ #define DECL_MAIN_P(NODE) \ (DECL_EXTERN_C_FUNCTION_P (NODE) \ && DECL_NAME (NODE) != NULL_TREE \ && MAIN_NAME_P (DECL_NAME (NODE)) \ && flag_hosted) /* The overloaded FUNCTION_DECL. */ #define OVL_FUNCTION(NODE) \ (((struct tree_overload*)OVERLOAD_CHECK (NODE))->function) #define OVL_CHAIN(NODE) TREE_CHAIN (NODE) /* Polymorphic access to FUNCTION and CHAIN. */ #define OVL_CURRENT(NODE) \ ((TREE_CODE (NODE) == OVERLOAD) ? OVL_FUNCTION (NODE) : (NODE)) #define OVL_NEXT(NODE) \ ((TREE_CODE (NODE) == OVERLOAD) ? TREE_CHAIN (NODE) : NULL_TREE) /* If set, this was imported in a using declaration. This is not to confuse with being used somewhere, which is not important for this node. */ #define OVL_USED(NODE) TREE_USED (NODE) struct GTY(()) tree_overload { struct tree_common common; tree function; }; /* Returns true iff NODE is a BASELINK. */ #define BASELINK_P(NODE) \ (TREE_CODE (NODE) == BASELINK) /* The BINFO indicating the base from which the BASELINK_FUNCTIONS came. */ #define BASELINK_BINFO(NODE) \ (((struct tree_baselink*) BASELINK_CHECK (NODE))->binfo) /* The functions referred to by the BASELINK; either a FUNCTION_DECL, a TEMPLATE_DECL, an OVERLOAD, or a TEMPLATE_ID_EXPR. */ #define BASELINK_FUNCTIONS(NODE) \ (((struct tree_baselink*) BASELINK_CHECK (NODE))->functions) /* The BINFO in which the search for the functions indicated by this baselink began. This base is used to determine the accessibility of functions selected by overload resolution. 
*/ #define BASELINK_ACCESS_BINFO(NODE) \ (((struct tree_baselink*) BASELINK_CHECK (NODE))->access_binfo) /* For a type-conversion operator, the BASELINK_OPTYPE indicates the type to which the conversion should occur. This value is important if the BASELINK_FUNCTIONS include a template conversion operator -- the BASELINK_OPTYPE can be used to determine what type the user requested. */ #define BASELINK_OPTYPE(NODE) \ (TREE_CHAIN (BASELINK_CHECK (NODE))) /* Nonzero if this baselink was from a qualified lookup. */ #define BASELINK_QUALIFIED_P(NODE) \ TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE)) struct GTY(()) tree_baselink { struct tree_common common; tree binfo; tree functions; tree access_binfo; }; /* The different kinds of ids that we encounter. */ typedef enum cp_id_kind { /* Not an id at all. */ CP_ID_KIND_NONE, /* An unqualified-id that is not a template-id. */ CP_ID_KIND_UNQUALIFIED, /* An unqualified-id that is a dependent name. */ CP_ID_KIND_UNQUALIFIED_DEPENDENT, /* An unqualified template-id. */ CP_ID_KIND_TEMPLATE_ID, /* A qualified-id. */ CP_ID_KIND_QUALIFIED } cp_id_kind; /* The various kinds of C++0x warnings we encounter. */ typedef enum cpp0x_warn_str { /* extended initializer lists */ CPP0X_INITIALIZER_LISTS, /* explicit conversion operators */ CPP0X_EXPLICIT_CONVERSION, /* variadic templates */ CPP0X_VARIADIC_TEMPLATES, /* lambda expressions */ CPP0X_LAMBDA_EXPR, /* C++0x auto */ CPP0X_AUTO, /* scoped enums */ CPP0X_SCOPED_ENUMS, /* defaulted and deleted functions */ CPP0X_DEFAULTED_DELETED, /* inline namespaces */ CPP0X_INLINE_NAMESPACES } cpp0x_warn_str; /* The various kinds of operation used by composite_pointer_type. */ typedef enum composite_pointer_operation { /* comparison */ CPO_COMPARISON, /* conversion */ CPO_CONVERSION, /* conditional expression */ CPO_CONDITIONAL_EXPR } composite_pointer_operation; /* Possible cases of expression list used by build_x_compound_expr_from_list. 
*/ typedef enum expr_list_kind { ELK_INIT, /* initializer */ ELK_MEM_INIT, /* member initializer */ ELK_FUNC_CAST /* functional cast */ } expr_list_kind; /* Possible cases of implicit bad rhs conversions. */ typedef enum impl_conv_rhs { ICR_DEFAULT_ARGUMENT, /* default argument */ ICR_CONVERTING, /* converting */ ICR_INIT, /* initialization */ ICR_ARGPASS, /* argument passing */ ICR_RETURN, /* return */ ICR_ASSIGN /* assignment */ } impl_conv_rhs; /* Possible cases of implicit or explicit bad conversions to void. */ typedef enum impl_conv_void { ICV_CAST, /* (explicit) conversion to void */ ICV_SECOND_OF_COND, /* second operand of conditional expression */ ICV_THIRD_OF_COND, /* third operand of conditional expression */ ICV_RIGHT_OF_COMMA, /* right operand of comma operator */ ICV_LEFT_OF_COMMA, /* left operand of comma operator */ ICV_STATEMENT, /* statement */ ICV_THIRD_IN_FOR /* for increment expression */ } impl_conv_void; /* Macros for access to language-specific slots in an identifier. */ #define IDENTIFIER_NAMESPACE_BINDINGS(NODE) \ (LANG_IDENTIFIER_CAST (NODE)->namespace_bindings) #define IDENTIFIER_TEMPLATE(NODE) \ (LANG_IDENTIFIER_CAST (NODE)->class_template_info) /* The IDENTIFIER_BINDING is the innermost cxx_binding for the identifier. It's PREVIOUS is the next outermost binding. Each VALUE field is a DECL for the associated declaration. Thus, name lookup consists simply of pulling off the node at the front of the list (modulo oddities for looking up the names of types, and such.) You can use SCOPE field to determine the scope that bound the name. */ #define IDENTIFIER_BINDING(NODE) \ (LANG_IDENTIFIER_CAST (NODE)->bindings) /* TREE_TYPE only indicates on local and class scope the current type. For namespace scope, the presence of a type in any namespace is indicated with global_type_node, and the real type behind must be found through lookup. 
*/ #define IDENTIFIER_TYPE_VALUE(NODE) identifier_type_value (NODE) #define REAL_IDENTIFIER_TYPE_VALUE(NODE) TREE_TYPE (NODE) #define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = (TYPE)) #define IDENTIFIER_HAS_TYPE_VALUE(NODE) (IDENTIFIER_TYPE_VALUE (NODE) ? 1 : 0) #define IDENTIFIER_LABEL_VALUE(NODE) \ (LANG_IDENTIFIER_CAST (NODE)->label_value) #define SET_IDENTIFIER_LABEL_VALUE(NODE, VALUE) \ IDENTIFIER_LABEL_VALUE (NODE) = (VALUE) /* Nonzero if this identifier is used as a virtual function name somewhere (optimizes searches). */ #define IDENTIFIER_VIRTUAL_P(NODE) TREE_LANG_FLAG_1 (NODE) /* Nonzero if this identifier is the prefix for a mangled C++ operator name. */ #define IDENTIFIER_OPNAME_P(NODE) TREE_LANG_FLAG_2 (NODE) /* Nonzero if this identifier is the name of a type-conversion operator. */ #define IDENTIFIER_TYPENAME_P(NODE) \ TREE_LANG_FLAG_4 (NODE) /* Nonzero if this identifier is the name of a constructor or destructor. */ #define IDENTIFIER_CTOR_OR_DTOR_P(NODE) \ TREE_LANG_FLAG_3 (NODE) /* True iff NAME is the DECL_ASSEMBLER_NAME for an entity with vague linkage which the prelinker has assigned to this translation unit. */ #define IDENTIFIER_REPO_CHOSEN(NAME) \ (TREE_LANG_FLAG_6 (NAME)) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ #define C_TYPE_FIELDS_READONLY(TYPE) \ (LANG_TYPE_CLASS_CHECK (TYPE)->fields_readonly) /* The tokens stored in the default argument. */ #define DEFARG_TOKENS(NODE) \ (((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->tokens) #define DEFARG_INSTANTIATIONS(NODE) \ (((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->instantiations) struct GTY (()) tree_default_arg { struct tree_common common; struct cp_token_cache *tokens; VEC(tree,gc) *instantiations; }; /* The condition associated with the static assertion. This must be an integral constant expression. 
*/ #define STATIC_ASSERT_CONDITION(NODE) \ (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->condition) /* The message associated with the static assertion. This must be a string constant, which will be emitted as an error message when the static assert condition is false. */ #define STATIC_ASSERT_MESSAGE(NODE) \ (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->message) /* Source location information for a static assertion. */ #define STATIC_ASSERT_SOURCE_LOCATION(NODE) \ (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->location) struct GTY (()) tree_static_assert { struct tree_common common; tree condition; tree message; location_t location; }; struct GTY (()) tree_argument_pack_select { struct tree_common common; tree argument_pack; int index; }; /* The different kinds of traits that we encounter. */ typedef enum cp_trait_kind { CPTK_HAS_NOTHROW_ASSIGN, CPTK_HAS_NOTHROW_CONSTRUCTOR, CPTK_HAS_NOTHROW_COPY, CPTK_HAS_TRIVIAL_ASSIGN, CPTK_HAS_TRIVIAL_CONSTRUCTOR, CPTK_HAS_TRIVIAL_COPY, CPTK_HAS_TRIVIAL_DESTRUCTOR, CPTK_HAS_VIRTUAL_DESTRUCTOR, CPTK_IS_ABSTRACT, CPTK_IS_BASE_OF, CPTK_IS_CLASS, CPTK_IS_CONVERTIBLE_TO, CPTK_IS_EMPTY, CPTK_IS_ENUM, CPTK_IS_POD, CPTK_IS_POLYMORPHIC, CPTK_IS_STD_LAYOUT, CPTK_IS_TRIVIAL, CPTK_IS_LITERAL_TYPE, CPTK_IS_UNION } cp_trait_kind; /* The types that we are processing. */ #define TRAIT_EXPR_TYPE1(NODE) \ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type1) #define TRAIT_EXPR_TYPE2(NODE) \ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type2) /* The specific trait that we are processing. */ #define TRAIT_EXPR_KIND(NODE) \ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->kind) struct GTY (()) tree_trait_expr { struct tree_common common; tree type1; tree type2; enum cp_trait_kind kind; }; /* Based off of TYPE_ANONYMOUS_P. */ #define LAMBDA_TYPE_P(NODE) \ (CLASS_TYPE_P (NODE) && LAMBDANAME_P (TYPE_LINKAGE_IDENTIFIER (NODE))) /* Test if FUNCTION_DECL is a lambda function. 
*/ #define LAMBDA_FUNCTION_P(FNDECL) \ (DECL_OVERLOADED_OPERATOR_P (FNDECL) == CALL_EXPR \ && LAMBDA_TYPE_P (CP_DECL_CONTEXT (FNDECL))) enum cp_lambda_default_capture_mode_type { CPLD_NONE, CPLD_COPY, CPLD_REFERENCE }; /* The method of default capture, if any. */ #define LAMBDA_EXPR_DEFAULT_CAPTURE_MODE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->default_capture_mode) /* The capture-list, including `this'. Each capture is stored as a FIELD_DECL * so that the name, type, and field are all together, whether or not it has * been added to the lambda's class type. TREE_LIST: TREE_PURPOSE: The FIELD_DECL for this capture. TREE_VALUE: The initializer. This is part of a GNU extension. */ #define LAMBDA_EXPR_CAPTURE_LIST(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->capture_list) /* The node in the capture-list that holds the 'this' capture. */ #define LAMBDA_EXPR_THIS_CAPTURE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->this_capture) /* Predicate tracking whether `this' is in the effective capture set. */ #define LAMBDA_EXPR_CAPTURES_THIS_P(NODE) \ LAMBDA_EXPR_THIS_CAPTURE(NODE) /* Predicate tracking whether the lambda was declared 'mutable'. */ #define LAMBDA_EXPR_MUTABLE_P(NODE) \ TREE_LANG_FLAG_1 (LAMBDA_EXPR_CHECK (NODE)) /* True iff we should try to deduce the lambda return type from any return statement. */ #define LAMBDA_EXPR_DEDUCE_RETURN_TYPE_P(NODE) \ TREE_LANG_FLAG_2 (LAMBDA_EXPR_CHECK (NODE)) /* The return type in the expression. * NULL_TREE indicates that none was specified. */ #define LAMBDA_EXPR_RETURN_TYPE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->return_type) /* The source location of the lambda. */ #define LAMBDA_EXPR_LOCATION(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->locus) /* The mangling scope for the lambda: FUNCTION_DECL, PARM_DECL, VAR_DECL, FIELD_DECL or NULL_TREE. If this is NULL_TREE, we have no linkage. 
*/ #define LAMBDA_EXPR_EXTRA_SCOPE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->extra_scope) /* If EXTRA_SCOPE, this is the number of the lambda within that scope. */ #define LAMBDA_EXPR_DISCRIMINATOR(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->discriminator) struct GTY (()) tree_lambda_expr { struct tree_common common; location_t locus; enum cp_lambda_default_capture_mode_type default_capture_mode; tree capture_list; tree this_capture; tree return_type; tree extra_scope; int discriminator; }; /* A (typedef,context,usage location) triplet. It represents a typedef used through a context at a given source location. e.g. struct foo { typedef int myint; }; struct bar { foo::myint v; // #1<-- this location. }; In bar, the triplet will be (myint, foo, #1). */ struct GTY(()) qualified_typedef_usage_s { tree typedef_decl; tree context; location_t locus; }; typedef struct qualified_typedef_usage_s qualified_typedef_usage_t; DEF_VEC_O (qualified_typedef_usage_t); DEF_VEC_ALLOC_O (qualified_typedef_usage_t,gc); struct GTY(()) tree_template_info { struct tree_common common; VEC(qualified_typedef_usage_t,gc) *typedefs_needing_access_checking; }; enum cp_tree_node_structure_enum { TS_CP_GENERIC, TS_CP_IDENTIFIER, TS_CP_TPI, TS_CP_PTRMEM, TS_CP_BINDING, TS_CP_OVERLOAD, TS_CP_BASELINK, TS_CP_WRAPPER, TS_CP_DEFAULT_ARG, TS_CP_STATIC_ASSERT, TS_CP_ARGUMENT_PACK_SELECT, TS_CP_TRAIT_EXPR, TS_CP_LAMBDA_EXPR, TS_CP_TEMPLATE_INFO, LAST_TS_CP_ENUM }; /* The resulting tree type. 
*/
union GTY((desc ("cp_tree_node_structure (&%h)"),
	   chain_next ("(union lang_tree_node *)TREE_CHAIN (&%h.generic)")))
  lang_tree_node {
  union tree_node GTY ((tag ("TS_CP_GENERIC"),
			desc ("tree_node_structure (&%h)"))) generic;
  struct template_parm_index_s GTY ((tag ("TS_CP_TPI"))) tpi;
  struct ptrmem_cst GTY ((tag ("TS_CP_PTRMEM"))) ptrmem;
  struct tree_overload GTY ((tag ("TS_CP_OVERLOAD"))) overload;
  struct tree_baselink GTY ((tag ("TS_CP_BASELINK"))) baselink;
  struct tree_default_arg GTY ((tag ("TS_CP_DEFAULT_ARG"))) default_arg;
  struct lang_identifier GTY ((tag ("TS_CP_IDENTIFIER"))) identifier;
  struct tree_static_assert GTY ((tag ("TS_CP_STATIC_ASSERT")))
    static_assertion;
  struct tree_argument_pack_select GTY ((tag ("TS_CP_ARGUMENT_PACK_SELECT")))
    argument_pack_select;
  struct tree_trait_expr GTY ((tag ("TS_CP_TRAIT_EXPR"))) trait_expression;
  struct tree_lambda_expr GTY ((tag ("TS_CP_LAMBDA_EXPR"))) lambda_expression;
  struct tree_template_info GTY ((tag ("TS_CP_TEMPLATE_INFO"))) template_info;
};

/* Indices into the cp_global_trees array below, one per cached global
   tree node.  The order here must match the accessor macros that
   follow.  */
enum cp_tree_index
{
    CPTI_JAVA_BYTE_TYPE,
    CPTI_JAVA_SHORT_TYPE,
    CPTI_JAVA_INT_TYPE,
    CPTI_JAVA_LONG_TYPE,
    CPTI_JAVA_FLOAT_TYPE,
    CPTI_JAVA_DOUBLE_TYPE,
    CPTI_JAVA_CHAR_TYPE,
    CPTI_JAVA_BOOLEAN_TYPE,
    CPTI_WCHAR_DECL,
    CPTI_VTABLE_ENTRY_TYPE,
    CPTI_DELTA_TYPE,
    CPTI_VTABLE_INDEX_TYPE,
    CPTI_CLEANUP_TYPE,
    CPTI_VTT_PARM_TYPE,
    CPTI_CLASS_TYPE,
    CPTI_UNKNOWN_TYPE,
    CPTI_INIT_LIST_TYPE,
    CPTI_VTBL_TYPE,
    CPTI_VTBL_PTR_TYPE,
    CPTI_STD,
    CPTI_ABI,
    CPTI_CONST_TYPE_INFO_TYPE,
    CPTI_TYPE_INFO_PTR_TYPE,
    CPTI_ABORT_FNDECL,
    CPTI_GLOBAL_DELETE_FNDECL,
    CPTI_AGGR_TAG,

    CPTI_CTOR_IDENTIFIER,
    CPTI_COMPLETE_CTOR_IDENTIFIER,
    CPTI_BASE_CTOR_IDENTIFIER,
    CPTI_DTOR_IDENTIFIER,
    CPTI_COMPLETE_DTOR_IDENTIFIER,
    CPTI_BASE_DTOR_IDENTIFIER,
    CPTI_DELETING_DTOR_IDENTIFIER,
    CPTI_DELTA_IDENTIFIER,
    CPTI_IN_CHARGE_IDENTIFIER,
    CPTI_VTT_PARM_IDENTIFIER,
    CPTI_NELTS_IDENTIFIER,
    CPTI_THIS_IDENTIFIER,
    CPTI_PFN_IDENTIFIER,
    CPTI_VPTR_IDENTIFIER,
    CPTI_STD_IDENTIFIER,

    CPTI_LANG_NAME_C,
    CPTI_LANG_NAME_CPLUSPLUS,
    CPTI_LANG_NAME_JAVA,

    CPTI_EMPTY_EXCEPT_SPEC,
    CPTI_NOEXCEPT_TRUE_SPEC,
    CPTI_NOEXCEPT_FALSE_SPEC,
    CPTI_JCLASS,
    CPTI_TERMINATE,
    CPTI_CALL_UNEXPECTED,
    CPTI_ATEXIT_FN_PTR_TYPE,
    CPTI_ATEXIT,
    CPTI_DSO_HANDLE,
    CPTI_DCAST,

    CPTI_KEYED_CLASSES,

    CPTI_NULLPTR,
    CPTI_NULLPTR_TYPE,

    CPTI_MAX
};

/* The cache of global trees, indexed by enum cp_tree_index.  */
extern GTY(()) tree cp_global_trees[CPTI_MAX];

/* Type nodes used for 'extern "Java"' declarations.  */
#define java_byte_type_node cp_global_trees[CPTI_JAVA_BYTE_TYPE]
#define java_short_type_node cp_global_trees[CPTI_JAVA_SHORT_TYPE]
#define java_int_type_node cp_global_trees[CPTI_JAVA_INT_TYPE]
#define java_long_type_node cp_global_trees[CPTI_JAVA_LONG_TYPE]
#define java_float_type_node cp_global_trees[CPTI_JAVA_FLOAT_TYPE]
#define java_double_type_node cp_global_trees[CPTI_JAVA_DOUBLE_TYPE]
#define java_char_type_node cp_global_trees[CPTI_JAVA_CHAR_TYPE]
#define java_boolean_type_node cp_global_trees[CPTI_JAVA_BOOLEAN_TYPE]

#define wchar_decl_node cp_global_trees[CPTI_WCHAR_DECL]

#define vtable_entry_type cp_global_trees[CPTI_VTABLE_ENTRY_TYPE]
/* The type used to represent an offset by which to adjust the `this'
   pointer in pointer-to-member types.  */
#define delta_type_node cp_global_trees[CPTI_DELTA_TYPE]
/* The type used to represent an index into the vtable.
*/
#define vtable_index_type cp_global_trees[CPTI_VTABLE_INDEX_TYPE]

#define class_type_node cp_global_trees[CPTI_CLASS_TYPE]
#define unknown_type_node cp_global_trees[CPTI_UNKNOWN_TYPE]
#define init_list_type_node cp_global_trees[CPTI_INIT_LIST_TYPE]
#define vtbl_type_node cp_global_trees[CPTI_VTBL_TYPE]
#define vtbl_ptr_type_node cp_global_trees[CPTI_VTBL_PTR_TYPE]
/* NAMESPACE_DECLs for `std' and the ABI namespace, presumably; confirm
   where CPTI_STD/CPTI_ABI are initialized.  */
#define std_node cp_global_trees[CPTI_STD]
#define abi_node cp_global_trees[CPTI_ABI]
#define const_type_info_type_node cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE]
#define type_info_ptr_type cp_global_trees[CPTI_TYPE_INFO_PTR_TYPE]
#define abort_fndecl cp_global_trees[CPTI_ABORT_FNDECL]
#define global_delete_fndecl cp_global_trees[CPTI_GLOBAL_DELETE_FNDECL]
#define current_aggr cp_global_trees[CPTI_AGGR_TAG]
#define nullptr_node cp_global_trees[CPTI_NULLPTR]
#define nullptr_type_node cp_global_trees[CPTI_NULLPTR_TYPE]

/* We cache these tree nodes so as to call get_identifier less frequently.  */

/* The name of a constructor that takes an in-charge parameter to
   decide whether or not to construct virtual base classes.  */
#define ctor_identifier cp_global_trees[CPTI_CTOR_IDENTIFIER]
/* The name of a constructor that constructs virtual base classes.  */
#define complete_ctor_identifier cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER]
/* The name of a constructor that does not construct virtual base classes.  */
#define base_ctor_identifier cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER]
/* The name of a destructor that takes an in-charge parameter to
   decide whether or not to destroy virtual base classes and whether
   or not to delete the object.  */
#define dtor_identifier cp_global_trees[CPTI_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes.  */
#define complete_dtor_identifier cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER]
/* The name of a destructor that does not destroy virtual base classes.
*/
#define base_dtor_identifier cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes, and
   then deletes the entire object.  */
#define deleting_dtor_identifier cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER]
#define delta_identifier cp_global_trees[CPTI_DELTA_IDENTIFIER]
#define in_charge_identifier cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER]
/* The name of the parameter that contains a pointer to the VTT to use
   for this subobject constructor or destructor.  */
#define vtt_parm_identifier cp_global_trees[CPTI_VTT_PARM_IDENTIFIER]
#define nelts_identifier cp_global_trees[CPTI_NELTS_IDENTIFIER]
#define this_identifier cp_global_trees[CPTI_THIS_IDENTIFIER]
#define pfn_identifier cp_global_trees[CPTI_PFN_IDENTIFIER]
#define vptr_identifier cp_global_trees[CPTI_VPTR_IDENTIFIER]
/* The name of the std namespace.  */
#define std_identifier cp_global_trees[CPTI_STD_IDENTIFIER]
/* Identifiers naming the three linkage languages tracked by the
   front end (see enum languages further below).  */
#define lang_name_c cp_global_trees[CPTI_LANG_NAME_C]
#define lang_name_cplusplus cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS]
#define lang_name_java cp_global_trees[CPTI_LANG_NAME_JAVA]

/* Exception specifier used for throw().  */
#define empty_except_spec cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC]
#define noexcept_true_spec cp_global_trees[CPTI_NOEXCEPT_TRUE_SPEC]
#define noexcept_false_spec cp_global_trees[CPTI_NOEXCEPT_FALSE_SPEC]

/* If non-NULL, a POINTER_TYPE equivalent to (java::lang::Class*).  */
#define jclass_node cp_global_trees[CPTI_JCLASS]

/* The declaration for `std::terminate'.  */
#define terminate_node cp_global_trees[CPTI_TERMINATE]

/* The declaration for "__cxa_call_unexpected".  */
#define call_unexpected_node cp_global_trees[CPTI_CALL_UNEXPECTED]

/* The type of the function-pointer argument to "__cxa_atexit" (or
   "std::atexit", if "__cxa_atexit" is not being used).  */
#define atexit_fn_ptr_type_node cp_global_trees[CPTI_ATEXIT_FN_PTR_TYPE]

/* A pointer to `std::atexit'.
*/
#define atexit_node cp_global_trees[CPTI_ATEXIT]

/* A pointer to `__dso_handle'.  */
#define dso_handle_node cp_global_trees[CPTI_DSO_HANDLE]

/* The declaration of the dynamic_cast runtime.  */
#define dynamic_cast_node cp_global_trees[CPTI_DCAST]

/* The type of a destructor.  */
#define cleanup_type cp_global_trees[CPTI_CLEANUP_TYPE]

/* The type of the vtt parameter passed to subobject constructors and
   destructors.  */
#define vtt_parm_type cp_global_trees[CPTI_VTT_PARM_TYPE]

/* A TREE_LIST of the dynamic classes whose vtables may have to be
   emitted in this translation unit.  */
#define keyed_classes cp_global_trees[CPTI_KEYED_CLASSES]

/* Node to indicate default access. This must be distinct from the
   access nodes in tree.h.  */
#define access_default_node null_node

/* Global state.  The fields are reached through the scope_chain
   accessor macros that follow this structure.  */

struct GTY(()) saved_scope {
  VEC(cxx_saved_binding,gc) *old_bindings;
  tree old_namespace;
  VEC(tree,gc) *decl_ns_list;
  tree class_name;
  tree class_type;
  tree access_specifier;
  tree function_decl;
  VEC(tree,gc) *lang_base;
  tree lang_name;
  tree template_parms;
  struct cp_binding_level *x_previous_class_level;
  tree x_saved_tree;

  int x_processing_template_decl;
  int x_processing_specialization;
  BOOL_BITFIELD x_processing_explicit_instantiation : 1;
  BOOL_BITFIELD need_pop_function_context : 1;

  int unevaluated_operand;
  int inhibit_evaluation_warnings;

  struct stmt_tree_s x_stmt_tree;

  struct cp_binding_level *class_bindings;
  struct cp_binding_level *bindings;

  /* NOTE(review): presumably links to the next outer saved scope,
     making scope_chain a stack -- confirm against push/pop users.  */
  struct saved_scope *prev;
};

/* The current open namespace.  */
#define current_namespace scope_chain->old_namespace

/* The stack for namespaces of current declarations.
*/
#define decl_namespace_list scope_chain->decl_ns_list

/* IDENTIFIER_NODE: name of current class */
#define current_class_name scope_chain->class_name

/* _TYPE: the type of the current class */
#define current_class_type scope_chain->class_type

/* When parsing a class definition, the access specifier most recently
   given by the user, or, if no access specifier was given, the
   default value appropriate for the kind of class (i.e., struct,
   class, or union).  */
#define current_access_specifier scope_chain->access_specifier

/* Pointer to the top of the language name stack.  */
#define current_lang_base scope_chain->lang_base
#define current_lang_name scope_chain->lang_name

/* When parsing a template declaration, a TREE_LIST represents the
   active template parameters.  Each node in the list represents one
   level of template parameters.  The innermost level is first in the
   list.  The depth of each level is stored as an INTEGER_CST in the
   TREE_PURPOSE of each node.  The parameters for that level are
   stored in the TREE_VALUE.  */
#define current_template_parms scope_chain->template_parms

#define processing_template_decl scope_chain->x_processing_template_decl
#define processing_specialization scope_chain->x_processing_specialization
#define processing_explicit_instantiation scope_chain->x_processing_explicit_instantiation

/* The cached class binding level, from the most recently exited
   class, or NULL if none.  */
#define previous_class_level scope_chain->x_previous_class_level

/* A list of private types mentioned, for deferred access checking.  */

/* The innermost saved_scope; see the accessor macros above.  */
extern GTY(()) struct saved_scope *scope_chain;

/* A (uid -> tree) mapping entry; hashed by the functions declared
   just below.  */
struct GTY(()) cxx_int_tree_map {
  unsigned int uid;
  tree to;
};

extern unsigned int cxx_int_tree_map_hash (const void *);
extern int cxx_int_tree_map_eq (const void *, const void *);

/* Global state pertinent to the current function.
*/
struct GTY(()) language_function {
  struct c_language_function base;

  tree x_cdtor_label;
  tree x_current_class_ptr;
  tree x_current_class_ref;
  tree x_eh_spec_block;
  tree x_in_charge_parm;
  tree x_vtt_parm;
  tree x_return_value;

  BOOL_BITFIELD returns_value : 1;
  BOOL_BITFIELD returns_null : 1;
  BOOL_BITFIELD returns_abnormally : 1;
  BOOL_BITFIELD in_function_try_handler : 1;
  BOOL_BITFIELD in_base_initializer : 1;

  /* True if this function can throw an exception.  */
  BOOL_BITFIELD can_throw : 1;

  htab_t GTY((param_is(struct named_label_entry))) x_named_labels;
  struct cp_binding_level *bindings;
  VEC(tree,gc) *x_local_names;
  htab_t GTY((param_is (struct cxx_int_tree_map))) extern_decl_map;
};

/* The current C++-specific per-function global variables.  */

#define cp_function_chain (cfun->language)

/* In a constructor destructor, the point at which all derived class
   destroying/construction has been done.  I.e., just before a
   constructor returns, or before any base class destroying will be done
   in a destructor.  */

#define cdtor_label cp_function_chain->x_cdtor_label

/* When we're processing a member function, current_class_ptr is the
   PARM_DECL for the `this' pointer.  The current_class_ref is an
   expression for `*this'.  Both expand to NULL_TREE when no function
   context is available.  */

#define current_class_ptr			\
  (cfun && cp_function_chain			\
   ? cp_function_chain->x_current_class_ptr : NULL_TREE)
#define current_class_ref			\
  ((cfun && cp_function_chain)			\
   ? cp_function_chain->x_current_class_ref : NULL_TREE)

/* The EH_SPEC_BLOCK for the exception-specifiers for the current
   function, if any.  */

#define current_eh_spec_block cp_function_chain->x_eh_spec_block

/* The `__in_chrg' parameter for the current function.  Only used for
   constructors and destructors.  */

#define current_in_charge_parm cp_function_chain->x_in_charge_parm

/* The `__vtt_parm' parameter for the current function.  Only used for
   constructors and destructors.
*/
#define current_vtt_parm cp_function_chain->x_vtt_parm

/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement that specifies a return value is seen.  */

#define current_function_returns_value cp_function_chain->returns_value

/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement with no argument is seen.  */

#define current_function_returns_null cp_function_chain->returns_null

/* Set to 0 at beginning of a function definition, set to 1 if
   a call to a noreturn function is seen.  */

#define current_function_returns_abnormally \
  cp_function_chain->returns_abnormally

/* Nonzero if we are processing a base initializer.  Zero elsewhere.  */
#define in_base_initializer cp_function_chain->in_base_initializer

#define in_function_try_handler cp_function_chain->in_function_try_handler

/* Expression always returned from function, or error_mark_node
   otherwise, for use by the automatic named return value optimization.  */

#define current_function_return_value \
  (cp_function_chain->x_return_value)

/* True if NAME is the IDENTIFIER_NODE for an overloaded "operator
   new" or "operator delete".  */
#define NEW_DELETE_OPNAME_P(NAME)		\
  ((NAME) == ansi_opname (NEW_EXPR)		\
   || (NAME) == ansi_opname (VEC_NEW_EXPR)	\
   || (NAME) == ansi_opname (DELETE_EXPR)	\
   || (NAME) == ansi_opname (VEC_DELETE_EXPR))

/* Look up the IDENTIFIER_NODE naming the operator (or assignment
   operator) with the given tree code.  */
#define ansi_opname(CODE) \
  (operator_name_info[(int) (CODE)].identifier)
#define ansi_assopname(CODE) \
  (assignment_operator_name_info[(int) (CODE)].identifier)

/* True if NODE is an erroneous expression.  */

#define error_operand_p(NODE)					\
  ((NODE) == error_mark_node					\
   || ((NODE) && TREE_TYPE ((NODE)) == error_mark_node))

/* TRUE if a tree code represents a statement.  */
extern bool statement_code_p[MAX_TREE_CODES];

#define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)]

/* The linkage languages recognized by the front end; compare the
   lang_name_* identifier macros above.  */
enum languages { lang_c, lang_cplusplus, lang_java };

/* Macros to make error reporting functions' lives easier.
*/
#define TYPE_IDENTIFIER(NODE) (DECL_NAME (TYPE_NAME (NODE)))
#define TYPE_LINKAGE_IDENTIFIER(NODE) \
  (TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE)))
#define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE)))
#define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE)))

/* Nonzero if NODE has no name for linkage purposes.  */
#define TYPE_ANONYMOUS_P(NODE) \
  (TAGGED_TYPE_P (NODE) && ANON_AGGRNAME_P (TYPE_LINKAGE_IDENTIFIER (NODE)))

/* The _DECL for this _TYPE.  */
#define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE)))

/* Nonzero if T is a class (or struct or union) type.  Also nonzero
   for template type parameters, typename types, and instantiated
   template template parameters.  Keep these checks in ascending code
   order.  */
#define MAYBE_CLASS_TYPE_P(T)					\
  (TREE_CODE (T) == TEMPLATE_TYPE_PARM				\
   || TREE_CODE (T) == TYPENAME_TYPE				\
   || TREE_CODE (T) == TYPEOF_TYPE				\
   || TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM		\
   || TREE_CODE (T) == DECLTYPE_TYPE				\
   || CLASS_TYPE_P (T))

/* Set CLASS_TYPE_P for T to VAL.  T must be a class, struct, or
   union type.  */
#define SET_CLASS_TYPE_P(T, VAL) \
  (TYPE_LANG_FLAG_5 (T) = (VAL))

/* Nonzero if T is a class type.  Zero for template type parameters,
   typename types, and so forth.  */
#define CLASS_TYPE_P(T) \
  (RECORD_OR_UNION_CODE_P (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T))

/* Nonzero if T is a class type but not an union.  */
#define NON_UNION_CLASS_TYPE_P(T) \
  (CLASS_TYPE_P (T) && TREE_CODE (T) != UNION_TYPE)

/* Keep these checks in ascending code order.  */
#define RECORD_OR_UNION_CODE_P(T)	\
  ((T) == RECORD_TYPE || (T) == UNION_TYPE)
#define TAGGED_TYPE_P(T) \
  (CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE)
#define IS_OVERLOAD_TYPE(T) TAGGED_TYPE_P (T)

/* True if this a "Java" type, defined in 'extern "Java"'.  */
#define TYPE_FOR_JAVA(NODE) TYPE_LANG_FLAG_3 (NODE)

/* True if this type is dependent.  This predicate is only valid if
   TYPE_DEPENDENT_P_VALID is true.
*/
#define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE)

/* True if dependent_type_p has been called for this type, with the
   result that TYPE_DEPENDENT_P is valid.  */
#define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6(NODE)

/* Nonzero if this type is const-qualified.  */
#define CP_TYPE_CONST_P(NODE)				\
  ((cp_type_quals (NODE) & TYPE_QUAL_CONST) != 0)

/* Nonzero if this type is volatile-qualified.  */
#define CP_TYPE_VOLATILE_P(NODE)			\
  ((cp_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0)

/* Nonzero if this type is restrict-qualified.  */
#define CP_TYPE_RESTRICT_P(NODE)			\
  ((cp_type_quals (NODE) & TYPE_QUAL_RESTRICT) != 0)

/* Nonzero if this type is const-qualified, but not
   volatile-qualified.  Other qualifiers are ignored.  This macro is
   used to test whether or not it is OK to bind an rvalue to a
   reference.  */
#define CP_TYPE_CONST_NON_VOLATILE_P(NODE)				\
  ((cp_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE))	\
   == TYPE_QUAL_CONST)

/* For a FUNCTION_DECL: the parameter-type list past the first
   parameter.  */
#define FUNCTION_ARG_CHAIN(NODE) \
  TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE)))

/* Given a FUNCTION_DECL, returns the first TREE_LIST out of
   TYPE_ARG_TYPES which refers to a user-written parameter.  */
#define FUNCTION_FIRST_USER_PARMTYPE(NODE) \
  skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE)))

/* Similarly, but for DECL_ARGUMENTS.  */
#define FUNCTION_FIRST_USER_PARM(NODE) \
  skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE))

/* Nonzero iff TYPE is derived from PARENT. Ignores accessibility and
   ambiguity issues.  */
#define DERIVED_FROM_P(PARENT, TYPE) \
  (lookup_base ((TYPE), (PARENT), ba_any, NULL) != NULL_TREE)
/* Nonzero iff TYPE is uniquely derived from PARENT. Ignores
   accessibility.  */
#define UNIQUELY_DERIVED_FROM_P(PARENT, TYPE) \
  (lookup_base ((TYPE), (PARENT), ba_unique | ba_quiet, NULL) != NULL_TREE)
/* Nonzero iff TYPE is publicly & uniquely derived from PARENT.
*/
#define PUBLICLY_UNIQUELY_DERIVED_P(PARENT, TYPE) \
  (lookup_base ((TYPE), (PARENT), ba_ignore_scope | ba_check | ba_quiet, \
		NULL) != NULL_TREE)

/* Gives the visibility specification for a class type.  */
#define CLASSTYPE_VISIBILITY(TYPE)		\
	DECL_VISIBILITY (TYPE_MAIN_DECL (TYPE))
#define CLASSTYPE_VISIBILITY_SPECIFIED(TYPE)	\
	DECL_VISIBILITY_SPECIFIED (TYPE_MAIN_DECL (TYPE))

/* A GC-able (purpose, value) pair, and the vector types over it used
   by CLASSTYPE_VCALL_INDICES below.  */
typedef struct GTY (()) tree_pair_s {
  tree purpose;
  tree value;
} tree_pair_s;
typedef tree_pair_s *tree_pair_p;
DEF_VEC_O (tree_pair_s);
DEF_VEC_ALLOC_O (tree_pair_s,gc);

/* This is a few header flags for 'struct lang_type'.  Actually,
   all but the first are used only for lang_type_class; they
   are put in this structure to save space.  */
struct GTY(()) lang_type_header {
  BOOL_BITFIELD is_lang_type_class : 1;

  BOOL_BITFIELD has_type_conversion : 1;
  BOOL_BITFIELD has_copy_ctor : 1;
  BOOL_BITFIELD has_default_ctor : 1;
  BOOL_BITFIELD const_needs_init : 1;
  BOOL_BITFIELD ref_needs_init : 1;
  BOOL_BITFIELD has_const_copy_assign : 1;

  BOOL_BITFIELD spare : 1;
};

/* This structure provides additional information above and beyond
   what is provide in the ordinary tree_type.  In the past, we used it
   for the types of class types, template parameters types, typename
   types, and so forth.  However, there can be many (tens to hundreds
   of thousands) of template parameter types in a compilation, and
   there's no need for this additional information in that case.
   Therefore, we now use this data structure only for class types.

   In the past, it was thought that there would be relatively few
   class types.  However, in the presence of heavy use of templates,
   many (i.e., thousands) of classes can easily be generated.
   Therefore, we should endeavor to keep the size of this structure to
   a minimum.
*/
struct GTY(()) lang_type_class {
  struct lang_type_header h;

  unsigned char align;

  unsigned has_mutable : 1;
  unsigned com_interface : 1;
  unsigned non_pod_class : 1;
  unsigned nearly_empty_p : 1;
  unsigned user_align : 1;
  unsigned has_copy_assign : 1;
  unsigned has_new : 1;
  unsigned has_array_new : 1;

  unsigned gets_delete : 2;
  unsigned interface_only : 1;
  unsigned interface_unknown : 1;
  unsigned contains_empty_class_p : 1;
  unsigned anon_aggr : 1;
  unsigned non_zero_init : 1;
  unsigned empty_p : 1;

  unsigned vec_new_uses_cookie : 1;
  unsigned declared_class : 1;
  unsigned diamond_shaped : 1;
  unsigned repeated_base : 1;
  unsigned being_defined : 1;
  unsigned java_interface : 1;
  unsigned debug_requested : 1;
  unsigned fields_readonly : 1;

  unsigned use_template : 2;
  unsigned ptrmemfunc_flag : 1;
  unsigned was_anonymous : 1;
  unsigned lazy_default_ctor : 1;
  unsigned lazy_copy_ctor : 1;
  unsigned lazy_copy_assign : 1;
  unsigned lazy_destructor : 1;

  unsigned has_const_copy_ctor : 1;
  unsigned has_complex_copy_ctor : 1;
  unsigned has_complex_copy_assign : 1;
  unsigned non_aggregate : 1;
  unsigned has_complex_dflt : 1;
  unsigned has_list_ctor : 1;
  unsigned non_std_layout : 1;
  unsigned is_literal : 1;

  unsigned lazy_move_ctor : 1;
  unsigned lazy_move_assign : 1;
  unsigned has_complex_move_ctor : 1;
  unsigned has_complex_move_assign : 1;
  unsigned has_constexpr_ctor : 1;

  /* When adding a flag here, consider whether or not it ought to
     apply to a template instance if it applies to the template.  If
     so, make sure to copy it in instantiate_class_template!  */

  /* There are some bits left to fill out a 32-bit word.  Keep track
     of this by updating the size of this bitfield whenever you add or
     remove a flag.  */
  unsigned dummy : 3;

  tree primary_base;
  VEC(tree_pair_s,gc) *vcall_indices;
  tree vtables;
  tree typeinfo_var;
  VEC(tree,gc) *vbases;
  binding_table nested_udts;
  tree as_base;
  VEC(tree,gc) *pure_virtuals;
  tree friend_classes;
  VEC(tree,gc) * GTY((reorder ("resort_type_method_vec"))) methods;
  tree key_method;
  tree decl_list;
  tree template_info;
  tree befriending_classes;
  /* In a RECORD_TYPE, information specific to Objective-C++, such
     as a list of adopted protocols or a pointer to a corresponding
     @interface.  See objc/objc-act.h for details.  */
  tree objc_info;
  /* sorted_fields is sorted based on a pointer, so we need to be
     able to resort it if pointers get rearranged.  */
  struct sorted_fields_type * GTY ((reorder ("resort_sorted_fields")))
    sorted_fields;
  /* FIXME reuse another field?  */
  tree lambda_expr;
};

/* The lang_type payload for pointer-to-member types; distinguished
   from lang_type_class by h.is_lang_type_class.  */
struct GTY(()) lang_type_ptrmem {
  struct lang_type_header h;
  tree record;
};

struct GTY((variable_size)) lang_type {
  union lang_type_u
  {
    struct lang_type_header GTY((skip (""))) h;
    struct lang_type_class  GTY((tag ("1"))) c;
    struct lang_type_ptrmem GTY((tag ("0"))) ptrmem;
  } GTY((desc ("%h.h.is_lang_type_class"))) u;
};

/* Checked accessors for the two lang_type payloads; with tree
   checking enabled they abort if the wrong union member is live.  */
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)

#define LANG_TYPE_CLASS_CHECK(NODE) __extension__		\
({  struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE);		\
    if (! lt->u.h.is_lang_type_class)				\
      lang_check_failed (__FILE__, __LINE__, __FUNCTION__);	\
    &lt->u.c; })

#define LANG_TYPE_PTRMEM_CHECK(NODE) __extension__		\
({  struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE);		\
    if (lt->u.h.is_lang_type_class)				\
      lang_check_failed (__FILE__, __LINE__, __FUNCTION__);	\
    &lt->u.ptrmem; })

#else

#define LANG_TYPE_CLASS_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.c)
#define LANG_TYPE_PTRMEM_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.ptrmem)

#endif /* ENABLE_TREE_CHECKING */

/* Nonzero for _CLASSTYPE means that operator delete is defined.
*/
#define TYPE_GETS_DELETE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->gets_delete)
/* Low bit of gets_delete: a non-array operator delete is defined.  */
#define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1)

/* Nonzero if `new NODE[x]' should cause the allocation of extra
   storage to indicate how many array elements are in use.  */
#define TYPE_VEC_NEW_USES_COOKIE(NODE)			\
  (CLASS_TYPE_P (NODE)					\
   && LANG_TYPE_CLASS_CHECK (NODE)->vec_new_uses_cookie)

/* Nonzero means that this _CLASSTYPE node defines ways of converting
   itself to other types.  */
#define TYPE_HAS_CONVERSION(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->h.has_type_conversion)

/* Nonzero means that NODE (a class type) has a default constructor --
   but that it has not yet been declared.  */
#define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor)

/* Nonzero means that NODE (a class type) has a copy constructor --
   but that it has not yet been declared.  */
#define CLASSTYPE_LAZY_COPY_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_ctor)

/* Nonzero means that NODE (a class type) has a move constructor --
   but that it has not yet been declared.  */
#define CLASSTYPE_LAZY_MOVE_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_ctor)

/* Nonzero means that NODE (a class type) has an assignment operator
   -- but that it has not yet been declared.  */
#define CLASSTYPE_LAZY_COPY_ASSIGN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_assign)

/* Nonzero means that NODE (a class type) has an assignment operator
   -- but that it has not yet been declared.  */
#define CLASSTYPE_LAZY_MOVE_ASSIGN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_assign)

/* Nonzero means that NODE (a class type) has a destructor -- but that
   it has not yet been declared.  */
#define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor)

/* Nonzero means that this _CLASSTYPE node overloads operator=(X&).
*/
#define TYPE_HAS_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_assign)

/* True iff the class type NODE has an "operator =" whose parameter
   has a parameter of type "const X&".  */
#define TYPE_HAS_CONST_COPY_ASSIGN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->h.has_const_copy_assign)

/* Nonzero means that this _CLASSTYPE node has an X(X&) constructor.  */
#define TYPE_HAS_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->h.has_copy_ctor)
#define TYPE_HAS_CONST_COPY_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_ctor)

/* Nonzero if this class has an X(initializer_list<T>) constructor.  */
#define TYPE_HAS_LIST_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_list_ctor)

/* Nonzero if this class has a constexpr constructor other than a copy/move
   constructor.  Note that a class can have constexpr constructors for
   static initialization even if it isn't a literal class.  */
#define TYPE_HAS_CONSTEXPR_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_constexpr_ctor)

/* Nonzero if this class defines an overloaded operator new.  (An
   operator new [] doesn't count.)  */
#define TYPE_HAS_NEW_OPERATOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_new)

/* Nonzero if this class defines an overloaded operator new[].  */
#define TYPE_HAS_ARRAY_NEW_OPERATOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_array_new)

/* Nonzero means that this type is being defined.  I.e., the left brace
   starting the definition of this type has been seen.  */
#define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined)

/* Nonzero means that this type is either complete or being defined, so we
   can do lookup in it.  */
#define COMPLETE_OR_OPEN_TYPE_P(NODE) \
  (COMPLETE_TYPE_P (NODE) || (CLASS_TYPE_P (NODE) && TYPE_BEING_DEFINED (NODE)))

/* Mark bits for repeated base checks.  */
#define TYPE_MARKED_P(NODE) TREE_LANG_FLAG_6 (TYPE_CHECK (NODE))

/* Nonzero if the class NODE has multiple paths to the same (virtual)
   base object.
*/
#define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \
  (LANG_TYPE_CLASS_CHECK(NODE)->diamond_shaped)

/* Nonzero if the class NODE has multiple instances of the same base
   type.  */
#define CLASSTYPE_REPEATED_BASE_P(NODE) \
  (LANG_TYPE_CLASS_CHECK(NODE)->repeated_base)

/* The member function with which the vtable will be emitted:
   the first noninline non-pure-virtual member function.  NULL_TREE
   if there is no key function or if this is a class template */
#define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method)

/* Vector member functions defined in this class.  Each element is
   either a FUNCTION_DECL, a TEMPLATE_DECL, or an OVERLOAD.  All
   functions with the same name end up in the same slot.  The first
   two elements are for constructors, and destructors, respectively.
   All template conversion operators to innermost template dependent
   types are overloaded on the next slot, if they exist.  Note, the
   names for these functions will not all be the same.  The
   non-template conversion operators & templated conversions to
   non-innermost template types are next, followed by ordinary member
   functions.  There may be empty entries at the end of the vector.
   The conversion operators are unsorted. The ordinary member
   functions are sorted, once the class is complete.  */
#define CLASSTYPE_METHOD_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->methods)

/* For class templates, this is a TREE_LIST of all member data,
   functions, types, and friends in the order of declaration.
   The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend,
   and the RECORD_TYPE for the class template otherwise.  */
#define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list)

/* The slot in the CLASSTYPE_METHOD_VEC where constructors go.  */
#define CLASSTYPE_CONSTRUCTOR_SLOT 0

/* The slot in the CLASSTYPE_METHOD_VEC where destructors go.  */
#define CLASSTYPE_DESTRUCTOR_SLOT 1

/* The first slot in the CLASSTYPE_METHOD_VEC where conversion
   operators can appear.
*/
#define CLASSTYPE_FIRST_CONVERSION_SLOT 2

/* A FUNCTION_DECL or OVERLOAD for the constructors for NODE.  These
   are the constructors that take an in-charge parameter.  */
#define CLASSTYPE_CONSTRUCTORS(NODE) \
  (VEC_index (tree, CLASSTYPE_METHOD_VEC (NODE), CLASSTYPE_CONSTRUCTOR_SLOT))

/* A FUNCTION_DECL for the destructor for NODE.  These are the
   destructors that take an in-charge parameter.  If
   CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL
   until the destructor is created with lazily_declare_fn.  */
#define CLASSTYPE_DESTRUCTORS(NODE) \
  (CLASSTYPE_METHOD_VEC (NODE)						      \
   ? VEC_index (tree, CLASSTYPE_METHOD_VEC (NODE), CLASSTYPE_DESTRUCTOR_SLOT) \
   : NULL_TREE)

/* A dictionary of the nested user-defined-types (class-types, or enums)
   found within this class.  This table includes nested member class
   templates.  */
#define CLASSTYPE_NESTED_UTDS(NODE) \
   (LANG_TYPE_CLASS_CHECK (NODE)->nested_udts)

/* Nonzero if NODE has a primary base class, i.e., a base class with
   which it shares the virtual function table pointer.  */
#define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \
  (CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE)

/* If non-NULL, this is the binfo for the primary base class, i.e.,
   the base class which contains the virtual function table pointer
   for this class.  */
#define CLASSTYPE_PRIMARY_BINFO(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->primary_base)

/* A vector of BINFOs for the direct and indirect virtual base classes
   that this type uses in a post-order depth-first left-to-right
   order.  (In other words, these bases appear in the order that they
   should be initialized.)  */
#define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases)

/* The type corresponding to NODE when NODE is used as a base class,
   i.e., NODE without virtual base classes.  */

#define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base)

/* True iff NODE is the CLASSTYPE_AS_BASE version of some type.
*/
#define IS_FAKE_BASE_TYPE(NODE)					\
  (TREE_CODE (NODE) == RECORD_TYPE				\
   && TYPE_CONTEXT (NODE) && CLASS_TYPE_P (TYPE_CONTEXT (NODE))	\
   && CLASSTYPE_AS_BASE (TYPE_CONTEXT (NODE)) == (NODE))

/* These are the size and alignment of the type without its virtual
   base classes, for when we use this type as a base itself.  */
#define CLASSTYPE_SIZE(NODE) TYPE_SIZE (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_SIZE_UNIT(NODE) TYPE_SIZE_UNIT (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_ALIGN(NODE) TYPE_ALIGN (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_USER_ALIGN(NODE) TYPE_USER_ALIGN (CLASSTYPE_AS_BASE (NODE))

/* The alignment of NODE, without its virtual bases, in bytes.  */
#define CLASSTYPE_ALIGN_UNIT(NODE) \
  (CLASSTYPE_ALIGN (NODE) / BITS_PER_UNIT)

/* True if this a Java interface type, declared with
   '__attribute__ ((java_interface))'.  */
#define TYPE_JAVA_INTERFACE(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->java_interface)

/* A VEC(tree) of virtual functions which cannot be inherited by
   derived classes.  When deriving from this type, the derived
   class must provide its own definition for each of these functions.  */
#define CLASSTYPE_PURE_VIRTUALS(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->pure_virtuals)

/* Nonzero means that this type has an X() constructor.  */
#define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->h.has_default_ctor)

/* Nonzero means that this type contains a mutable member.  */
#define CLASSTYPE_HAS_MUTABLE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_mutable)
/* Computed predicate (see cp_has_mutable_p), as opposed to the cached
   flag above.  */
#define TYPE_HAS_MUTABLE_P(NODE) (cp_has_mutable_p (NODE))

/* Nonzero means that this class type is not POD for the purpose of layout
   (as defined in the ABI).  This is different from the language's POD.  */
#define CLASSTYPE_NON_LAYOUT_POD_P(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->non_pod_class)

/* Nonzero means that this class type is a non-standard-layout class.
*/
#define CLASSTYPE_NON_STD_LAYOUT(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->non_std_layout)

/* Nonzero means that this class contains pod types whose default
   initialization is not a zero initialization (namely, pointers to
   data members).  */
#define CLASSTYPE_NON_ZERO_INIT_P(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->non_zero_init)

/* Nonzero if this class is "empty" in the sense of the C++ ABI.  */
#define CLASSTYPE_EMPTY_P(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->empty_p)

/* Nonzero if this class is "nearly empty", i.e., contains only a
   virtual function table pointer.  */
#define CLASSTYPE_NEARLY_EMPTY_P(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->nearly_empty_p)

/* Nonzero if this class contains an empty subobject.  */
#define CLASSTYPE_CONTAINS_EMPTY_CLASS_P(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->contains_empty_class_p)

/* A list of class types of which this type is a friend.  The
   TREE_VALUE is normally a TYPE, but will be a TEMPLATE_DECL in the
   case of a template friend.  */
#define CLASSTYPE_FRIEND_CLASSES(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->friend_classes)

/* A list of the classes which grant friendship to this class.  */
#define CLASSTYPE_BEFRIENDING_CLASSES(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->befriending_classes)

/* The associated LAMBDA_EXPR that made this class.  */
#define CLASSTYPE_LAMBDA_EXPR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lambda_expr)
/* The extra mangling scope for this closure type.  */
#define LAMBDA_TYPE_EXTRA_SCOPE(NODE) \
  (LAMBDA_EXPR_EXTRA_SCOPE (CLASSTYPE_LAMBDA_EXPR (NODE)))

/* Say whether this node was declared as a "class" or a "struct".  */
#define CLASSTYPE_DECLARED_CLASS(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->declared_class)

/* Nonzero if this class has const members
   which have no specified initialization.
   The getter is safe on types without a lang-specific payload (it
   then yields 0); the setter requires a class type.  */
#define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE)	\
  (TYPE_LANG_SPECIFIC (NODE)				\
   ? LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init : 0)
#define SET_CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE, VALUE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init = (VALUE))

/* Nonzero if this class has ref members
   which have no specified initialization.  */
#define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE)		\
  (TYPE_LANG_SPECIFIC (NODE)				\
   ? LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init : 0)
#define SET_CLASSTYPE_REF_FIELDS_NEED_INIT(NODE, VALUE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init = (VALUE))

/* Nonzero if this class is included from a header file which employs
   `#pragma interface', and it is not included in its implementation file.  */
#define CLASSTYPE_INTERFACE_ONLY(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->interface_only)

/* True if we have already determined whether or not vtables, VTTs,
   typeinfo, and other similar per-class data should be emitted in
   this translation unit.  This flag does not indicate whether or not
   these items should be emitted; it only indicates that we know one
   way or the other.  */
#define CLASSTYPE_INTERFACE_KNOWN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown == 0)
/* The opposite of CLASSTYPE_INTERFACE_KNOWN.  */
#define CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown)

#define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) \
  (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X))
#define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 1)
#define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 0)

/* Nonzero if a _DECL node requires us to output debug info for this class.  */
#define CLASSTYPE_DEBUG_REQUESTED(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->debug_requested)

/* Additional macros for inheritance information.  */

/* Nonzero means that this class is on a path leading to a new vtable.  */
#define BINFO_VTABLE_PATH_MARKED(NODE) BINFO_FLAG_1 (NODE)

/* Nonzero means B (a BINFO) has its own vtable.
Any copies will not have this flag set. */ #define BINFO_NEW_VTABLE_MARKED(B) (BINFO_FLAG_2 (B)) /* Compare a BINFO_TYPE with another type for equality. For a binfo, this is functionally equivalent to using same_type_p, but measurably faster. At least one of the arguments must be a BINFO_TYPE. The other can be a BINFO_TYPE or a regular type. If BINFO_TYPE(T) ever stops being the main variant of the class the binfo is for, this macro must change. */ #define SAME_BINFO_TYPE_P(A, B) ((A) == (B)) /* Any subobject that needs a new vtable must have a vptr and must not be a non-virtual primary base (since it would then use the vtable from a derived class and never become non-primary.) */ #define SET_BINFO_NEW_VTABLE_MARKED(B) \ (BINFO_NEW_VTABLE_MARKED (B) = 1, \ gcc_assert (!BINFO_PRIMARY_P (B) || BINFO_VIRTUAL_P (B)), \ gcc_assert (TYPE_VFIELD (BINFO_TYPE (B)))) /* Nonzero if this binfo is for a dependent base - one that should not be searched. */ #define BINFO_DEPENDENT_BASE_P(NODE) BINFO_FLAG_3 (NODE) /* Nonzero if this binfo has lost its primary base binfo (because that is a nearly-empty virtual base that has been taken by some other base in the complete hierarchy. */ #define BINFO_LOST_PRIMARY_P(NODE) BINFO_FLAG_4 (NODE) /* Nonzero if this BINFO is a primary base class. */ #define BINFO_PRIMARY_P(NODE) BINFO_FLAG_5(NODE) /* Used by various search routines. */ #define IDENTIFIER_MARKED(NODE) TREE_LANG_FLAG_0 (NODE) /* A VEC(tree_pair_s) of the vcall indices associated with the class NODE. The PURPOSE of each element is a FUNCTION_DECL for a virtual function. The VALUE is the index into the virtual table where the vcall offset for that function is stored, when NODE is a virtual base. */ #define CLASSTYPE_VCALL_INDICES(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->vcall_indices) /* The various vtables for the class NODE. The primary vtable will be first, followed by the construction vtables and VTT, if any. 
*/ #define CLASSTYPE_VTABLES(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->vtables) /* The std::type_info variable representing this class, or NULL if no such variable has been created. This field is only set for the TYPE_MAIN_VARIANT of the class. */ #define CLASSTYPE_TYPEINFO_VAR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var) /* Accessor macros for the BINFO_VIRTUALS list. */ /* The number of bytes by which to adjust the `this' pointer when calling this virtual function. Subtract this value from the this pointer. Always non-NULL, might be constant zero though. */ #define BV_DELTA(NODE) (TREE_PURPOSE (NODE)) /* If non-NULL, the vtable index at which to find the vcall offset when calling this virtual function. Add the value at that vtable index to the this pointer. */ #define BV_VCALL_INDEX(NODE) (TREE_TYPE (NODE)) /* The function to call. */ #define BV_FN(NODE) (TREE_VALUE (NODE)) /* Whether or not this entry is for a lost primary virtual base. */ #define BV_LOST_PRIMARY(NODE) (TREE_LANG_FLAG_0 (NODE)) /* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that this type can raise. Each TREE_VALUE is a _TYPE. The TREE_VALUE will be NULL_TREE to indicate a throw specification of `()', or no exceptions allowed. For a noexcept specification, TREE_VALUE is NULL_TREE and TREE_PURPOSE is the constant-expression. */ #define TYPE_RAISES_EXCEPTIONS(NODE) TYPE_LANG_SLOT_1 (NODE) /* For FUNCTION_TYPE or METHOD_TYPE, return 1 iff it is declared `throw()' or noexcept(true). */ #define TYPE_NOTHROW_P(NODE) nothrow_spec_p (TYPE_RAISES_EXCEPTIONS (NODE)) /* For FUNCTION_TYPE or METHOD_TYPE, true if NODE is noexcept. This is the case for things declared noexcept(true) and, with -fnothrow-opt, for throw() functions. */ #define TYPE_NOEXCEPT_P(NODE) type_noexcept_p (NODE) /* The binding level associated with the namespace. */ #define NAMESPACE_LEVEL(NODE) \ (LANG_DECL_NS_CHECK (NODE)->level) /* Flags shared by all forms of DECL_LANG_SPECIFIC. 
   Some of the flags live here only to make lang_decl_min/fn smaller.  Do
   not make this struct larger than 32 bits; instead, make sel smaller.  */

struct GTY(()) lang_decl_base {
  unsigned selector : 16;   /* Larger than necessary for faster access.  */
  ENUM_BITFIELD(languages) language : 4;
  unsigned use_template : 2;
  unsigned not_really_extern : 1;	   /* var or fn */
  unsigned initialized_in_class : 1;	   /* var or fn */
  unsigned repo_available_p : 1;	   /* var or fn */
  unsigned threadprivate_or_deleted_p : 1; /* var or fn */
  unsigned anticipated_p : 1;		   /* fn or type */
  unsigned friend_attr : 1;		   /* fn or type */
  unsigned template_conv_p : 1;		   /* var or template */
  unsigned odr_used : 1;		   /* var or fn */
  unsigned u2sel : 1;			   /* selects the lang_decl_u2 member */
  /* 1 spare bit */
};

/* True for DECL codes which have template info and access.  */
#define LANG_DECL_HAS_MIN(NODE)			\
  (TREE_CODE (NODE) == FUNCTION_DECL		\
   || TREE_CODE (NODE) == FIELD_DECL		\
   || TREE_CODE (NODE) == VAR_DECL		\
   || TREE_CODE (NODE) == CONST_DECL		\
   || TREE_CODE (NODE) == TYPE_DECL		\
   || TREE_CODE (NODE) == TEMPLATE_DECL		\
   || TREE_CODE (NODE) == USING_DECL)

/* DECL_LANG_SPECIFIC for the above codes.  */

struct GTY(()) lang_decl_min {
  struct lang_decl_base base;

  /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
     THUNK_ALIAS.  In a FUNCTION_DECL for which DECL_THUNK_P does not
     hold, VAR_DECL, TYPE_DECL, or TEMPLATE_DECL, this is
     DECL_TEMPLATE_INFO.  */
  tree template_info;

  union lang_decl_u2 {
    /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
       THUNK_VIRTUAL_OFFSET.
       Otherwise this is DECL_ACCESS.  */
    tree GTY ((tag ("0"))) access;

    /* For VAR_DECL in function, this is DECL_DISCRIMINATOR.  */
    int GTY ((tag ("1"))) discriminator;
  } GTY ((desc ("%0.u.base.u2sel"))) u2;
};

/* Additional DECL_LANG_SPECIFIC information for functions.  */

struct GTY(()) lang_decl_fn {
  struct lang_decl_min min;

  /* In an overloaded operator, this is the value of
     DECL_OVERLOADED_OPERATOR_P.  */
  ENUM_BITFIELD (tree_code) operator_code : 16;

  unsigned global_ctor_p : 1;
  unsigned global_dtor_p : 1;
  unsigned constructor_attr : 1;
  unsigned destructor_attr : 1;
  unsigned assignment_operator_p : 1;
  unsigned static_function : 1;
  unsigned pure_virtual : 1;
  unsigned defaulted_p : 1;

  unsigned has_in_charge_parm_p : 1;
  unsigned has_vtt_parm_p : 1;
  unsigned pending_inline_p : 1;
  unsigned nonconverting : 1;
  unsigned thunk_p : 1;
  unsigned this_thunk_p : 1;
  unsigned hidden_friend_p : 1;
  /* 1 spare bit.  */

  /* For a non-thunk function decl, this is a tree list of
     friendly classes.  For a thunk function decl, it is the
     thunked to function decl.  */
  tree befriending_classes;

  /* For a non-virtual FUNCTION_DECL, this is
     DECL_FRIEND_CONTEXT.  For a virtual FUNCTION_DECL for which
     DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS.  Both
     this pointer and result pointer adjusting thunks are
     chained here.  This pointer thunks to return pointer thunks
     will be chained on the return pointer thunk.  */
  tree context;

  union lang_decl_u5
  {
    /* In a non-thunk FUNCTION_DECL or TEMPLATE_DECL, this is
       DECL_CLONED_FUNCTION.  */
    tree GTY ((tag ("0"))) cloned_function;

    /* In a FUNCTION_DECL for which THUNK_P holds this is the
       THUNK_FIXED_OFFSET.  */
    HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset;
  } GTY ((desc ("%1.thunk_p"))) u5;

  union lang_decl_u3
  {
    struct cp_token_cache * GTY ((tag ("1"))) pending_inline_info;
    struct language_function * GTY ((tag ("0")))
      saved_language_function;
  } GTY ((desc ("%1.pending_inline_p"))) u;

};

/* DECL_LANG_SPECIFIC for namespaces.  */

struct GTY(()) lang_decl_ns {
  struct lang_decl_base base;
  struct cp_binding_level *level;
};

/* DECL_LANG_SPECIFIC for parameters.  */

struct GTY(()) lang_decl_parm {
  struct lang_decl_base base;
  int level;
  int index;
};

/* DECL_LANG_SPECIFIC for all types.  It would be nice to just make this a
   union rather than a struct containing a union as its only field, but
   tree.h declares it as a struct.  */

struct GTY((variable_size)) lang_decl {
  union GTY((desc ("%h.base.selector"))) lang_decl_u {
    struct lang_decl_base GTY ((default)) base;
    struct lang_decl_min GTY((tag ("0"))) min;
    struct lang_decl_fn GTY ((tag ("1"))) fn;
    struct lang_decl_ns GTY((tag ("2"))) ns;
    struct lang_decl_parm GTY((tag ("3"))) parm;
  } u;
};

/* Looks through a template (if present) to find what it declares.  */
#define STRIP_TEMPLATE(NODE) \
  (TREE_CODE (NODE) == TEMPLATE_DECL ? DECL_TEMPLATE_RESULT (NODE) : NODE)

#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)

#define LANG_DECL_MIN_CHECK(NODE) __extension__			\
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE);		\
   if (!LANG_DECL_HAS_MIN (NODE))				\
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__);	\
   &lt->u.min; })

/* We want to be able to check DECL_CONSTRUCTOR_P and such on a function
   template, not just on a FUNCTION_DECL.  So when looking for things in
   lang_decl_fn, look down through a TEMPLATE_DECL into its result.  */
#define LANG_DECL_FN_CHECK(NODE) __extension__				\
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE));	\
   if (!DECL_DECLARES_FUNCTION_P (NODE) || lt->u.base.selector != 1)	\
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__);		\
   &lt->u.fn; })

#define LANG_DECL_NS_CHECK(NODE) __extension__				\
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE);			\
   if (TREE_CODE (NODE) != NAMESPACE_DECL || lt->u.base.selector != 2)	\
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__);		\
   &lt->u.ns; })

#define LANG_DECL_PARM_CHECK(NODE) __extension__		\
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE);		\
  if (TREE_CODE (NODE) != PARM_DECL)				\
    lang_check_failed (__FILE__, __LINE__, __FUNCTION__);	\
  &lt->u.parm; })

#define LANG_DECL_U2_CHECK(NODE, TF) __extension__		\
({  struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE);		\
    if (!LANG_DECL_HAS_MIN (NODE) || lt->u.base.u2sel != TF)	\
      lang_check_failed (__FILE__, __LINE__, __FUNCTION__);	\
    &lt->u.min.u2; })

#else

#define LANG_DECL_MIN_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (NODE)->u.min)

#define LANG_DECL_FN_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE))->u.fn)

#define LANG_DECL_NS_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (NODE)->u.ns)

#define LANG_DECL_PARM_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (NODE)->u.parm)

#define LANG_DECL_U2_CHECK(NODE, TF) \
  (&DECL_LANG_SPECIFIC (NODE)->u.min.u2)

#endif /* ENABLE_TREE_CHECKING */

/* For a FUNCTION_DECL or a VAR_DECL, the language linkage for the
   declaration.  Some entities (like a member function in a local
   class, or a local variable) do not have linkage at all, and this
   macro should not be used in those cases.

   Implementation note: A FUNCTION_DECL without DECL_LANG_SPECIFIC was
   created by language-independent code, and has C linkage.  Most
   VAR_DECLs have C++ linkage, and do not have DECL_LANG_SPECIFIC, but
   we do create DECL_LANG_SPECIFIC for variables with non-C++ linkage.  */
#define DECL_LANGUAGE(NODE)				\
  (DECL_LANG_SPECIFIC (NODE)				\
   ? DECL_LANG_SPECIFIC (NODE)->u.base.language		\
   : (TREE_CODE (NODE) == FUNCTION_DECL			\
      ? lang_c : lang_cplusplus))

/* Set the language linkage for NODE to LANGUAGE.  */
#define SET_DECL_LANGUAGE(NODE, LANGUAGE) \
  (DECL_LANG_SPECIFIC (NODE)->u.base.language = (LANGUAGE))

/* For FUNCTION_DECLs: nonzero means that this function is a constructor.  */
#define DECL_CONSTRUCTOR_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->constructor_attr)

/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a complete
   object.  */
#define DECL_COMPLETE_CONSTRUCTOR_P(NODE)		\
  (DECL_CONSTRUCTOR_P (NODE)				\
   && DECL_NAME (NODE) == complete_ctor_identifier)

/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a base
   object.  */
#define DECL_BASE_CONSTRUCTOR_P(NODE)		\
  (DECL_CONSTRUCTOR_P (NODE)			\
   && DECL_NAME (NODE) == base_ctor_identifier)

/* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either the
   specialized in-charge constructor or the specialized not-in-charge
   constructor.
*/
#define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE)		\
  (DECL_DECLARES_FUNCTION_P (NODE) && DECL_CONSTRUCTOR_P (NODE) \
   && !DECL_CLONED_FUNCTION_P (NODE))

/* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor.  */
#define DECL_COPY_CONSTRUCTOR_P(NODE) \
  (DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0)

/* Nonzero if NODE (a FUNCTION_DECL) is a move constructor.  */
#define DECL_MOVE_CONSTRUCTOR_P(NODE) \
  (DECL_CONSTRUCTOR_P (NODE) && move_fn_p (NODE))

/* Nonzero if NODE is a destructor.  */
#define DECL_DESTRUCTOR_P(NODE)				\
  (LANG_DECL_FN_CHECK (NODE)->destructor_attr)

/* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the
   specialized in-charge destructor, the in-charge deleting destructor,
   or the base destructor.  */
#define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE)			\
  (DECL_DECLARES_FUNCTION_P (NODE) && DECL_DESTRUCTOR_P (NODE)	\
   && !DECL_CLONED_FUNCTION_P (NODE))

/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
   object.  */
#define DECL_COMPLETE_DESTRUCTOR_P(NODE)		\
  (DECL_DESTRUCTOR_P (NODE)				\
   && DECL_NAME (NODE) == complete_dtor_identifier)

/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base
   object.  */
#define DECL_BASE_DESTRUCTOR_P(NODE)		\
  (DECL_DESTRUCTOR_P (NODE)			\
   && DECL_NAME (NODE) == base_dtor_identifier)

/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
   object that deletes the object after it has been destroyed.  */
#define DECL_DELETING_DESTRUCTOR_P(NODE)		\
  (DECL_DESTRUCTOR_P (NODE)				\
   && DECL_NAME (NODE) == deleting_dtor_identifier)

/* Nonzero if NODE (a FUNCTION_DECL) is a cloned constructor or
   destructor.  */
#define DECL_CLONED_FUNCTION_P(NODE) (!!decl_cloned_function_p (NODE, true))

/* If DECL_CLONED_FUNCTION_P holds, this is the function that was
   cloned.  */
#define DECL_CLONED_FUNCTION(NODE) (*decl_cloned_function_p (NODE, false))

/* Perform an action for each clone of FN, if FN is a function with
   clones.  This macro should be used like:

      FOR_EACH_CLONE (clone, fn)
	{ ... }

  */
#define FOR_EACH_CLONE(CLONE, FN)			\
  if (TREE_CODE (FN) == FUNCTION_DECL			\
      && (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (FN)	\
	  || DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (FN)))	\
     for (CLONE = DECL_CHAIN (FN);			\
	  CLONE && DECL_CLONED_FUNCTION_P (CLONE);	\
	  CLONE = DECL_CHAIN (CLONE))

/* Nonzero if NODE has DECL_DISCRIMINATOR and not DECL_ACCESS.  */
#define DECL_DISCRIMINATOR_P(NODE)	\
  (TREE_CODE (NODE) == VAR_DECL		\
   && DECL_FUNCTION_SCOPE_P (NODE))

/* Discriminator for name mangling.  */
#define DECL_DISCRIMINATOR(NODE) (LANG_DECL_U2_CHECK (NODE, 1)->discriminator)

/* True iff DECL_DISCRIMINATOR is set for a DECL_DISCRIMINATOR_P decl.  */
#define DECL_DISCRIMINATOR_SET_P(NODE) \
  (DECL_LANG_SPECIFIC (NODE) && DECL_LANG_SPECIFIC (NODE)->u.base.u2sel == 1)

/* The index of a user-declared parameter in its function, starting at 1.
   All artificial parameters will have index 0.  */
#define DECL_PARM_INDEX(NODE) \
  (LANG_DECL_PARM_CHECK (NODE)->index)

/* The level of a user-declared parameter in its function, starting at 1.
   A parameter of the function will have level 1; a parameter of the first
   nested function declarator (i.e. t in void f (void (*p)(T t))) will have
   level 2.  */
#define DECL_PARM_LEVEL(NODE) \
  (LANG_DECL_PARM_CHECK (NODE)->level)

/* Nonzero if the VTT parm has been added to NODE.  */
#define DECL_HAS_VTT_PARM_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->has_vtt_parm_p)

/* Nonzero if NODE is a FUNCTION_DECL for which a VTT parameter is
   required.  */
#define DECL_NEEDS_VTT_PARM_P(NODE)			\
  (CLASSTYPE_VBASECLASSES (DECL_CONTEXT (NODE))		\
   && (DECL_BASE_CONSTRUCTOR_P (NODE)			\
       || DECL_BASE_DESTRUCTOR_P (NODE)))

/* Nonzero if NODE is a user-defined conversion operator.  */
#define DECL_CONV_FN_P(NODE) \
  (DECL_NAME (NODE) && IDENTIFIER_TYPENAME_P (DECL_NAME (NODE)))

/* If FN is a conversion operator, the type to which it converts.
   Otherwise, NULL_TREE.  */
#define DECL_CONV_FN_TYPE(FN) \
  (DECL_CONV_FN_P (FN) ? TREE_TYPE (DECL_NAME (FN)) : NULL_TREE)

/* Nonzero if NODE, which is a TEMPLATE_DECL, is a template
   conversion operator to a type dependent on the innermost template
   args.  */
#define DECL_TEMPLATE_CONV_FN_P(NODE) \
  (DECL_LANG_SPECIFIC (TEMPLATE_DECL_CHECK (NODE))->u.base.template_conv_p)

/* Nonzero if NODE, a static data member, was declared in its class as an
   array of unknown bound.  */
#define VAR_HAD_UNKNOWN_BOUND(NODE)			\
  (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))		\
   ? DECL_LANG_SPECIFIC (NODE)->u.base.template_conv_p	\
   : false)
#define SET_VAR_HAD_UNKNOWN_BOUND(NODE) \
  (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.template_conv_p = true)

/* Set the overloaded operator code for NODE to CODE.  */
#define SET_OVERLOADED_OPERATOR_CODE(NODE, CODE) \
  (LANG_DECL_FN_CHECK (NODE)->operator_code = (CODE))

/* If NODE is an overloaded operator, then this returns the TREE_CODE
   associated with the overloaded operator.
   DECL_ASSIGNMENT_OPERATOR_P must also be checked to determine
   whether or not NODE is an assignment operator.  If NODE is not an
   overloaded operator, ERROR_MARK is returned.  Since the numerical
   value of ERROR_MARK is zero, this macro can be used as a predicate
   to test whether or not NODE is an overloaded operator.  */
#define DECL_OVERLOADED_OPERATOR_P(NODE)		\
  (IDENTIFIER_OPNAME_P (DECL_NAME (NODE))		\
   ? LANG_DECL_FN_CHECK (NODE)->operator_code : ERROR_MARK)

/* Nonzero if NODE is an assignment operator (including += and such).  */
#define DECL_ASSIGNMENT_OPERATOR_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->assignment_operator_p)

/* For FUNCTION_DECLs: nonzero means that this function is a
   constructor or a destructor with an extra in-charge parameter to
   control whether or not virtual bases are constructed.  */
#define DECL_HAS_IN_CHARGE_PARM_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->has_in_charge_parm_p)

/* Nonzero if DECL is a declaration of __builtin_constant_p.
*/
#define DECL_IS_BUILTIN_CONSTANT_P(NODE)		\
 (TREE_CODE (NODE) == FUNCTION_DECL			\
  && DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL	\
  && DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P)

/* Nonzero for _DECL means that this decl appears in (or will appear
   in) as a member in a RECORD_TYPE or UNION_TYPE node.  It is also for
   detecting circularity in case members are multiply defined.  In the
   case of a VAR_DECL, it is also used to determine how program storage
   should be allocated.  */
#define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE))

/* Nonzero for a VAR_DECL means that the variable's initialization (if
   any) has been processed.  (In general, DECL_INITIALIZED_P is
   !DECL_EXTERN, but static data members may be initialized even if
   not defined.)  */
#define DECL_INITIALIZED_P(NODE) \
   (TREE_LANG_FLAG_1 (VAR_DECL_CHECK (NODE)))

/* Nonzero for a VAR_DECL iff an explicit initializer was provided.  */
#define DECL_NONTRIVIALLY_INITIALIZED_P(NODE)	\
   (TREE_LANG_FLAG_3 (VAR_DECL_CHECK (NODE)))

/* Nonzero for a VAR_DECL that was initialized with a
   constant-expression.  */
#define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \
  (TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE)))

/* Nonzero if the DECL was initialized in the class definition itself,
   rather than outside the class.  This is used for both static member
   VAR_DECLS, and FUNCTION_DECLS that are defined in the class.  */
#define DECL_INITIALIZED_IN_CLASS_P(DECL) \
  (DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
   ->u.base.initialized_in_class)

/* Nonzero if the DECL is used in the sense of 3.2 [basic.def.odr].
   Only available for decls with DECL_LANG_SPECIFIC.  */
#define DECL_ODR_USED(DECL) \
  (DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
   ->u.base.odr_used)

/* Nonzero for DECL means that this decl is just a friend declaration,
   and should not be added to the list of members for this class.  */
#define DECL_FRIEND_P(NODE) (DECL_LANG_SPECIFIC (NODE)->u.base.friend_attr)

/* A TREE_LIST of the types which have befriended this FUNCTION_DECL.  */
#define DECL_BEFRIENDING_CLASSES(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->befriending_classes)

/* Nonzero for FUNCTION_DECL means that this decl is a static
   member function.  */
#define DECL_STATIC_FUNCTION_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->static_function)

/* Nonzero for FUNCTION_DECL means that this decl is a non-static
   member function.  */
#define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \
  (TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE)

/* Nonzero for FUNCTION_DECL means that this decl is a member function
   (static or non-static).  */
#define DECL_FUNCTION_MEMBER_P(NODE) \
  (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE))

/* Nonzero for FUNCTION_DECL means that this member function
   has `this' as const X *const.  */
#define DECL_CONST_MEMFUNC_P(NODE)					 \
  (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE)				 \
   && CP_TYPE_CONST_P (TREE_TYPE (TREE_VALUE				 \
				  (TYPE_ARG_TYPES (TREE_TYPE (NODE))))))

/* Nonzero for FUNCTION_DECL means that this member function
   has `this' as volatile X *const.  */
#define DECL_VOLATILE_MEMFUNC_P(NODE)					 \
  (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE)				 \
   && CP_TYPE_VOLATILE_P (TREE_TYPE (TREE_VALUE				 \
				  (TYPE_ARG_TYPES (TREE_TYPE (NODE))))))

/* Nonzero for a DECL means that this member is a non-static member.  */
#define DECL_NONSTATIC_MEMBER_P(NODE)		\
  (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE)	\
   || TREE_CODE (NODE) == FIELD_DECL)

/* Nonzero for _DECL means that this member object type
   is mutable.  */
#define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (NODE))

/* Nonzero for _DECL means that this constructor or conversion function is
   non-converting.  */
#define DECL_NONCONVERTING_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->nonconverting)

/* Nonzero for FUNCTION_DECL means that this member function is a pure
   virtual function.  */
#define DECL_PURE_VIRTUAL_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->pure_virtual)

/* True (in a FUNCTION_DECL) if NODE is a virtual function that is an
   invalid overrider for a function from a base class.  Once we have
   complained about an invalid overrider we avoid complaining about it
   again.  */
#define DECL_INVALID_OVERRIDER_P(NODE) \
  (DECL_LANG_FLAG_4 (NODE))

/* The thunks associated with NODE, a FUNCTION_DECL.  */
#define DECL_THUNKS(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->context)

/* Nonzero if NODE is a thunk, rather than an ordinary function.  */
#define DECL_THUNK_P(NODE)			\
  (TREE_CODE (NODE) == FUNCTION_DECL		\
   && DECL_LANG_SPECIFIC (NODE)			\
   && LANG_DECL_FN_CHECK (NODE)->thunk_p)

/* Set DECL_THUNK_P for node.  */
#define SET_DECL_THUNK_P(NODE, THIS_ADJUSTING)			\
  (LANG_DECL_FN_CHECK (NODE)->thunk_p = 1,			\
   LANG_DECL_FN_CHECK (NODE)->this_thunk_p = (THIS_ADJUSTING))

/* Nonzero if NODE is a this pointer adjusting thunk.  */
#define DECL_THIS_THUNK_P(NODE)			\
  (DECL_THUNK_P (NODE) && LANG_DECL_FN_CHECK (NODE)->this_thunk_p)

/* Nonzero if NODE is a result pointer adjusting thunk.  */
#define DECL_RESULT_THUNK_P(NODE)			\
  (DECL_THUNK_P (NODE) && !LANG_DECL_FN_CHECK (NODE)->this_thunk_p)

/* Nonzero if NODE is a FUNCTION_DECL, but not a thunk.  */
#define DECL_NON_THUNK_FUNCTION_P(NODE)				\
  (TREE_CODE (NODE) == FUNCTION_DECL && !DECL_THUNK_P (NODE))

/* Nonzero if NODE is `extern "C"'.  */
#define DECL_EXTERN_C_P(NODE) \
  (DECL_LANGUAGE (NODE) == lang_c)

/* Nonzero if NODE is an `extern "C"' function.  */
#define DECL_EXTERN_C_FUNCTION_P(NODE) \
  (DECL_NON_THUNK_FUNCTION_P (NODE) && DECL_EXTERN_C_P (NODE))

/* True iff DECL is an entity with vague linkage whose definition is
   available in this translation unit.  */
#define DECL_REPO_AVAILABLE_P(NODE) \
  (DECL_LANG_SPECIFIC (NODE)->u.base.repo_available_p)

/* True if DECL is declared 'constexpr'.
*/
#define DECL_DECLARED_CONSTEXPR_P(DECL) \
  DECL_LANG_FLAG_8 (VAR_OR_FUNCTION_DECL_CHECK (STRIP_TEMPLATE (DECL)))

/* Nonzero if this DECL is the __PRETTY_FUNCTION__ variable in a
   template function.  */
#define DECL_PRETTY_FUNCTION_P(NODE) \
  (TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE)))

/* The _TYPE context in which this _DECL appears.  This field holds the
   class where a virtual function instance is actually defined.  */
#define DECL_CLASS_CONTEXT(NODE) \
  (DECL_CLASS_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : NULL_TREE)

/* For a non-member friend function, the class (if any) in which this
   friend was defined.  For example, given:

     struct S { friend void f (); };

   the DECL_FRIEND_CONTEXT for `f' will be `S'.  */
#define DECL_FRIEND_CONTEXT(NODE)				\
  ((DECL_DECLARES_FUNCTION_P (NODE)				\
    && DECL_FRIEND_P (NODE) && !DECL_FUNCTION_MEMBER_P (NODE))	\
   ? LANG_DECL_FN_CHECK (NODE)->context				\
   : NULL_TREE)

/* Set the DECL_FRIEND_CONTEXT for NODE to CONTEXT.  */
#define SET_DECL_FRIEND_CONTEXT(NODE, CONTEXT) \
  (LANG_DECL_FN_CHECK (NODE)->context = (CONTEXT))

#define CP_DECL_CONTEXT(NODE) \
  (!DECL_FILE_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : global_namespace)
#define CP_TYPE_CONTEXT(NODE) \
  (!TYPE_FILE_SCOPE_P (NODE) ? TYPE_CONTEXT (NODE) : global_namespace)
#define FROB_CONTEXT(NODE) \
  ((NODE) == global_namespace ? DECL_CONTEXT (NODE) : (NODE))

/* 1 iff NODE has namespace scope, including the global namespace.  */
#define DECL_NAMESPACE_SCOPE_P(NODE)				\
  (!DECL_TEMPLATE_PARM_P (NODE)					\
   && TREE_CODE (CP_DECL_CONTEXT (NODE)) == NAMESPACE_DECL)

#define TYPE_NAMESPACE_SCOPE_P(NODE) \
  (TREE_CODE (CP_TYPE_CONTEXT (NODE)) == NAMESPACE_DECL)

#define NAMESPACE_SCOPE_P(NODE) \
  ((DECL_P (NODE) && DECL_NAMESPACE_SCOPE_P (NODE)) \
   || (TYPE_P (NODE) && TYPE_NAMESPACE_SCOPE_P (NODE)))

/* 1 iff NODE is a class member.  */
#define DECL_CLASS_SCOPE_P(NODE) \
  (DECL_CONTEXT (NODE) && TYPE_P (DECL_CONTEXT (NODE)))

#define TYPE_CLASS_SCOPE_P(NODE) \
  (TYPE_CONTEXT (NODE) && TYPE_P (TYPE_CONTEXT (NODE)))

/* 1 iff NODE is function-local.  */
#define DECL_FUNCTION_SCOPE_P(NODE) \
  (DECL_CONTEXT (NODE) \
   && TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL)

#define TYPE_FUNCTION_SCOPE_P(NODE) \
  (TYPE_CONTEXT (NODE) && TREE_CODE (TYPE_CONTEXT (NODE)) == FUNCTION_DECL)

/* 1 iff VAR_DECL node NODE is a type-info decl.  This flag is set for
   both the primary typeinfo object and the associated NTBS name.  */
#define DECL_TINFO_P(NODE) TREE_LANG_FLAG_4 (VAR_DECL_CHECK (NODE))

/* 1 iff VAR_DECL node NODE is virtual table or VTT.  */
#define DECL_VTABLE_OR_VTT_P(NODE) TREE_LANG_FLAG_5 (VAR_DECL_CHECK (NODE))

/* Returns 1 iff VAR_DECL is a construction virtual table.
   DECL_VTABLE_OR_VTT_P will be true in this case and must be checked
   before using this macro.  */
#define DECL_CONSTRUCTION_VTABLE_P(NODE) \
  TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE))

/* 1 iff NODE is function-local, but for types.  */
#define LOCAL_CLASS_P(NODE)				\
  (decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE)

/* For a NAMESPACE_DECL: the list of using namespace directives.
   The PURPOSE is the used namespace, the value is the namespace
   that is the common ancestor.  */
#define DECL_NAMESPACE_USING(NODE) DECL_VINDEX (NAMESPACE_DECL_CHECK (NODE))

/* In a NAMESPACE_DECL, the DECL_INITIAL is used to record all users
   of a namespace, to record the transitive closure of using namespace.  */
#define DECL_NAMESPACE_USERS(NODE) DECL_INITIAL (NAMESPACE_DECL_CHECK (NODE))

/* In a NAMESPACE_DECL, the list of namespaces which have associated
   themselves with this one.  */
#define DECL_NAMESPACE_ASSOCIATIONS(NODE) \
  (NAMESPACE_DECL_CHECK (NODE)->decl_non_common.saved_tree)

/* In a NAMESPACE_DECL, points to the original namespace if this is
   a namespace alias.  */
#define DECL_NAMESPACE_ALIAS(NODE) \
	DECL_ABSTRACT_ORIGIN (NAMESPACE_DECL_CHECK (NODE))
#define ORIGINAL_NAMESPACE(NODE)  \
  (DECL_NAMESPACE_ALIAS (NODE) ? DECL_NAMESPACE_ALIAS (NODE) : (NODE))

/* Nonzero if NODE is the std namespace.  */
#define DECL_NAMESPACE_STD_P(NODE)			\
  (TREE_CODE (NODE) == NAMESPACE_DECL			\
   && CP_DECL_CONTEXT (NODE) == global_namespace	\
   && DECL_NAME (NODE) == std_identifier)

/* In a TREE_LIST concatenating using directives, indicate indirect
   directives.  */
#define TREE_INDIRECT_USING(NODE) (TREE_LIST_CHECK (NODE)->base.lang_flag_0)

/* In a TREE_LIST in an attribute list, indicates that the attribute
   must be applied at instantiation time.  */
#define ATTR_IS_DEPENDENT(NODE) (TREE_LIST_CHECK (NODE)->base.lang_flag_0)

extern tree decl_shadowed_for_var_lookup (tree);
extern void decl_shadowed_for_var_insert (tree, tree);

/* Nonzero if this is a using decl for a dependent scope.  */
#define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE))

/* The scope named in a using decl.  */
#define USING_DECL_SCOPE(NODE) TREE_TYPE (USING_DECL_CHECK (NODE))

/* The decls named by a using decl.  */
#define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE))

/* In a VAR_DECL, true if we have a shadowed local variable
   in the shadowed var table for this VAR_DECL.  */
#define DECL_HAS_SHADOWED_FOR_VAR_P(NODE) \
  (VAR_DECL_CHECK (NODE)->decl_with_vis.shadowed_for_var_p)

/* In a VAR_DECL for a variable declared in a for statement,
   this is the shadowed (local) variable.  */
#define DECL_SHADOWED_FOR_VAR(NODE) \
  (DECL_HAS_SHADOWED_FOR_VAR_P(NODE) ? decl_shadowed_for_var_lookup (NODE) : NULL)

#define SET_DECL_SHADOWED_FOR_VAR(NODE, VAL) \
  (decl_shadowed_for_var_insert (NODE, VAL))

/* In a FUNCTION_DECL, this is nonzero if this function was defined in
   the class definition.  We have saved away the text of the function,
   but have not yet processed it.
*/
#define DECL_PENDING_INLINE_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->pending_inline_p)

/* If DECL_PENDING_INLINE_P holds, this is the saved text of the
   function.  */
#define DECL_PENDING_INLINE_INFO(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->u.pending_inline_info)

/* For a class type: if this structure has many fields, we'll sort them
   and put them into a TREE_VEC.  */
#define CLASSTYPE_SORTED_FIELDS(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->sorted_fields)

/* If non-NULL for a VAR_DECL, FUNCTION_DECL, TYPE_DECL or
   TEMPLATE_DECL, the entity is either a template specialization (if
   DECL_USE_TEMPLATE is nonzero) or the abstract instance of the
   template itself.

   In either case, DECL_TEMPLATE_INFO is a TREE_LIST, whose
   TREE_PURPOSE is the TEMPLATE_DECL of which this entity is a
   specialization or abstract instance.  The TREE_VALUE is the
   template arguments used to specialize the template.

   Consider:

      template <typename T> struct S { friend void f(T) {} };

   In this case, S<int>::f is, from the point of view of the compiler,
   an instantiation of a template -- but, from the point of view of
   the language, each instantiation of S results in a wholly unrelated
   global function f.  In this case, DECL_TEMPLATE_INFO for S<int>::f
   will be non-NULL, but DECL_USE_TEMPLATE will be zero.  */
#define DECL_TEMPLATE_INFO(NODE) \
  (DECL_LANG_SPECIFIC (VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK (NODE)) \
   ->u.min.template_info)

/* For a VAR_DECL, indicates that the variable is actually a
   non-static data member of anonymous union that has been promoted to
   variable status.  */
#define DECL_ANON_UNION_VAR_P(NODE) \
  (DECL_LANG_FLAG_4 (VAR_DECL_CHECK (NODE)))

/* Template information for a RECORD_TYPE or UNION_TYPE.  */
#define CLASSTYPE_TEMPLATE_INFO(NODE) \
  (LANG_TYPE_CLASS_CHECK (RECORD_OR_UNION_CHECK (NODE))->template_info)

/* Template information for an ENUMERAL_TYPE.  Although an enumeration may
   not be a primary template, it may be declared within the scope of a
   primary template and the enumeration constants may depend on
   non-type template parameters.  */
#define ENUM_TEMPLATE_INFO(NODE) \
  (TYPE_LANG_SLOT_1 (ENUMERAL_TYPE_CHECK (NODE)))

/* Template information for a template template parameter.  */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO(NODE) \
  (LANG_TYPE_CLASS_CHECK (BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK (NODE)) \
   ->template_info)

/* Template information for an ENUMERAL_, RECORD_, or UNION_TYPE.  */
#define TYPE_TEMPLATE_INFO(NODE)			\
  (TREE_CODE (NODE) == ENUMERAL_TYPE			\
   ? ENUM_TEMPLATE_INFO (NODE) :			\
   (TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM	\
    ? TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (NODE) :	\
    (TYPE_LANG_SPECIFIC (NODE)				\
     ? CLASSTYPE_TEMPLATE_INFO (NODE)			\
     : NULL_TREE)))

/* Set the template information for an ENUMERAL_, RECORD_, or
   UNION_TYPE to VAL.  */
#define SET_TYPE_TEMPLATE_INFO(NODE, VAL)	\
  (TREE_CODE (NODE) == ENUMERAL_TYPE		\
   ? (ENUM_TEMPLATE_INFO (NODE) = (VAL))	\
   : (CLASSTYPE_TEMPLATE_INFO (NODE) = (VAL)))

#define TI_TEMPLATE(NODE) TREE_TYPE (TEMPLATE_INFO_CHECK (NODE))
#define TI_ARGS(NODE) TREE_CHAIN (TEMPLATE_INFO_CHECK (NODE))
#define TI_PENDING_TEMPLATE_FLAG(NODE) TREE_LANG_FLAG_1 (NODE)

/* For a given TREE_VEC containing a template argument list,
   this property contains the number of arguments that are not
   defaulted.  */
#define NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) TREE_CHAIN (TREE_VEC_CHECK (NODE))

/* Below are the setter and getter of the NON_DEFAULT_TEMPLATE_ARGS_COUNT
   property.  */
#define SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE, INT_VALUE) \
  NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) = build_int_cst (NULL_TREE, INT_VALUE)
#ifdef ENABLE_CHECKING
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
    int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE))
#else
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
  NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE) \
  ? int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) \
  : TREE_VEC_LENGTH (INNERMOST_TEMPLATE_ARGS (NODE))
#endif

/* The list of typedefs - used in the template - that need
   access checking at template instantiation time.  */
#define TI_TYPEDEFS_NEEDING_ACCESS_CHECKING(NODE) \
  ((struct tree_template_info*)TEMPLATE_INFO_CHECK \
     (NODE))->typedefs_needing_access_checking

/* We use TREE_VECs to hold template arguments.  If there is only one
   level of template arguments, then the TREE_VEC contains the
   arguments directly.  If there is more than one level of template
   arguments, then each entry in the TREE_VEC is itself a TREE_VEC,
   containing the template arguments for a single level.  The first
   entry in the outer TREE_VEC is the outermost level of template
   parameters; the last is the innermost.

   It is incorrect to ever form a template argument vector containing
   only one level of arguments, but which is a TREE_VEC containing as
   its only entry the TREE_VEC for that level.

   For each TREE_VEC containing the template arguments for a single
   level, it's possible to get or set the number of non defaulted
   template arguments by using the accessor macros
   GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT or
   SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT.  */

/* Nonzero if the template arguments are actually a vector of vectors,
   rather than just a vector.  */
#define TMPL_ARGS_HAVE_MULTIPLE_LEVELS(NODE)		     \
  (NODE && TREE_VEC_LENGTH (NODE) && TREE_VEC_ELT (NODE, 0)  \
   && TREE_CODE (TREE_VEC_ELT (NODE, 0)) == TREE_VEC)

/* The depth of a template argument vector.  When called directly by
   the parser, we use a TREE_LIST rather than a TREE_VEC to represent
   template arguments.  In fact, we may even see NULL_TREE if there
   are no template arguments.  In both of those cases, there is only
   one level of template arguments.  */
#define TMPL_ARGS_DEPTH(NODE)					\
  (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (NODE) ? TREE_VEC_LENGTH (NODE) : 1)

/* The LEVELth level of the template ARGS.  The outermost level of
   args is level 1, not level 0.
*/ #define TMPL_ARGS_LEVEL(ARGS, LEVEL) \ (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (ARGS) \ ? TREE_VEC_ELT (ARGS, (LEVEL) - 1) : (ARGS)) /* Set the LEVELth level of the template ARGS to VAL. This macro does not work with single-level argument vectors. */ #define SET_TMPL_ARGS_LEVEL(ARGS, LEVEL, VAL) \ (TREE_VEC_ELT (ARGS, (LEVEL) - 1) = (VAL)) /* Accesses the IDXth parameter in the LEVELth level of the ARGS. */ #define TMPL_ARG(ARGS, LEVEL, IDX) \ (TREE_VEC_ELT (TMPL_ARGS_LEVEL (ARGS, LEVEL), IDX)) /* Given a single level of template arguments in NODE, return the number of arguments. */ #define NUM_TMPL_ARGS(NODE) \ (TREE_VEC_LENGTH (NODE)) /* Returns the innermost level of template arguments in ARGS. */ #define INNERMOST_TEMPLATE_ARGS(NODE) \ (get_innermost_template_args ((NODE), 1)) /* The number of levels of template parameters given by NODE. */ #define TMPL_PARMS_DEPTH(NODE) \ ((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE))) /* The TEMPLATE_DECL instantiated or specialized by NODE. This TEMPLATE_DECL will be the immediate parent, not the most general template. For example, in: template <class T> struct S { template <class U> void f(U); } the FUNCTION_DECL for S<int>::f<double> will have, as its DECL_TI_TEMPLATE, `template <class U> S<int>::f<U>'. As a special case, for a member friend template of a template class, this value will not be a TEMPLATE_DECL, but rather an IDENTIFIER_NODE or OVERLOAD indicating the name of the template and any explicit template arguments provided. For example, in: template <class T> struct S { friend void f<int>(int, double); } the DECL_TI_TEMPLATE will be an IDENTIFIER_NODE for `f' and the DECL_TI_ARGS will be {int}. */ #define DECL_TI_TEMPLATE(NODE) TI_TEMPLATE (DECL_TEMPLATE_INFO (NODE)) /* The template arguments used to obtain this decl from the most general form of DECL_TI_TEMPLATE. For the example given for DECL_TI_TEMPLATE, the DECL_TI_ARGS will be {int, double}. 
These are always the full set of arguments required to instantiate this declaration from the most general template specialized here. */ #define DECL_TI_ARGS(NODE) TI_ARGS (DECL_TEMPLATE_INFO (NODE)) /* The TEMPLATE_DECL associated with NODE, a class type. Even if NODE will be generated from a partial specialization, the TEMPLATE_DECL referred to here will be the original template. For example, given: template <typename T> struct S {}; template <typename T> struct S<T*> {}; the CLASSTPYE_TI_TEMPLATE for S<int*> will be S, not the S<T*>. */ #define CLASSTYPE_TI_TEMPLATE(NODE) TI_TEMPLATE (CLASSTYPE_TEMPLATE_INFO (NODE)) #define CLASSTYPE_TI_ARGS(NODE) TI_ARGS (CLASSTYPE_TEMPLATE_INFO (NODE)) /* For a template instantiation TYPE, returns the TYPE corresponding to the primary template. Otherwise returns TYPE itself. */ #define CLASSTYPE_PRIMARY_TEMPLATE_TYPE(TYPE) \ ((CLASSTYPE_USE_TEMPLATE ((TYPE)) \ && !CLASSTYPE_TEMPLATE_SPECIALIZATION ((TYPE))) \ ? TREE_TYPE (DECL_TEMPLATE_RESULT (DECL_PRIMARY_TEMPLATE \ (CLASSTYPE_TI_TEMPLATE ((TYPE))))) \ : (TYPE)) /* Like CLASS_TI_TEMPLATE, but also works for ENUMERAL_TYPEs. */ #define TYPE_TI_TEMPLATE(NODE) \ (TI_TEMPLATE (TYPE_TEMPLATE_INFO (NODE))) /* Like DECL_TI_ARGS, but for an ENUMERAL_, RECORD_, or UNION_TYPE. */ #define TYPE_TI_ARGS(NODE) \ (TI_ARGS (TYPE_TEMPLATE_INFO (NODE))) #define INNERMOST_TEMPLATE_PARMS(NODE) TREE_VALUE (NODE) /* Nonzero if NODE (a TEMPLATE_DECL) is a member template, in the sense of [temp.mem]. */ #define DECL_MEMBER_TEMPLATE_P(NODE) \ (DECL_LANG_FLAG_1 (TEMPLATE_DECL_CHECK (NODE))) /* Nonzero if the NODE corresponds to the template parameters for a member template, whose inline definition is being processed after the class definition is complete. */ #define TEMPLATE_PARMS_FOR_INLINE(NODE) TREE_LANG_FLAG_1 (NODE) /* Determine if a parameter (i.e., a PARM_DECL) is a function parameter pack. 
*/ #define FUNCTION_PARAMETER_PACK_P(NODE) \ (DECL_LANG_FLAG_1 (PARM_DECL_CHECK (NODE))) /* Determines if NODE is an expansion of one or more parameter packs, e.g., a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */ #define PACK_EXPANSION_P(NODE) \ (TREE_CODE (NODE) == TYPE_PACK_EXPANSION \ || TREE_CODE (NODE) == EXPR_PACK_EXPANSION) /* Extracts the type or expression pattern from a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */ #define PACK_EXPANSION_PATTERN(NODE) \ (TREE_CODE (NODE) == TYPE_PACK_EXPANSION? TREE_TYPE (NODE) \ : TREE_OPERAND (NODE, 0)) /* Sets the type or expression pattern for a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */ #define SET_PACK_EXPANSION_PATTERN(NODE,VALUE) \ if (TREE_CODE (NODE) == TYPE_PACK_EXPANSION) \ TREE_TYPE (NODE) = VALUE; \ else \ TREE_OPERAND (NODE, 0) = VALUE /* The list of parameter packs used in the PACK_EXPANSION_* node. The TREE_VALUE of each TREE_LIST contains the parameter packs. */ #define PACK_EXPANSION_PARAMETER_PACKS(NODE) TREE_CHAIN (NODE) /* Determine if this is an argument pack. */ #define ARGUMENT_PACK_P(NODE) \ (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK \ || TREE_CODE (NODE) == NONTYPE_ARGUMENT_PACK) /* The arguments stored in an argument pack. Arguments are stored in a TREE_VEC, which may have length zero. */ #define ARGUMENT_PACK_ARGS(NODE) \ (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK? TREE_TYPE (NODE) \ : TREE_OPERAND (NODE, 0)) /* Set the arguments stored in an argument pack. VALUE must be a TREE_VEC. */ #define SET_ARGUMENT_PACK_ARGS(NODE,VALUE) \ if (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK) \ TREE_TYPE (NODE) = VALUE; \ else \ TREE_OPERAND (NODE, 0) = VALUE /* Whether the argument pack is "incomplete", meaning that more arguments can still be deduced. Incomplete argument packs are only used when the user has provided an explicit template argument list for a variadic function template. 
Some of the explicit template arguments will be placed into the beginning of the argument pack, but additional arguments might still be deduced. */ #define ARGUMENT_PACK_INCOMPLETE_P(NODE) \ TREE_LANG_FLAG_0 (ARGUMENT_PACK_ARGS (NODE)) /* When ARGUMENT_PACK_INCOMPLETE_P, stores the explicit template arguments used to fill this pack. */ #define ARGUMENT_PACK_EXPLICIT_ARGS(NODE) \ TREE_TYPE (ARGUMENT_PACK_ARGS (NODE)) /* In an ARGUMENT_PACK_SELECT, the argument pack from which an argument will be selected. */ #define ARGUMENT_PACK_SELECT_FROM_PACK(NODE) \ (((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->argument_pack) /* In an ARGUMENT_PACK_SELECT, the index of the argument we want to select. */ #define ARGUMENT_PACK_SELECT_INDEX(NODE) \ (((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->index) /* In an ARGUMENT_PACK_SELECT, the actual underlying argument that the ARGUMENT_PACK_SELECT represents. */ #define ARGUMENT_PACK_SELECT_ARG(NODE) \ TREE_VEC_ELT (ARGUMENT_PACK_ARGS (ARGUMENT_PACK_SELECT_FROM_PACK (NODE)), \ ARGUMENT_PACK_SELECT_INDEX (NODE)); /* In a FUNCTION_DECL, the saved language-specific per-function data. */ #define DECL_SAVED_FUNCTION_DATA(NODE) \ (LANG_DECL_FN_CHECK (FUNCTION_DECL_CHECK (NODE)) \ ->u.saved_language_function) /* Indicates an indirect_expr is for converting a reference. */ #define REFERENCE_REF_P(NODE) \ TREE_LANG_FLAG_0 (INDIRECT_REF_CHECK (NODE)) #define NEW_EXPR_USE_GLOBAL(NODE) \ TREE_LANG_FLAG_0 (NEW_EXPR_CHECK (NODE)) #define DELETE_EXPR_USE_GLOBAL(NODE) \ TREE_LANG_FLAG_0 (DELETE_EXPR_CHECK (NODE)) #define DELETE_EXPR_USE_VEC(NODE) \ TREE_LANG_FLAG_1 (DELETE_EXPR_CHECK (NODE)) /* Indicates that this is a non-dependent COMPOUND_EXPR which will resolve to a function call. */ #define COMPOUND_EXPR_OVERLOADED(NODE) \ TREE_LANG_FLAG_0 (COMPOUND_EXPR_CHECK (NODE)) /* In a CALL_EXPR appearing in a template, true if Koenig lookup should be performed at instantiation time. 
*/ #define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE)) /* Indicates whether a string literal has been parenthesized. Such usages are disallowed in certain circumstances. */ #define PAREN_STRING_LITERAL_P(NODE) \ TREE_LANG_FLAG_0 (STRING_CST_CHECK (NODE)) /* Nonzero if this AGGR_INIT_EXPR provides for initialization via a constructor call, rather than an ordinary function call. */ #define AGGR_INIT_VIA_CTOR_P(NODE) \ TREE_LANG_FLAG_0 (AGGR_INIT_EXPR_CHECK (NODE)) /* Nonzero if expanding this AGGR_INIT_EXPR should first zero-initialize the object. */ #define AGGR_INIT_ZERO_FIRST(NODE) \ TREE_LANG_FLAG_2 (AGGR_INIT_EXPR_CHECK (NODE)) /* AGGR_INIT_EXPR accessors. These are equivalent to the CALL_EXPR accessors, except for AGGR_INIT_EXPR_SLOT (which takes the place of CALL_EXPR_STATIC_CHAIN). */ #define AGGR_INIT_EXPR_FN(NODE) TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 1) #define AGGR_INIT_EXPR_SLOT(NODE) \ TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 2) #define AGGR_INIT_EXPR_ARG(NODE, I) \ TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), (I) + 3) #define aggr_init_expr_nargs(NODE) (VL_EXP_OPERAND_LENGTH(NODE) - 3) /* AGGR_INIT_EXPR_ARGP returns a pointer to the argument vector for NODE. We can't use &AGGR_INIT_EXPR_ARG (NODE, 0) because that will complain if the argument count is zero when checking is enabled. Instead, do the pointer arithmetic to advance past the 3 fixed operands in a AGGR_INIT_EXPR. That produces a valid pointer to just past the end of the operand array, even if it's not valid to dereference it. */ #define AGGR_INIT_EXPR_ARGP(NODE) \ (&(TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 0)) + 3) /* Abstract iterators for AGGR_INIT_EXPRs. */ /* Structure containing iterator state. 
*/ typedef struct aggr_init_expr_arg_iterator_d { tree t; /* the aggr_init_expr */ int n; /* argument count */ int i; /* next argument index */ } aggr_init_expr_arg_iterator; /* Initialize the abstract argument list iterator object ITER with the arguments from AGGR_INIT_EXPR node EXP. */ static inline void init_aggr_init_expr_arg_iterator (tree exp, aggr_init_expr_arg_iterator *iter) { iter->t = exp; iter->n = aggr_init_expr_nargs (exp); iter->i = 0; } /* Return the next argument from abstract argument list iterator object ITER, and advance its state. Return NULL_TREE if there are no more arguments. */ static inline tree next_aggr_init_expr_arg (aggr_init_expr_arg_iterator *iter) { tree result; if (iter->i >= iter->n) return NULL_TREE; result = AGGR_INIT_EXPR_ARG (iter->t, iter->i); iter->i++; return result; } /* Initialize the abstract argument list iterator object ITER, then advance past and return the first argument. Useful in for expressions, e.g. for (arg = first_aggr_init_expr_arg (exp, &iter); arg; arg = next_aggr_init_expr_arg (&iter)) */ static inline tree first_aggr_init_expr_arg (tree exp, aggr_init_expr_arg_iterator *iter) { init_aggr_init_expr_arg_iterator (exp, iter); return next_aggr_init_expr_arg (iter); } /* Test whether there are more arguments in abstract argument list iterator ITER, without changing its state. */ static inline bool more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter) { return (iter->i < iter->n); } /* Iterate through each argument ARG of AGGR_INIT_EXPR CALL, using variable ITER (of type aggr_init_expr_arg_iterator) to hold the iteration state. */ #define FOR_EACH_AGGR_INIT_EXPR_ARG(arg, iter, call) \ for ((arg) = first_aggr_init_expr_arg ((call), &(iter)); (arg); \ (arg) = next_aggr_init_expr_arg (&(iter))) /* VEC_INIT_EXPR accessors. 
*/ #define VEC_INIT_EXPR_SLOT(NODE) TREE_OPERAND (NODE, 0) #define VEC_INIT_EXPR_INIT(NODE) TREE_OPERAND (NODE, 1) /* Indicates that a VEC_INIT_EXPR is a potential constant expression. Only set when the current function is constexpr. */ #define VEC_INIT_EXPR_IS_CONSTEXPR(NODE) \ TREE_LANG_FLAG_0 (VEC_INIT_EXPR_CHECK (NODE)) /* Indicates that a VEC_INIT_EXPR is expressing value-initialization. */ #define VEC_INIT_EXPR_VALUE_INIT(NODE) \ TREE_LANG_FLAG_1 (VEC_INIT_EXPR_CHECK (NODE)) /* The TYPE_MAIN_DECL for a class template type is a TYPE_DECL, not a TEMPLATE_DECL. This macro determines whether or not a given class type is really a template type, as opposed to an instantiation or specialization of one. */ #define CLASSTYPE_IS_TEMPLATE(NODE) \ (CLASSTYPE_TEMPLATE_INFO (NODE) \ && !CLASSTYPE_USE_TEMPLATE (NODE) \ && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE))) /* The name used by the user to name the typename type. Typically, this is an IDENTIFIER_NODE, and the same as the DECL_NAME on the corresponding TYPE_DECL. However, this may also be a TEMPLATE_ID_EXPR if we had something like `typename X::Y<T>'. */ #define TYPENAME_TYPE_FULLNAME(NODE) (TYPENAME_TYPE_CHECK (NODE))->type.values /* True if a TYPENAME_TYPE was declared as an "enum". */ #define TYPENAME_IS_ENUM_P(NODE) \ (TREE_LANG_FLAG_0 (TYPENAME_TYPE_CHECK (NODE))) /* True if a TYPENAME_TYPE was declared as a "class", "struct", or "union". */ #define TYPENAME_IS_CLASS_P(NODE) \ (TREE_LANG_FLAG_1 (TYPENAME_TYPE_CHECK (NODE))) /* True if a TYPENAME_TYPE is in the process of being resolved. */ #define TYPENAME_IS_RESOLVING_P(NODE) \ (TREE_LANG_FLAG_2 (TYPENAME_TYPE_CHECK (NODE))) /* Nonzero in INTEGER_CST means that this int is negative by dint of using a twos-complement negated operand. */ #define TREE_NEGATED_INT(NODE) TREE_LANG_FLAG_0 (INTEGER_CST_CHECK (NODE)) /* [class.virtual] A class that declares or inherits a virtual function is called a polymorphic class. 
*/ #define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE)) /* Nonzero if this class has a virtual function table pointer. */ #define TYPE_CONTAINS_VPTR_P(NODE) \ (TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE)) /* This flag is true of a local VAR_DECL if it was declared in a for statement, but we are no longer in the scope of the for. */ #define DECL_DEAD_FOR_LOCAL(NODE) DECL_LANG_FLAG_7 (VAR_DECL_CHECK (NODE)) /* This flag is set on a VAR_DECL that is a DECL_DEAD_FOR_LOCAL if we already emitted a warning about using it. */ #define DECL_ERROR_REPORTED(NODE) DECL_LANG_FLAG_0 (VAR_DECL_CHECK (NODE)) /* Nonzero if NODE is a FUNCTION_DECL (for a function with global scope) declared in a local scope. */ #define DECL_LOCAL_FUNCTION_P(NODE) \ DECL_LANG_FLAG_0 (FUNCTION_DECL_CHECK (NODE)) /* Nonzero if NODE is a DECL which we know about but which has not been explicitly declared, such as a built-in function or a friend declared inside a class. In the latter case DECL_HIDDEN_FRIEND_P will be set. */ #define DECL_ANTICIPATED(NODE) \ (DECL_LANG_SPECIFIC (DECL_COMMON_CHECK (NODE))->u.base.anticipated_p) /* Nonzero if NODE is a FUNCTION_DECL which was declared as a friend within a class but has not been declared in the surrounding scope. The function is invisible except via argument dependent lookup. */ #define DECL_HIDDEN_FRIEND_P(NODE) \ (LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->hidden_friend_p) /* Nonzero if DECL has been declared threadprivate by #pragma omp threadprivate. */ #define CP_DECL_THREADPRIVATE_P(DECL) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p) /* Nonzero if DECL was declared with '= delete'. */ #define DECL_DELETED_FN(DECL) \ (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p) /* Nonzero if DECL was declared with '= default' (maybe implicitly). 
*/ #define DECL_DEFAULTED_FN(DECL) \ (LANG_DECL_FN_CHECK (DECL)->defaulted_p) /* Nonzero if DECL is explicitly defaulted in the class body. */ #define DECL_DEFAULTED_IN_CLASS_P(DECL) \ (DECL_DEFAULTED_FN (DECL) && DECL_INITIALIZED_IN_CLASS_P (DECL)) /* Nonzero if DECL was defaulted outside the class body. */ #define DECL_DEFAULTED_OUTSIDE_CLASS_P(DECL) \ (DECL_DEFAULTED_FN (DECL) \ && !(DECL_ARTIFICIAL (DECL) || DECL_INITIALIZED_IN_CLASS_P (DECL))) /* Record whether a typedef for type `int' was actually `signed int'. */ #define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP) /* Returns nonzero if DECL has external linkage, as specified by the language standard. (This predicate may hold even when the corresponding entity is not actually given external linkage in the object file; see decl_linkage for details.) */ #define DECL_EXTERNAL_LINKAGE_P(DECL) \ (decl_linkage (DECL) == lk_external) /* Keep these codes in ascending code order. */ #define INTEGRAL_CODE_P(CODE) \ ((CODE) == ENUMERAL_TYPE \ || (CODE) == BOOLEAN_TYPE \ || (CODE) == INTEGER_TYPE) /* [basic.fundamental] Types bool, char, wchar_t, and the signed and unsigned integer types are collectively called integral types. Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration types as well, which is incorrect in C++. Keep these checks in ascending code order. */ #define CP_INTEGRAL_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == BOOLEAN_TYPE \ || TREE_CODE (TYPE) == INTEGER_TYPE) /* Returns true if TYPE is an integral or enumeration name. Keep these checks in ascending code order. */ #define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == ENUMERAL_TYPE || CP_INTEGRAL_TYPE_P (TYPE)) /* Returns true if TYPE is an integral or unscoped enumeration type. */ #define INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P(TYPE) \ (UNSCOPED_ENUM_P (TYPE) || CP_INTEGRAL_TYPE_P (TYPE)) /* True if the class type TYPE is a literal type. 
*/ #define CLASSTYPE_LITERAL_P(TYPE) \ (LANG_TYPE_CLASS_CHECK (TYPE)->is_literal) /* [basic.fundamental] Integral and floating types are collectively called arithmetic types. As a GNU extension, we also accept complex types. Keep these checks in ascending code order. */ #define ARITHMETIC_TYPE_P(TYPE) \ (CP_INTEGRAL_TYPE_P (TYPE) \ || TREE_CODE (TYPE) == REAL_TYPE \ || TREE_CODE (TYPE) == COMPLEX_TYPE) /* True iff TYPE is cv decltype(nullptr). */ #define NULLPTR_TYPE_P(TYPE) (TREE_CODE (TYPE) == NULLPTR_TYPE) /* [basic.types] Arithmetic types, enumeration types, pointer types, pointer-to-member types, and std::nullptr_t are collectively called scalar types. Keep these checks in ascending code order. */ #define SCALAR_TYPE_P(TYPE) \ (TYPE_PTRMEM_P (TYPE) \ || TREE_CODE (TYPE) == ENUMERAL_TYPE \ || ARITHMETIC_TYPE_P (TYPE) \ || TYPE_PTR_P (TYPE) \ || TYPE_PTRMEMFUNC_P (TYPE) \ || NULLPTR_TYPE_P (TYPE)) /* Determines whether this type is a C++0x scoped enumeration type. Scoped enumerations types are introduced via "enum class" or "enum struct", e.g., enum class Color { Red, Green, Blue }; Scoped enumeration types are different from normal (unscoped) enumeration types in several ways: - The enumerators of a scoped enumeration type are only available within the scope of the enumeration type and not in the enclosing scope. For example, the Red color can be referred to with "Color::Red" but not "Red". - Scoped enumerators and enumerations do not implicitly convert to integers or 'bool'. - The underlying type of the enum is well-defined. */ #define SCOPED_ENUM_P(TYPE) \ (TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_SCOPED (TYPE)) /* Determine whether this is an unscoped enumeration type. */ #define UNSCOPED_ENUM_P(TYPE) \ (TREE_CODE (TYPE) == ENUMERAL_TYPE && !ENUM_IS_SCOPED (TYPE)) /* Set the flag indicating whether an ENUMERAL_TYPE is a C++0x scoped enumeration type (1) or a normal (unscoped) enumeration type (0). 
*/ #define SET_SCOPED_ENUM_P(TYPE, VAL) \ (ENUM_IS_SCOPED (TYPE) = (VAL)) #define SET_OPAQUE_ENUM_P(TYPE, VAL) \ (ENUM_IS_OPAQUE (TYPE) = (VAL)) #define OPAQUE_ENUM_P(TYPE) \ (TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_OPAQUE (TYPE)) /* Determines whether an ENUMERAL_TYPE has an explicit underlying type. */ #define ENUM_FIXED_UNDERLYING_TYPE_P(NODE) (TYPE_LANG_FLAG_5 (NODE)) /* Returns the underlying type of the given enumeration type. The underlying type is determined in different ways, depending on the properties of the enum: - In C++0x, the underlying type can be explicitly specified, e.g., enum E1 : char { ... } // underlying type is char - In a C++0x scoped enumeration, the underlying type is int unless otherwises specified: enum class E2 { ... } // underlying type is int - Otherwise, the underlying type is determined based on the values of the enumerators. In this case, the ENUM_UNDERLYING_TYPE will not be set until after the definition of the enumeration is completed by finish_enum. */ #define ENUM_UNDERLYING_TYPE(TYPE) \ TREE_TYPE (ENUMERAL_TYPE_CHECK (TYPE)) /* [dcl.init.aggr] An aggregate is an array or a class with no user-declared constructors, no private or protected non-static data members, no base classes, and no virtual functions. As an extension, we also treat vectors as aggregates. Keep these checks in ascending code order. */ #define CP_AGGREGATE_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == VECTOR_TYPE \ ||TREE_CODE (TYPE) == ARRAY_TYPE \ || (CLASS_TYPE_P (TYPE) && !CLASSTYPE_NON_AGGREGATE (TYPE))) /* Nonzero for a class type means that the class type has a user-declared constructor. */ #define TYPE_HAS_USER_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE)) /* When appearing in an INDIRECT_REF, it means that the tree structure underneath is actually a call to a constructor. This is needed when the constructor must initialize local storage (which can be automatically destroyed), rather than allowing it to allocate space from the heap. 
When appearing in a SAVE_EXPR, it means that underneath is a call to a constructor. When appearing in a CONSTRUCTOR, the expression is a compound literal. When appearing in a FIELD_DECL, it means that this field has been duly initialized in its constructor. */ #define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE)) /* True if NODE is a brace-enclosed initializer. */ #define BRACE_ENCLOSED_INITIALIZER_P(NODE) \ (TREE_CODE (NODE) == CONSTRUCTOR && TREE_TYPE (NODE) == init_list_type_node) /* True if NODE is a compound-literal, i.e., a brace-enclosed initializer cast to a particular type. */ #define COMPOUND_LITERAL_P(NODE) \ (TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE)) #define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \ && VEC_empty (constructor_elt, \ CONSTRUCTOR_ELTS (NODE)) \ && !TREE_HAS_CONSTRUCTOR (NODE)) /* True if NODE is a init-list used as a direct-initializer, i.e. B b{1,2}, not B b({1,2}) or B b = {1,2}. */ #define CONSTRUCTOR_IS_DIRECT_INIT(NODE) (TREE_LANG_FLAG_0 (CONSTRUCTOR_CHECK (NODE))) /* Nonzero means that an object of this type can not be initialized using an initializer list. */ #define CLASSTYPE_NON_AGGREGATE(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate) #define TYPE_NON_AGGREGATE_CLASS(NODE) \ (CLASS_TYPE_P (NODE) && CLASSTYPE_NON_AGGREGATE (NODE)) /* Nonzero if there is a non-trivial X::op=(cv X&) for this class. */ #define TYPE_HAS_COMPLEX_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_assign) /* Nonzero if there is a non-trivial X::X(cv X&) for this class. */ #define TYPE_HAS_COMPLEX_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_ctor) /* Nonzero if there is a non-trivial X::op=(X&&) for this class. */ #define TYPE_HAS_COMPLEX_MOVE_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_assign) /* Nonzero if there is a non-trivial X::X(X&&) for this class. 
*/ #define TYPE_HAS_COMPLEX_MOVE_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_ctor) /* Nonzero if there is a non-trivial default constructor for this class. */ #define TYPE_HAS_COMPLEX_DFLT(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_dflt) /* Nonzero if TYPE has a trivial destructor. From [class.dtor]: A destructor is trivial if it is an implicitly declared destructor and if: - all of the direct base classes of its class have trivial destructors, - for all of the non-static data members of its class that are of class type (or array thereof), each such class has a trivial destructor. */ #define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \ (!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE)) /* Nonzero for _TYPE node means that this type does not have a trivial destructor. Therefore, destroying an object of this type will involve a call to a destructor. This can apply to objects of ARRAY_TYPE is the type of the elements needs a destructor. */ #define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) \ (TYPE_LANG_FLAG_4 (NODE)) /* Nonzero for class type means that the default constructor is trivial. */ #define TYPE_HAS_TRIVIAL_DFLT(NODE) \ (TYPE_HAS_DEFAULT_CONSTRUCTOR (NODE) && ! TYPE_HAS_COMPLEX_DFLT (NODE)) /* Nonzero for class type means that copy initialization of this type can use a bitwise copy. */ #define TYPE_HAS_TRIVIAL_COPY_CTOR(NODE) \ (TYPE_HAS_COPY_CTOR (NODE) && ! TYPE_HAS_COMPLEX_COPY_CTOR (NODE)) /* Nonzero for class type means that assignment of this type can use a bitwise copy. */ #define TYPE_HAS_TRIVIAL_COPY_ASSIGN(NODE) \ (TYPE_HAS_COPY_ASSIGN (NODE) && ! TYPE_HAS_COMPLEX_COPY_ASSIGN (NODE)) /* Returns true if NODE is a pointer-to-data-member. */ #define TYPE_PTRMEM_P(NODE) \ (TREE_CODE (NODE) == OFFSET_TYPE) /* Returns true if NODE is a pointer. 
*/ #define TYPE_PTR_P(NODE) \ (TREE_CODE (NODE) == POINTER_TYPE) /* Returns true if NODE is an object type: [basic.types] An object type is a (possibly cv-qualified) type that is not a function type, not a reference type, and not a void type. Keep these checks in ascending order, for speed. */ #define TYPE_OBJ_P(NODE) \ (TREE_CODE (NODE) != REFERENCE_TYPE \ && TREE_CODE (NODE) != VOID_TYPE \ && TREE_CODE (NODE) != FUNCTION_TYPE \ && TREE_CODE (NODE) != METHOD_TYPE) /* Returns true if NODE is a pointer to an object. Keep these checks in ascending tree code order. */ #define TYPE_PTROB_P(NODE) \ (TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE))) /* Returns true if NODE is a reference to an object. Keep these checks in ascending tree code order. */ #define TYPE_REF_OBJ_P(NODE) \ (TREE_CODE (NODE) == REFERENCE_TYPE && TYPE_OBJ_P (TREE_TYPE (NODE))) /* Returns true if NODE is a pointer to an object, or a pointer to void. Keep these checks in ascending tree code order. */ #define TYPE_PTROBV_P(NODE) \ (TYPE_PTR_P (NODE) \ && !(TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE \ || TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE)) /* Returns true if NODE is a pointer to function. */ #define TYPE_PTRFN_P(NODE) \ (TREE_CODE (NODE) == POINTER_TYPE \ && TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE) /* Returns true if NODE is a reference to function. */ #define TYPE_REFFN_P(NODE) \ (TREE_CODE (NODE) == REFERENCE_TYPE \ && TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE) /* Nonzero for _TYPE node means that this type is a pointer to member function type. */ #define TYPE_PTRMEMFUNC_P(NODE) \ (TREE_CODE (NODE) == RECORD_TYPE \ && TYPE_LANG_SPECIFIC (NODE) \ && TYPE_PTRMEMFUNC_FLAG (NODE)) #define TYPE_PTRMEMFUNC_FLAG(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->ptrmemfunc_flag) /* Returns true if NODE is a pointer-to-member. 
*/ #define TYPE_PTR_TO_MEMBER_P(NODE) \ (TYPE_PTRMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE)) /* Indicates when overload resolution may resolve to a pointer to member function. [expr.unary.op]/3 */ #define PTRMEM_OK_P(NODE) \ TREE_LANG_FLAG_0 (TREE_CHECK2 ((NODE), ADDR_EXPR, OFFSET_REF)) /* Get the POINTER_TYPE to the METHOD_TYPE associated with this pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true, before using this macro. */ #define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \ (TREE_TYPE (TYPE_FIELDS (NODE))) /* Returns `A' for a type like `int (A::*)(double)' */ #define TYPE_PTRMEMFUNC_OBJECT_TYPE(NODE) \ TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE))) /* These are use to manipulate the canonical RECORD_TYPE from the hashed POINTER_TYPE, and can only be used on the POINTER_TYPE. */ #define TYPE_GET_PTRMEMFUNC_TYPE(NODE) \ (TYPE_LANG_SPECIFIC (NODE) ? LANG_TYPE_PTRMEM_CHECK (NODE)->record : NULL) #define TYPE_SET_PTRMEMFUNC_TYPE(NODE, VALUE) \ do { \ if (TYPE_LANG_SPECIFIC (NODE) == NULL) \ { \ TYPE_LANG_SPECIFIC (NODE) = ggc_alloc_cleared_lang_type \ (sizeof (struct lang_type_ptrmem)); \ TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.h.is_lang_type_class = 0; \ } \ TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.record = (VALUE); \ } while (0) /* For a pointer-to-member type of the form `T X::*', this is `X'. For a type like `void (X::*)() const', this type is `X', not `const X'. To get at the `const X' you have to look at the TYPE_PTRMEM_POINTED_TO_TYPE; there, the first parameter will have type `const X*'. */ #define TYPE_PTRMEM_CLASS_TYPE(NODE) \ (TYPE_PTRMEM_P (NODE) \ ? TYPE_OFFSET_BASETYPE (NODE) \ : TYPE_PTRMEMFUNC_OBJECT_TYPE (NODE)) /* For a pointer-to-member type of the form `T X::*', this is `T'. */ #define TYPE_PTRMEM_POINTED_TO_TYPE(NODE) \ (TYPE_PTRMEM_P (NODE) \ ? TREE_TYPE (NODE) \ : TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE))) /* For a pointer-to-member constant `X::Y' this is the RECORD_TYPE for `X'. 
*/
#define PTRMEM_CST_CLASS(NODE) \
  TYPE_PTRMEM_CLASS_TYPE (TREE_TYPE (PTRMEM_CST_CHECK (NODE)))

/* For a pointer-to-member constant `X::Y' this is the _DECL for
   `Y'.  */
#define PTRMEM_CST_MEMBER(NODE) (((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->member)

/* The expression in question for a TYPEOF_TYPE.  */
#define TYPEOF_TYPE_EXPR(NODE) (TYPEOF_TYPE_CHECK (NODE))->type.values

/* The expression in question for a DECLTYPE_TYPE.  */
#define DECLTYPE_TYPE_EXPR(NODE) (DECLTYPE_TYPE_CHECK (NODE))->type.values

/* Whether the DECLTYPE_TYPE_EXPR of NODE was originally parsed as an
   id-expression or a member-access expression.  When false, it was
   parsed as a full expression.  (The distinction matters: decltype of
   an id-expression yields the declared type, not a reference.)  */
#define DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P(NODE) \
  (DECLTYPE_TYPE_CHECK (NODE))->type.string_flag

/* These flags indicate that we want different semantics from normal
   decltype: lambda capture just drops references, lambda return also
   does type decay.  */
#define DECLTYPE_FOR_LAMBDA_CAPTURE(NODE) \
  TREE_LANG_FLAG_0 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_LAMBDA_RETURN(NODE) \
  TREE_LANG_FLAG_1 (DECLTYPE_TYPE_CHECK (NODE))

/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was
   specified in its declaration.  This can also be set for an
   erroneously declared PARM_DECL.  */
#define DECL_THIS_EXTERN(NODE) \
  DECL_LANG_FLAG_2 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))

/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `static' was
   specified in its declaration.  This can also be set for an
   erroneously declared PARM_DECL.  */
#define DECL_THIS_STATIC(NODE) \
  DECL_LANG_FLAG_6 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))

/* Nonzero for FIELD_DECL node means that this field is a base class
   of the parent object, as opposed to a member field.  */
#define DECL_FIELD_IS_BASE(NODE) \
  DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE))

/* Nonzero for FIELD_DECL node means that this field is a simple (no
   explicit initializer) lambda capture field, making it invisible to
   name lookup in unevaluated contexts.
*/
#define DECL_NORMAL_CAPTURE_P(NODE) \
  DECL_LANG_FLAG_7 (FIELD_DECL_CHECK (NODE))

/* Nonzero if TYPE is an anonymous union or struct type.  We have to use a
   flag for this because "A union for which objects or pointers are
   declared is not an anonymous union" [class.union].  */
#define ANON_AGGR_TYPE_P(NODE)				\
  (CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr)
#define SET_ANON_AGGR_TYPE_P(NODE)			\
  (LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1)

/* Nonzero if TYPE is an anonymous union type.  */
#define ANON_UNION_TYPE_P(NODE) \
  (TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE))

/* Define fields and accessors for nodes representing declared names.  */

#define TYPE_WAS_ANONYMOUS(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->was_anonymous)

/* C++: all of these are overloaded!  These apply only to TYPE_DECLs.  */

/* The format of each node in the DECL_FRIENDLIST is as follows:  The
   TREE_PURPOSE will be the name of a function, i.e., an
   IDENTIFIER_NODE.  The TREE_VALUE will be itself a TREE_LIST, whose
   TREE_VALUEs are friends with the given name.  */
#define DECL_FRIENDLIST(NODE)		(DECL_INITIAL (NODE))
#define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST))
#define FRIEND_DECLS(LIST) (TREE_VALUE (LIST))

/* The DECL_ACCESS, if non-NULL, is a TREE_LIST.  The TREE_PURPOSE of
   each node is a type; the TREE_VALUE is the access granted for this
   DECL in that type.  The DECL_ACCESS is set by access declarations.
   For example, if a member that would normally be public in a
   derived class is made protected, then the derived class and the
   protected_access_node will appear in the DECL_ACCESS for the node.  */
#define DECL_ACCESS(NODE) (LANG_DECL_U2_CHECK (NODE, 0)->access)

/* Nonzero if the FUNCTION_DECL is a global constructor.  */
#define DECL_GLOBAL_CTOR_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->global_ctor_p)

/* Nonzero if the FUNCTION_DECL is a global destructor.  */
#define DECL_GLOBAL_DTOR_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->global_dtor_p)

/* Accessor macros for C++ template decl nodes.
*/

/* The DECL_TEMPLATE_PARMS are a list.  The TREE_PURPOSE of each node
   is a INT_CST whose TREE_INT_CST_LOW indicates the level of the
   template parameters, with 1 being the outermost set of template
   parameters.  The TREE_VALUE is a vector, whose elements are the
   template parameters at each level.  Each element in the vector is a
   TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is a
   non-type parameter), or a TYPE_DECL (if the parameter is a type
   parameter).  The TREE_PURPOSE is the default value, if any.  The
   TEMPLATE_PARM_INDEX for the parameter is available as the
   DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a
   TYPE_DECL).  */
#define DECL_TEMPLATE_PARMS(NODE)       DECL_NON_COMMON_CHECK (NODE)->decl_non_common.arguments
#define DECL_INNERMOST_TEMPLATE_PARMS(NODE) \
   INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (NODE))
#define DECL_NTPARMS(NODE) \
   TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (NODE))
/* For function, method, class-data templates.  */
#define DECL_TEMPLATE_RESULT(NODE)      DECL_RESULT_FLD (NODE)
/* For a function template at namespace scope, DECL_TEMPLATE_INSTANTIATIONS
   lists all instantiations and specializations of the function so that
   tsubst_friend_function can reassign them to another template if we find
   that the namespace-scope template is really a partial instantiation of a
   friend template.

   For a class template the DECL_TEMPLATE_INSTANTIATIONS list holds
   all instantiations and specializations of the class type, including
   partial instantiations and partial specializations, so that if we
   explicitly specialize a partial instantiation we can walk the list
   in maybe_process_partial_specialization and reassign them or complain
   as appropriate.

   In both cases, the TREE_PURPOSE of each node contains the arguments
   used; the TREE_VALUE contains the generated variable.  The template
   arguments are always complete.  For example, given:

     template <class T> struct S1 {
       template <class U> struct S2 {};
       template <class U> struct S2<U*> {};
      };

   the record for the partial specialization will contain, as its
   argument list, { {T}, {U*} }, and will be on the
   DECL_TEMPLATE_INSTANTIATIONS list for `template <class T> template
   <class U> struct S1<T>::S2'.

   This list is not used for other templates.  */
#define DECL_TEMPLATE_INSTANTIATIONS(NODE) DECL_VINDEX (NODE)
/* For a class template, this list contains the partial
   specializations of this template.  (Full specializations are not
   recorded on this list.)  The TREE_PURPOSE holds the arguments used
   in the partial specialization (e.g., for `template <class T> struct
   S<T*, int>' this will be `T*'.)  The arguments will also include
   any outer template arguments.  The TREE_VALUE holds the innermost
   template parameters for the specialization (e.g., `T' in the
   example above.)  The TREE_TYPE is the _TYPE node for the partial
   specialization.

   This list is not used for other templates.  */
#define DECL_TEMPLATE_SPECIALIZATIONS(NODE)     DECL_SIZE (NODE)

/* Nonzero for a DECL which is actually a template parameter.  Keep
   these checks in ascending tree code order.   */
#define DECL_TEMPLATE_PARM_P(NODE)		\
  (DECL_LANG_FLAG_0 (NODE)			\
   && (TREE_CODE (NODE) == CONST_DECL		\
       || TREE_CODE (NODE) == PARM_DECL		\
       || TREE_CODE (NODE) == TYPE_DECL		\
       || TREE_CODE (NODE) == TEMPLATE_DECL))

/* Mark NODE as a template parameter.  */
#define SET_DECL_TEMPLATE_PARM_P(NODE) \
  (DECL_LANG_FLAG_0 (NODE) = 1)

/* Nonzero if NODE is a template template parameter.  */
#define DECL_TEMPLATE_TEMPLATE_PARM_P(NODE) \
  (TREE_CODE (NODE) == TEMPLATE_DECL && DECL_TEMPLATE_PARM_P (NODE))

/* Nonzero if NODE is a TEMPLATE_DECL representing an
   UNBOUND_CLASS_TEMPLATE tree node.
*/
#define DECL_UNBOUND_CLASS_TEMPLATE_P(NODE) \
  (TREE_CODE (NODE) == TEMPLATE_DECL && !DECL_TEMPLATE_RESULT (NODE))

/* Nonzero for a DECL that represents a function template.  */
#define DECL_FUNCTION_TEMPLATE_P(NODE)  \
  (TREE_CODE (NODE) == TEMPLATE_DECL \
   && !DECL_UNBOUND_CLASS_TEMPLATE_P (NODE) \
   && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == FUNCTION_DECL)

/* Nonzero for a DECL that represents a template class.  */
#define DECL_CLASS_TEMPLATE_P(NODE)				\
  (TREE_CODE (NODE) == TEMPLATE_DECL				\
   && DECL_TEMPLATE_RESULT (NODE) != NULL_TREE			\
   && DECL_IMPLICIT_TYPEDEF_P (DECL_TEMPLATE_RESULT (NODE)))

/* Nonzero if NODE declares a type.  */
#define DECL_DECLARES_TYPE_P(NODE) \
  (TREE_CODE (NODE) == TYPE_DECL || DECL_CLASS_TEMPLATE_P (NODE))

/* Nonzero if NODE declares a function.  */
#define DECL_DECLARES_FUNCTION_P(NODE) \
  (TREE_CODE (NODE) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (NODE))

/* Nonzero if NODE is the typedef implicitly generated for a type when
   the type is declared.  In C++, `struct S {};' is roughly
   equivalent to `struct S {}; typedef struct S S;' in C.
   DECL_IMPLICIT_TYPEDEF_P will hold for the typedef indicated in this
   example.  In C++, there is a second implicit typedef for each
   class, in the scope of `S' itself, so that you can say `S::S'.
   DECL_SELF_REFERENCE_P will hold for that second typedef.  */
#define DECL_IMPLICIT_TYPEDEF_P(NODE) \
  (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_2 (NODE))
#define SET_DECL_IMPLICIT_TYPEDEF_P(NODE) \
  (DECL_LANG_FLAG_2 (NODE) = 1)
#define DECL_SELF_REFERENCE_P(NODE) \
  (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_4 (NODE))
#define SET_DECL_SELF_REFERENCE_P(NODE) \
  (DECL_LANG_FLAG_4 (NODE) = 1)

/* A `primary' template is one that has its own template header.  A
   member function of a class template is a template, but not primary.
   A member template is primary.  Friend templates are primary, too.  */

/* Returns the primary template corresponding to these parameters.
*/
#define DECL_PRIMARY_TEMPLATE(NODE) \
  (TREE_TYPE (DECL_INNERMOST_TEMPLATE_PARMS (NODE)))

/* Returns nonzero if NODE is a primary template.  */
#define PRIMARY_TEMPLATE_P(NODE) (DECL_PRIMARY_TEMPLATE (NODE) == (NODE))

/* Nonzero iff NODE is a specialization of a template.  The value
   indicates the type of specializations:

     1=implicit instantiation

     2=partial or explicit specialization, e.g.:

        template <> int min<int> (int, int),

     3=explicit instantiation, e.g.:

        template int min<int> (int, int);

   Note that NODE will be marked as a specialization even if the
   template it is instantiating is not a primary template.  For
   example, given:

     template <typename T> struct O {
       void f();
       struct I {};
     };

   both O<int>::f and O<int>::I will be marked as instantiations.

   If DECL_USE_TEMPLATE is nonzero, then DECL_TEMPLATE_INFO will also
   be non-NULL.  */
#define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC (NODE)->u.base.use_template)

/* Like DECL_USE_TEMPLATE, but for class types.  */
#define CLASSTYPE_USE_TEMPLATE(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->use_template)

/* True if NODE is a specialization of a primary template.  */
#define CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P(NODE)	\
  (CLASS_TYPE_P (NODE)						\
   && CLASSTYPE_USE_TEMPLATE (NODE)				\
   && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))

/* The & 1 test matches values 1 and 3 above, i.e. any instantiation,
   implicit or explicit.  */
#define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1)
#define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \
  (CLASSTYPE_USE_TEMPLATE (NODE) & 1)

#define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2)
#define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2)

/* Returns true for an explicit or partial specialization of a class
   template.
*/
#define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
  (CLASSTYPE_USE_TEMPLATE (NODE) == 2)
#define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
  (CLASSTYPE_USE_TEMPLATE (NODE) = 2)

#define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1)
#define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1)
#define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
  (CLASSTYPE_USE_TEMPLATE (NODE) == 1)
#define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
  (CLASSTYPE_USE_TEMPLATE (NODE) = 1)

#define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3)
#define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3)
#define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
  (CLASSTYPE_USE_TEMPLATE (NODE) == 3)
#define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
  (CLASSTYPE_USE_TEMPLATE (NODE) = 3)

/* Nonzero if DECL is a friend function which is an instantiation
   from the point of view of the compiler, but not from the point of
   view of the language.  For example given:
      template <class T> struct S { friend void f(T) {}; };
   the declaration of `void f(int)' generated when S<int> is
   instantiated will not be a DECL_TEMPLATE_INSTANTIATION, but will be
   a DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION.  */
#define DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION(DECL) \
  (DECL_TEMPLATE_INFO (DECL) && !DECL_USE_TEMPLATE (DECL))

/* Nonzero iff we are currently processing a declaration for an
   entity with its own template parameter list, and which is not a
   full specialization.  */
#define PROCESSING_REAL_TEMPLATE_DECL_P() \
  (processing_template_decl > template_class_depth (current_scope ()))

/* Nonzero if this VAR_DECL or FUNCTION_DECL has already been
   instantiated, i.e. its definition has been generated from the
   pattern given in the template.  */
#define DECL_TEMPLATE_INSTANTIATED(NODE) \
  DECL_LANG_FLAG_1 (VAR_OR_FUNCTION_DECL_CHECK (NODE))

/* We know what we're doing with this decl now.
*/
#define DECL_INTERFACE_KNOWN(NODE) DECL_LANG_FLAG_5 (NODE)

/* DECL_EXTERNAL must be set on a decl until the decl is actually emitted,
   so that assemble_external will work properly.  So we have this flag to
   tell us whether the decl is really not external.

   This flag does not indicate whether or not the decl is defined in the
   current translation unit; it indicates whether or not we should emit the
   decl at the end of compilation if it is defined and needed.  */
#define DECL_NOT_REALLY_EXTERN(NODE) \
  (DECL_LANG_SPECIFIC (NODE)->u.base.not_really_extern)

#define DECL_REALLY_EXTERN(NODE) \
  (DECL_EXTERNAL (NODE) && ! DECL_NOT_REALLY_EXTERN (NODE))

/* A thunk is a stub function.

   A thunk is an alternate entry point for an ordinary FUNCTION_DECL.
   The address of the ordinary FUNCTION_DECL is given by the
   DECL_INITIAL, which is always an ADDR_EXPR whose operand is a
   FUNCTION_DECL.  The job of the thunk is to either adjust the this
   pointer before transferring control to the FUNCTION_DECL, or call
   FUNCTION_DECL and then adjust the result value.  Note, the result
   pointer adjusting thunk must perform a call to the thunked
   function, (or be implemented via passing some invisible parameter
   to the thunked function, which is modified to perform the
   adjustment just before returning).

   A thunk may perform either, or both, of the following operations:

   o Adjust the this or result pointer by a constant offset.
   o Adjust the this or result pointer by looking up a vcall or vbase offset
     in the vtable.

   A this pointer adjusting thunk converts from a base to a derived
   class, and hence adds the offsets.  A result pointer adjusting thunk
   converts from a derived class to a base, and hence subtracts the
   offsets.  If both operations are performed, then the constant
   adjustment is performed first for this pointer adjustment and last
   for the result pointer adjustment.

   The constant adjustment is given by THUNK_FIXED_OFFSET.  If the
   vcall or vbase offset is required, THUNK_VIRTUAL_OFFSET is used.
   For this pointer adjusting thunks, it is the vcall offset into the
   vtable.  For result pointer adjusting thunks it is the binfo of the
   virtual base to convert to.  Use that binfo's vbase offset.

   It is possible to have equivalent covariant thunks.  These are
   distinct virtual covariant thunks whose vbase offsets happen to
   have the same value.  THUNK_ALIAS is used to pick one as the
   canonical thunk, which will get all the this pointer adjusting
   thunks attached to it.  */

/* An integer indicating how many bytes should be subtracted from the
   this or result pointer when this function is called.  */
#define THUNK_FIXED_OFFSET(DECL) \
  (DECL_LANG_SPECIFIC (THUNK_FUNCTION_CHECK (DECL))->u.fn.u5.fixed_offset)

/* A tree indicating how to perform the virtual adjustment.  For a this
   adjusting thunk it is the number of bytes to be added to the vtable
   to find the vcall offset.  For a result adjusting thunk, it is the
   binfo of the relevant virtual base.  If NULL, then there is no
   virtual adjust.  (The vptr is always located at offset zero from
   the this or result pointer.)  (If the covariant type is within the
   class hierarchy being laid out, the vbase index is not yet known
   at the point we need to create the thunks, hence the need to use
   binfos.)  */
#define THUNK_VIRTUAL_OFFSET(DECL) \
  (LANG_DECL_U2_CHECK (FUNCTION_DECL_CHECK (DECL), 0)->access)

/* A thunk which is equivalent to another thunk.  */
#define THUNK_ALIAS(DECL) \
  (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.min.template_info)

/* For thunk NODE, this is the FUNCTION_DECL thunked to.  It is
   possible for the target to be a thunk too.  */
#define THUNK_TARGET(NODE)				\
  (LANG_DECL_FN_CHECK (NODE)->befriending_classes)

/* True for a SCOPE_REF iff the "template" keyword was used to
   indicate that the qualified name denotes a template.  */
#define QUALIFIED_NAME_IS_TEMPLATE(NODE) \
  (TREE_LANG_FLAG_0 (SCOPE_REF_CHECK (NODE)))

/* True for an OMP_ATOMIC that has dependent parameters.
   These are stored as an expr in operand 1, and integer_zero_node in
   operand 0.  */
#define OMP_ATOMIC_DEPENDENT_P(NODE) \
  (TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == INTEGER_CST)

/* Used while gimplifying continue statements bound to OMP_FOR nodes.  */
#define OMP_FOR_GIMPLIFYING_P(NODE) \
  (TREE_LANG_FLAG_0 (OMP_FOR_CHECK (NODE)))

/* A language-specific token attached to the OpenMP data clauses to
   hold code (or code fragments) related to ctors, dtors, and op=.
   See semantics.c for details.  */
#define CP_OMP_CLAUSE_INFO(NODE) \
  TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \
				     OMP_CLAUSE_COPYPRIVATE))

/* These macros provide convenient access to the various _STMT nodes
   created when parsing template declarations.  */
#define TRY_STMTS(NODE)		TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0)
#define TRY_HANDLERS(NODE)	TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 1)

#define EH_SPEC_STMTS(NODE)	TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 0)
#define EH_SPEC_RAISES(NODE)	TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 1)

#define USING_STMT_NAMESPACE(NODE) TREE_OPERAND (USING_STMT_CHECK (NODE), 0)

/* Nonzero if this try block is a function try block.  */
#define FN_TRY_BLOCK_P(NODE)	TREE_LANG_FLAG_3 (TRY_BLOCK_CHECK (NODE))

/* HANDLER accessors: the exception parameter, the handler body, and
   the caught type.  */
#define HANDLER_PARMS(NODE)	TREE_OPERAND (HANDLER_CHECK (NODE), 0)
#define HANDLER_BODY(NODE)	TREE_OPERAND (HANDLER_CHECK (NODE), 1)
#define HANDLER_TYPE(NODE)	TREE_TYPE (HANDLER_CHECK (NODE))

/* CLEANUP_STMT accessors.  The statement(s) covered, the cleanup to run
   and the VAR_DECL for which this cleanup exists.  */
#define CLEANUP_BODY(NODE)	TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 0)
#define CLEANUP_EXPR(NODE)	TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 1)
#define CLEANUP_DECL(NODE)	TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 2)

/* IF_STMT accessors.  These give access to the condition of the if
   statement, the then block of the if statement, and the else block
   of the if statement if it exists.
*/
#define IF_COND(NODE)		TREE_OPERAND (IF_STMT_CHECK (NODE), 0)
#define THEN_CLAUSE(NODE)	TREE_OPERAND (IF_STMT_CHECK (NODE), 1)
#define ELSE_CLAUSE(NODE)	TREE_OPERAND (IF_STMT_CHECK (NODE), 2)

/* WHILE_STMT accessors.  These give access to the condition of the
   while statement and the body of the while statement, respectively.  */
#define WHILE_COND(NODE)	TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0)
#define WHILE_BODY(NODE)	TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1)

/* DO_STMT accessors.  These give access to the condition of the do
   statement and the body of the do statement, respectively.  */
#define DO_COND(NODE)		TREE_OPERAND (DO_STMT_CHECK (NODE), 0)
#define DO_BODY(NODE)		TREE_OPERAND (DO_STMT_CHECK (NODE), 1)

/* FOR_STMT accessors.  These give access to the init statement,
   condition, update expression, and body of the for statement,
   respectively.  */
#define FOR_INIT_STMT(NODE)	TREE_OPERAND (FOR_STMT_CHECK (NODE), 0)
#define FOR_COND(NODE)		TREE_OPERAND (FOR_STMT_CHECK (NODE), 1)
#define FOR_EXPR(NODE)		TREE_OPERAND (FOR_STMT_CHECK (NODE), 2)
#define FOR_BODY(NODE)		TREE_OPERAND (FOR_STMT_CHECK (NODE), 3)

/* RANGE_FOR_STMT accessors.  These give access to the declarator,
   expression and body of the statement, respectively.  */
#define RANGE_FOR_DECL(NODE)	TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 0)
#define RANGE_FOR_EXPR(NODE)	TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 1)
#define RANGE_FOR_BODY(NODE)	TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 2)

/* SWITCH_STMT accessors: condition, body, and the type in which the
   condition is expressed.  */
#define SWITCH_STMT_COND(NODE)	TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0)
#define SWITCH_STMT_BODY(NODE)	TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1)
#define SWITCH_STMT_TYPE(NODE)	TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2)

/* STMT_EXPR accessor.  */
#define STMT_EXPR_STMT(NODE)	TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0)

/* EXPR_STMT accessor.  This gives the expression associated with an
   expression statement.
*/
#define EXPR_STMT_EXPR(NODE)	TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0)

/* True if this TARGET_EXPR was created by build_cplus_new, and so we can
   discard it if it isn't useful.  */
#define TARGET_EXPR_IMPLICIT_P(NODE) \
  TREE_LANG_FLAG_0 (TARGET_EXPR_CHECK (NODE))

/* True if this TARGET_EXPR is the result of list-initialization of a
   temporary.  */
#define TARGET_EXPR_LIST_INIT_P(NODE) \
  TREE_LANG_FLAG_1 (TARGET_EXPR_CHECK (NODE))

/* True if this TARGET_EXPR expresses direct-initialization of an object
   to be named later.  */
#define TARGET_EXPR_DIRECT_INIT_P(NODE) \
  TREE_LANG_FLAG_2 (TARGET_EXPR_CHECK (NODE))

/* True if EXPR expresses direct-initialization of a TYPE.  */
#define DIRECT_INIT_EXPR_P(TYPE,EXPR)					\
  (TREE_CODE (EXPR) == TARGET_EXPR && TREE_LANG_FLAG_2 (EXPR)		\
   && same_type_ignoring_top_level_qualifiers_p (TYPE, TREE_TYPE (EXPR)))

/* An enumeration of the kind of tags that C++ accepts.  */
enum tag_types {
  none_type = 0, /* Not a tag type.  */
  record_type,   /* "struct" types.  */
  class_type,    /* "class" types.  */
  union_type,    /* "union" types.  */
  enum_type,     /* "enum" types.  */
  typename_type  /* "typename" types.  */
};

/* The various kinds of lvalues we distinguish.  These are bit flags
   and may be combined.  */
enum cp_lvalue_kind_flags {
  clk_none = 0,     /* Things that are not an lvalue.  */
  clk_ordinary = 1, /* An ordinary lvalue.  */
  clk_rvalueref = 2,/* An rvalue formed using an rvalue reference */
  clk_class = 4,    /* An rvalue of class-type.  */
  clk_bitfield = 8, /* An lvalue for a bit-field.  */
  clk_packed = 16   /* An lvalue for a packed field.  */
};

/* This type is used for parameters and variables which hold
   combinations of the flags in enum cp_lvalue_kind_flags.  */
typedef int cp_lvalue_kind;

/* Various kinds of template specialization, instantiation, etc.  */
typedef enum tmpl_spec_kind {
  tsk_none,                /* Not a template at all.  */
  tsk_invalid_member_spec, /* An explicit member template
			      specialization, but the enclosing
			      classes have not all been explicitly
			      specialized.  */
  tsk_invalid_expl_inst,   /* An explicit instantiation containing
			      template parameter lists.  */
  tsk_excessive_parms,     /* A template declaration with too many
			      template parameter lists.  */
  tsk_insufficient_parms,  /* A template declaration with too few
			      parameter lists.  */
  tsk_template,            /* A template declaration.  */
  tsk_expl_spec,           /* An explicit specialization.  */
  tsk_expl_inst            /* An explicit instantiation.  */
} tmpl_spec_kind;

/* The various kinds of access.  BINFO_ACCESS depends on these being
   two bit quantities.  The numerical values are important; they are
   used to initialize RTTI data structures, so changing them changes
   the ABI.  */
typedef enum access_kind {
  ak_none = 0,      /* Inaccessible.  */
  ak_public = 1,    /* Accessible, as a `public' thing.  */
  ak_protected = 2, /* Accessible, as a `protected' thing.  */
  ak_private = 3    /* Accessible, as a `private' thing.  */
} access_kind;

/* The various kinds of special functions.  If you add to this list,
   you should update special_function_p as well.  */
typedef enum special_function_kind {
  sfk_none = 0,            /* Not a special function.  This enumeral
			      must have value zero; see
			      special_function_p.  */
  sfk_constructor,         /* A constructor.  */
  sfk_copy_constructor,    /* A copy constructor.  */
  sfk_move_constructor,    /* A move constructor.  */
  sfk_copy_assignment,     /* A copy assignment operator.  */
  sfk_move_assignment,     /* A move assignment operator.  */
  sfk_destructor,          /* A destructor.  */
  sfk_complete_destructor, /* A destructor for complete objects.  */
  sfk_base_destructor,     /* A destructor for base subobjects.  */
  sfk_deleting_destructor, /* A destructor for complete objects that
			      deletes the object after it has been
			      destroyed.  */
  sfk_conversion           /* A conversion operator.  */
} special_function_kind;

/* The various kinds of linkage.
   From [basic.link],

      A name is said to have linkage when it might denote the same
      object, reference, function, type, template, namespace or value
      as a name introduced in another scope:

      -- When a name has external linkage, the entity it denotes can
	 be referred to from scopes of other translation units or from
	 other scopes of the same translation unit.

      -- When a name has internal linkage, the entity it denotes can
	 be referred to by names from other scopes in the same
	 translation unit.

      -- When a name has no linkage, the entity it denotes cannot be
	 referred to by names from other scopes.  */
typedef enum linkage_kind {
  lk_none,     /* No linkage.  */
  lk_internal, /* Internal linkage.  */
  lk_external  /* External linkage.  */
} linkage_kind;

/* The storage duration of an object: static, thread-local, automatic,
   or dynamically allocated.  */
typedef enum duration_kind {
  dk_static,
  dk_thread,
  dk_auto,
  dk_dynamic
} duration_kind;

/* Bitmask flags to control type substitution.  */
enum tsubst_flags {
  tf_none = 0,               /* nothing special */
  tf_error = 1 << 0,         /* give error messages */
  tf_warning = 1 << 1,       /* give warnings too */
  tf_ignore_bad_quals = 1 << 2, /* ignore bad cvr qualifiers */
  tf_keep_type_decl = 1 << 3,   /* retain typedef type decls
				   (make_typename_type use) */
  tf_ptrmem_ok = 1 << 4,     /* pointers to member ok (internal
				instantiate_type use) */
  tf_user = 1 << 5,          /* found template must be a user template
				(lookup_template_class use) */
  tf_conv = 1 << 6,          /* We are determining what kind of
				conversion might be permissible, not
				actually performing the conversion.  */
  tf_no_access_control = 1 << 7, /* Do not perform access checks, even
				    when issuing other errors.  */
  /* Convenient substitution flags combinations.  */
  tf_warning_or_error = tf_warning | tf_error
};

/* This type is used for parameters and variables which hold
   combinations of the flags in enum tsubst_flags.  */
typedef int tsubst_flags_t;

/* The kind of checking we can do looking in a class hierarchy.
*/
enum base_access_flags {
  ba_any = 0,           /* Do not check access, allow an ambiguous base,
			   prefer a non-virtual base */
  ba_unique = 1 << 0,   /* Must be a unique base.  */
  ba_check_bit = 1 << 1, /* Check access.  */
  ba_check = ba_unique | ba_check_bit,
  ba_ignore_scope = 1 << 2, /* Ignore access allowed by local scope.  */
  ba_quiet = 1 << 3     /* Do not issue error messages.  */
};

/* This type is used for parameters and variables which hold
   combinations of the flags in enum base_access_flags.  */
typedef int base_access;

/* The various kinds of access check during parsing.  */
typedef enum deferring_kind {
  dk_no_deferred = 0, /* Check access immediately */
  dk_deferred = 1,    /* Deferred check */
  dk_no_check = 2     /* No access check */
} deferring_kind;

/* The kind of base we can find, looking in a class hierarchy.
   Values <0 indicate we failed.  */
typedef enum base_kind {
  bk_inaccessible = -3, /* The base is inaccessible */
  bk_ambig = -2,        /* The base is ambiguous */
  bk_not_base = -1,     /* It is not a base */
  bk_same_type = 0,     /* It is the same type */
  bk_proper_base = 1,   /* It is a proper base */
  bk_via_virtual = 2    /* It is a proper base, but via a virtual
			   path.  This might not be the canonical
			   binfo.  */
} base_kind;

/* Node for "pointer to (virtual) function".  This may be distinct
   from ptr_type_node so gdb can distinguish them.  */
#define vfunc_ptr_type_node  vtable_entry_type

/* For building calls to `delete'.  */
extern GTY(()) tree integer_two_node;

/* The number of function bodies which we are currently processing.
   (Zero if we are at namespace scope, one inside the body of a
   function, two inside the body of a function in a local class, etc.)  */
extern int function_depth;

/* In parser.c.  */

/* Nonzero if we are parsing an unevaluated operand: an operand to
   sizeof, typeof, or alignof.  This is a count since operands to
   sizeof can be nested.
*/ extern int cp_unevaluated_operand; extern tree cp_convert_range_for (tree, tree, tree); /* in pt.c */ /* These values are used for the `STRICT' parameter to type_unification and fn_type_unification. Their meanings are described with the documentation for fn_type_unification. */ typedef enum unification_kind_t { DEDUCE_CALL, DEDUCE_CONV, DEDUCE_EXACT } unification_kind_t; /* in class.c */ extern int current_class_depth; /* An array of all local classes present in this translation unit, in declaration order. */ extern GTY(()) VEC(tree,gc) *local_classes; /* Here's where we control how name mangling takes place. */ /* Cannot use '$' up front, because this confuses gdb (names beginning with '$' are gdb-local identifiers). Note that all forms in which the '$' is significant are long enough for direct indexing (meaning that if we know there is a '$' at a particular location, we can index into the string at any other location that provides distinguishing characters). */ /* Define NO_DOT_IN_LABEL in your favorite tm file if your assembler doesn't allow '.' in symbol names. */ #ifndef NO_DOT_IN_LABEL #define JOINER '.' #define AUTO_TEMP_NAME "_.tmp_" #define VFIELD_BASE ".vf" #define VFIELD_NAME "_vptr." 
#define VFIELD_NAME_FORMAT "_vptr.%s"
#define ANON_AGGRNAME_FORMAT "._%d"

#else /* NO_DOT_IN_LABEL */

#ifndef NO_DOLLAR_IN_LABEL

#define JOINER '$'
#define AUTO_TEMP_NAME "_$tmp_"
#define VFIELD_BASE "$vf"
#define VFIELD_NAME "_vptr$"
#define VFIELD_NAME_FORMAT "_vptr$%s"
#define ANON_AGGRNAME_FORMAT "$_%d"

#else /* NO_DOLLAR_IN_LABEL */

/* Neither '.' nor '$' is usable, so fall back to '__'-prefixed names.
   The *_P predicates here test identifiers by string prefix instead of
   by a JOINER character.  */
#define IN_CHARGE_NAME "__in_chrg"
#define AUTO_TEMP_NAME "__tmp_"
#define TEMP_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, \
	     sizeof (AUTO_TEMP_NAME) - 1))
#define VTABLE_NAME "__vt_"
#define VTABLE_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \
	     sizeof (VTABLE_NAME) - 1))
#define VFIELD_BASE "__vfb"
#define VFIELD_NAME "__vptr_"
#define VFIELD_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \
	     sizeof (VFIELD_NAME) - 1))
#define VFIELD_NAME_FORMAT "__vptr_%s"
#define ANON_AGGRNAME_PREFIX "__anon_"
#define ANON_AGGRNAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), ANON_AGGRNAME_PREFIX, \
	     sizeof (ANON_AGGRNAME_PREFIX) - 1))
#define ANON_AGGRNAME_FORMAT "__anon_%d"

#endif	/* NO_DOLLAR_IN_LABEL */
#endif	/* NO_DOT_IN_LABEL */

#define THIS_NAME "this"

#define IN_CHARGE_NAME "__in_chrg"

#define VTBL_PTR_TYPE		"__vtbl_ptr_type"
#define VTABLE_DELTA_NAME	"__delta"
#define VTABLE_PFN_NAME		"__pfn"

#define LAMBDANAME_PREFIX "__lambda"
#define LAMBDANAME_FORMAT LAMBDANAME_PREFIX "%d"
#define LAMBDANAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), \
	     LAMBDANAME_PREFIX, \
	     sizeof (LAMBDANAME_PREFIX) - 1))

#if !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL)

/* When a JOINER character is available, mangled names can be recognized
   by direct indexing rather than a prefix comparison.  */
#define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \
  && IDENTIFIER_POINTER (ID_NODE)[2] == 't' \
  && IDENTIFIER_POINTER (ID_NODE)[3] == JOINER)

#define TEMP_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, sizeof (AUTO_TEMP_NAME)-1))
#define VFIELD_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1))

/* For anonymous aggregate types, we need some sort of name to
   hold on to.  In practice, this should not appear, but it should
   not be harmful if it does.  */
#define ANON_AGGRNAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[0] == JOINER \
				  && IDENTIFIER_POINTER (ID_NODE)[1] == '_')
#endif /* !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */

/* Nonzero if we're done parsing and into end-of-file activities.  */
extern int at_eof;

/* A list of namespace-scope objects which have constructors or
   destructors which reside in the global scope.  The decl is stored
   in the TREE_VALUE slot and the initializer is stored in the
   TREE_PURPOSE slot.  */
extern GTY(()) tree static_aggregates;

enum overload_flags { NO_SPECIAL = 0, DTOR_FLAG, TYPENAME_FLAG };

/* These are used as bits in flags passed to various functions to
   control their behavior.  Despite the LOOKUP_ prefix, many of these
   do not control name lookup.  ???  Functions using these flags should
   probably be modified to accept explicit boolean flags for the
   behaviors relevant to them.  */
/* Check for access violations.  */
#define LOOKUP_PROTECT (1 << 0)
/* Complain if no suitable member function matching the arguments is
   found.  */
#define LOOKUP_COMPLAIN (1 << 1)
#define LOOKUP_NORMAL (LOOKUP_PROTECT | LOOKUP_COMPLAIN)
/* Even if the function found by lookup is a virtual function, it
   should be called directly.  */
#define LOOKUP_NONVIRTUAL (1 << 2)
/* Non-converting (i.e., "explicit") constructors are not tried.  This flag
   indicates that we are not performing direct-initialization.  */
#define LOOKUP_ONLYCONVERTING (1 << 3)
#define LOOKUP_IMPLICIT (LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING)
/* If a temporary is created, it should be created so that it lives
   as long as the current variable bindings; otherwise it only lives
   until the end of the complete-expression.  It also forces
   direct-initialization in cases where other parts of the compiler
   have already generated a temporary, such as reference
   initialization and the catch parameter.
*/
#define DIRECT_BIND (1 << 4)
/* We're performing a user-defined conversion, so more user-defined
   conversions are not permitted (only built-in conversions).  */
#define LOOKUP_NO_CONVERSION (1 << 5)
/* The user has explicitly called a destructor.  (Therefore, we do
   not need to check that the object is non-NULL before calling the
   destructor.)  */
#define LOOKUP_DESTRUCTOR (1 << 6)
/* Do not permit references to bind to temporaries.  */
#define LOOKUP_NO_TEMP_BIND (1 << 7)
/* Do not accept objects, and possibly namespaces.  */
#define LOOKUP_PREFER_TYPES (1 << 8)
/* Do not accept objects, and possibly types.  */
#define LOOKUP_PREFER_NAMESPACES (1 << 9)
/* Accept types or namespaces.  */
#define LOOKUP_PREFER_BOTH (LOOKUP_PREFER_TYPES | LOOKUP_PREFER_NAMESPACES)
/* Return friend declarations and un-declared builtin functions.
   (Normally, these entities are registered in the symbol table, but
   not found by lookup.)  */
#define LOOKUP_HIDDEN (LOOKUP_PREFER_NAMESPACES << 1)
/* Prefer that the lvalue be treated as an rvalue.  */
#define LOOKUP_PREFER_RVALUE (LOOKUP_HIDDEN << 1)
/* We're inside an init-list, so narrowing conversions are ill-formed.  */
#define LOOKUP_NO_NARROWING (LOOKUP_PREFER_RVALUE << 1)
/* Avoid user-defined conversions for the first parameter of a copy
   constructor (or move constructor).  */
#define LOOKUP_NO_COPY_CTOR_CONVERSION (LOOKUP_NO_NARROWING << 1)
/* This is the first parameter of a copy constructor.  */
#define LOOKUP_COPY_PARM (LOOKUP_NO_COPY_CTOR_CONVERSION << 1)
/* We only want to consider list constructors.  */
#define LOOKUP_LIST_ONLY (LOOKUP_COPY_PARM << 1)
/* Return after determining which function to call and checking access.
   Used by synthesized_method_walk to determine which functions will
   be called to initialize subobjects, in order to determine exception
   specification and possible implicit delete.
   This is kind of a hack, but since access control doesn't respect
   SFINAE we can't just use tf_none to avoid access control errors, we
   need another mechanism.  Exiting early also avoids problems with trying
   to perform argument conversions when the class isn't complete yet.  */
#define LOOKUP_SPECULATIVE (LOOKUP_LIST_ONLY << 1)
/* Used by calls from defaulted functions to limit the overload set to avoid
   cycles trying to declare them (core issue 1092).  */
#define LOOKUP_DEFAULTED (LOOKUP_SPECULATIVE << 1)
/* Used in calls to store_init_value to suppress its usual call to
   digest_init.  */
#define LOOKUP_ALREADY_DIGESTED (LOOKUP_DEFAULTED << 1)

/* Convenience predicates over the LOOKUP_PREFER_* bits above.  */
#define LOOKUP_NAMESPACES_ONLY(F)  \
  (((F) & LOOKUP_PREFER_NAMESPACES) && !((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_TYPES_ONLY(F)  \
  (!((F) & LOOKUP_PREFER_NAMESPACES) && ((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_QUALIFIERS_ONLY(F)     ((F) & LOOKUP_PREFER_BOTH)

/* These flags are used by the conversion code.
   CONV_IMPLICIT   :  Perform implicit conversions (standard and user-defined).
   CONV_STATIC     :  Perform the explicit conversions for static_cast.
   CONV_CONST      :  Perform the explicit conversions for const_cast.
   CONV_REINTERPRET:  Perform the explicit conversions for reinterpret_cast.
   CONV_PRIVATE    :  Perform upcasts to private bases.
   CONV_FORCE_TEMP :  Require a new temporary when converting to the same
		      aggregate type.  */
#define CONV_IMPLICIT    1
#define CONV_STATIC      2
#define CONV_CONST       4
#define CONV_REINTERPRET 8
#define CONV_PRIVATE	 16
/* #define CONV_NONCONVERTING	 32 */
#define CONV_FORCE_TEMP  64
#define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
			  | CONV_REINTERPRET)
#define CONV_C_CAST      (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
			  | CONV_REINTERPRET | CONV_PRIVATE | CONV_FORCE_TEMP)

/* Used by build_expr_type_conversion to indicate which types are
   acceptable as arguments to the expression under consideration.
*/
#define WANT_INT	1 /* integer types, including bool */
#define WANT_FLOAT	2 /* floating point types */
#define WANT_ENUM	4 /* enumerated types */
#define WANT_POINTER	8 /* pointer types */
#define WANT_NULL      16 /* null pointer constant */
#define WANT_VECTOR_OR_COMPLEX 32 /* vector or complex types */
#define WANT_ARITH	(WANT_INT | WANT_FLOAT | WANT_VECTOR_OR_COMPLEX)

/* Used with comptypes, and related functions, to guide type
   comparison.  */
#define COMPARE_STRICT	      0 /* Just check if the types are the
				   same.  */
#define COMPARE_BASE	      1 /* Check to see if the second type is
				   derived from the first.  */
#define COMPARE_DERIVED	      2 /* Like COMPARE_BASE, but in
				   reverse.  */
#define COMPARE_REDECLARATION 4 /* The comparison is being done when
				   another declaration of an existing
				   entity is seen.  */
#define COMPARE_STRUCTURAL    8 /* The comparison is intended to be
				   structural.  The actual comparison
				   will be identical to
				   COMPARE_STRICT.  */

/* Used with push_overloaded_decl.  */
#define PUSH_GLOBAL	     0  /* Push the DECL into namespace scope,
				   regardless of the current scope.  */
#define PUSH_LOCAL	     1  /* Push the DECL into the current
				   scope.  */
#define PUSH_USING	     2  /* We are pushing this DECL as the
				   result of a using declaration.  */

/* Used with start function.  */
#define SF_DEFAULT	     0  /* No flags.  */
#define SF_PRE_PARSED	     1  /* The function declaration has
				   already been parsed.  */
#define SF_INCLASS_INLINE    2  /* The function is an inline, defined
				   in the class body.  */

/* Used with start_decl's initialized parameter.  */
#define SD_UNINITIALIZED     0  /* Declared without an initializer.  */
#define SD_INITIALIZED       1  /* Declared with an initializer.  */
#define SD_DEFAULTED         2  /* Declared with "= default".  */
#define SD_DELETED           3  /* Declared with "= delete".  */

/* Returns nonzero iff TYPE1 and TYPE2 are the same type, or if TYPE2
   is derived from TYPE1, or if TYPE2 is a pointer (reference) to a
   class derived from the type pointed to (referred to) by TYPE1.  */
#define same_or_base_type_p(TYPE1, TYPE2) \
  comptypes ((TYPE1), (TYPE2), COMPARE_BASE)

/* These macros are used to access a TEMPLATE_PARM_INDEX.
*/
#define TEMPLATE_PARM_INDEX_CAST(NODE) \
  ((template_parm_index*)TEMPLATE_PARM_INDEX_CHECK (NODE))
#define TEMPLATE_PARM_IDX(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->index)
#define TEMPLATE_PARM_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->level)
/* The number of sibling parms this template parm has.  */
#define TEMPLATE_PARM_NUM_SIBLINGS(NODE) \
  (TEMPLATE_PARM_INDEX_CAST (NODE)->num_siblings)
#define TEMPLATE_PARM_DESCENDANTS(NODE) (TREE_CHAIN (NODE))
#define TEMPLATE_PARM_ORIG_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->orig_level)
#define TEMPLATE_PARM_DECL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->decl)
#define TEMPLATE_PARM_PARAMETER_PACK(NODE) \
  (TREE_LANG_FLAG_0 (TEMPLATE_PARM_INDEX_CHECK (NODE)))

/* These macros are for accessing the fields of TEMPLATE_TYPE_PARM,
   TEMPLATE_TEMPLATE_PARM and BOUND_TEMPLATE_TEMPLATE_PARM nodes.  */
#define TEMPLATE_TYPE_PARM_INDEX(NODE)					\
  (TREE_CHECK3 ((NODE), TEMPLATE_TYPE_PARM, TEMPLATE_TEMPLATE_PARM,	\
		BOUND_TEMPLATE_TEMPLATE_PARM))->type.values
#define TEMPLATE_TYPE_IDX(NODE) \
  (TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_LEVEL(NODE) \
  (TEMPLATE_PARM_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_ORIG_LEVEL(NODE) \
  (TEMPLATE_PARM_ORIG_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_DECL(NODE) \
  (TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_PARAMETER_PACK(NODE) \
  (TEMPLATE_PARM_PARAMETER_PACK (TEMPLATE_TYPE_PARM_INDEX (NODE)))

/* These constants can be used as bit flags in the process of tree formatting.

   TFF_PLAIN_IDENTIFIER: unqualified part of a name.
   TFF_SCOPE: include the class and namespace scope of the name.
   TFF_CHASE_TYPEDEF: print the original type-id instead of the typedef-name.
   TFF_DECL_SPECIFIERS: print decl-specifiers.
   TFF_CLASS_KEY_OR_ENUM: precede a class-type name (resp. enum name) with
       a class-key (resp. `enum').
   TFF_RETURN_TYPE: include function return type.
   TFF_FUNCTION_DEFAULT_ARGUMENTS: include function default parameter values.
   TFF_EXCEPTION_SPECIFICATION: show function exception specification.
   TFF_TEMPLATE_HEADER: show the template<...> header in a
       template-declaration.
   TFF_TEMPLATE_NAME: show only template-name.
   TFF_EXPR_IN_PARENS: parenthesize expressions.
   TFF_NO_FUNCTION_ARGUMENTS: don't show function arguments.
   TFF_UNQUALIFIED_NAME: do not print the qualifying scope of the
       top-level entity.
   TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS: do not omit template arguments
       identical to their defaults.  */

#define TFF_PLAIN_IDENTIFIER			(0)
#define TFF_SCOPE				(1)
#define TFF_CHASE_TYPEDEF			(1 << 1)
#define TFF_DECL_SPECIFIERS			(1 << 2)
#define TFF_CLASS_KEY_OR_ENUM			(1 << 3)
#define TFF_RETURN_TYPE				(1 << 4)
#define TFF_FUNCTION_DEFAULT_ARGUMENTS		(1 << 5)
#define TFF_EXCEPTION_SPECIFICATION		(1 << 6)
#define TFF_TEMPLATE_HEADER			(1 << 7)
#define TFF_TEMPLATE_NAME			(1 << 8)
#define TFF_EXPR_IN_PARENS			(1 << 9)
#define TFF_NO_FUNCTION_ARGUMENTS		(1 << 10)
#define TFF_UNQUALIFIED_NAME			(1 << 11)
#define TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS	(1 << 12)

/* Returns the TEMPLATE_DECL associated to a TEMPLATE_TEMPLATE_PARM
   node.  */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL(NODE)	\
  ((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM)	\
   ? TYPE_TI_TEMPLATE (NODE)				\
   : TYPE_NAME (NODE))

/* in lex.c  */

extern void init_reswords (void);

/* Per-operator bookkeeping used when printing and mangling operator
   names.  */
typedef struct GTY(()) operator_name_info_t {
  /* The IDENTIFIER_NODE for the operator.  */
  tree identifier;
  /* The name of the operator.  */
  const char *name;
  /* The mangled name of the operator.  */
  const char *mangled_name;
  /* The arity of the operator.  */
  int arity;
} operator_name_info_t;

/* A mapping from tree codes to operator name information.  */
extern GTY(()) operator_name_info_t operator_name_info
  [(int) MAX_TREE_CODES];
/* Similar, but for assignment operators.
*/
extern GTY(()) operator_name_info_t assignment_operator_name_info
  [(int) MAX_TREE_CODES];

/* A type-qualifier, or bitmask therefore, using the TYPE_QUAL
   constants.  */
typedef int cp_cv_quals;

/* A storage class.  */
typedef enum cp_storage_class {
  /* sc_none must be zero so that zeroing a cp_decl_specifier_seq
     sets the storage_class field to sc_none.  */
  sc_none = 0,
  sc_auto,
  sc_register,
  sc_static,
  sc_extern,
  sc_mutable
} cp_storage_class;

/* An individual decl-specifier.  ds_first aliases the first real
   value so the enum can be used to index the specs[] array below;
   ds_last is the number of entries.  */
typedef enum cp_decl_spec {
  ds_first,
  ds_signed = ds_first,
  ds_unsigned,
  ds_short,
  ds_long,
  ds_const,
  ds_volatile,
  ds_restrict,
  ds_inline,
  ds_virtual,
  ds_explicit,
  ds_friend,
  ds_typedef,
  ds_constexpr,
  ds_complex,
  ds_thread,
  ds_last
} cp_decl_spec;

/* A decl-specifier-seq.  */
typedef struct cp_decl_specifier_seq {
  /* The number of times each of the keywords has been seen.  */
  unsigned specs[(int) ds_last];
  /* The location of the primary type.  Mainly used for error
     reporting.  */
  location_t type_location;
  /* The primary type, if any, given by the decl-specifier-seq.
     Modifiers, like "short", "const", and "unsigned" are not
     reflected here.  This field will be a TYPE, unless a typedef-name
     was used, in which case it will be a TYPE_DECL.  */
  tree type;
  /* The attributes, if any, provided with the specifier sequence.  */
  tree attributes;
  /* If non-NULL, a built-in type that the user attempted to redefine
     to some other type.  */
  tree redefined_builtin_type;
  /* The storage class specified -- or sc_none if no storage class was
     explicitly specified.  */
  cp_storage_class storage_class;
  /* True iff TYPE_SPEC indicates a user-defined type.  */
  BOOL_BITFIELD user_defined_type_p : 1;
  /* True iff multiple types were (erroneously) specified for this
     decl-specifier-seq.  */
  BOOL_BITFIELD multiple_types_p : 1;
  /* True iff multiple storage classes were (erroneously) specified
     for this decl-specifier-seq or a combination of a storage class
     with a typedef specifier.  */
  BOOL_BITFIELD conflicting_specifiers_p : 1;
  /* True iff at least one decl-specifier was found.  */
  BOOL_BITFIELD any_specifiers_p : 1;
  /* True iff at least one type-specifier was found.  */
  BOOL_BITFIELD any_type_specifiers_p : 1;
  /* True iff "int" was explicitly provided.  */
  BOOL_BITFIELD explicit_int_p : 1;
  /* True iff "__int128" was explicitly provided.  */
  BOOL_BITFIELD explicit_int128_p : 1;
  /* True iff "char" was explicitly provided.  */
  BOOL_BITFIELD explicit_char_p : 1;
} cp_decl_specifier_seq;

/* The various kinds of declarators.  Selects the active member of
   the union inside struct cp_declarator below.  */
typedef enum cp_declarator_kind {
  cdk_id,
  cdk_function,
  cdk_array,
  cdk_pointer,
  cdk_reference,
  cdk_ptrmem,
  cdk_error
} cp_declarator_kind;

/* A declarator.  */
typedef struct cp_declarator cp_declarator;

typedef struct cp_parameter_declarator cp_parameter_declarator;

/* A parameter, before it has been semantically analyzed.  */
struct cp_parameter_declarator {
  /* The next parameter, or NULL_TREE if none.  */
  cp_parameter_declarator *next;
  /* The decl-specifiers-seq for the parameter.  */
  cp_decl_specifier_seq decl_specifiers;
  /* The declarator for the parameter.  */
  cp_declarator *declarator;
  /* The default-argument expression, or NULL_TREE, if none.  */
  tree default_argument;
  /* True iff this is the first parameter in the list and the
     parameter sequence ends with an ellipsis.  */
  bool ellipsis_p;
};

/* A declarator.  */
struct cp_declarator {
  /* The kind of declarator.  */
  ENUM_BITFIELD (cp_declarator_kind) kind : 4;
  /* Whether we parsed an ellipsis (`...') just before the declarator,
     to indicate this is a parameter pack.  */
  BOOL_BITFIELD parameter_pack_p : 1;
  location_t id_loc; /* Currently only set for cdk_id and cdk_function.  */
  /* Attributes that apply to this declarator.  */
  tree attributes;
  /* For all but cdk_id and cdk_error, the contained declarator.  For
     cdk_id and cdk_error, guaranteed to be NULL.  */
  cp_declarator *declarator;
  union {
    /* For identifiers.  */
    struct {
      /* If non-NULL, the qualifying scope (a NAMESPACE_DECL or
	 *_TYPE) for this identifier.  */
      tree qualifying_scope;
      /* The unqualified name of the entity -- an IDENTIFIER_NODE,
	 BIT_NOT_EXPR, or TEMPLATE_ID_EXPR.  */
      tree unqualified_name;
      /* If this is the name of a function, what kind of special
	 function (if any).  */
      special_function_kind sfk;
    } id;
    /* For functions.  */
    struct {
      /* The parameters to the function as a TREE_LIST of decl/default.  */
      tree parameters;
      /* The cv-qualifiers for the function.  */
      cp_cv_quals qualifiers;
      /* The exception-specification for the function.  */
      tree exception_specification;
      /* The late-specified return type, if any.  */
      tree late_return_type;
    } function;
    /* For arrays.  */
    struct {
      /* The bounds to the array.  */
      tree bounds;
    } array;
    /* For cdk_pointer and cdk_ptrmem.  */
    struct {
      /* The cv-qualifiers for the pointer.  */
      cp_cv_quals qualifiers;
      /* For cdk_ptrmem, the class type containing the member.  */
      tree class_type;
    } pointer;
    /* For cdk_reference */
    struct {
      /* The cv-qualifiers for the reference.  These qualifiers are
	 only used to diagnose ill-formed code.  */
      cp_cv_quals qualifiers;
      /* Whether this is an rvalue reference */
      bool rvalue_ref;
    } reference;
  } u;
};

/* A level of template instantiation.  */
struct GTY(()) tinst_level {
  /* The immediately deeper level in the chain.  */
  struct tinst_level *next;

  /* The original node.  Can be either a DECL (for a function or static
     data member) or a TYPE (for a class), depending on what we were
     asked to instantiate.  */
  tree decl;

  /* The location where the template is instantiated.  */
  location_t locus;

  /* True if the location is in a system header.  */
  bool in_system_header_p;
};

/* A parameter list for a function with no parameters,
   e.g  "int f(void)".  */
extern cp_parameter_declarator *no_parameters;

/* True if we saw "#pragma GCC java_exceptions".
*/ extern bool pragma_java_exceptions; /* in call.c */ extern bool check_dtor_name (tree, tree); extern tree build_conditional_expr (tree, tree, tree, tsubst_flags_t); extern tree build_addr_func (tree); extern tree build_call_a (tree, int, tree*); extern tree build_call_n (tree, int, ...); extern bool null_ptr_cst_p (tree); extern bool sufficient_parms_p (const_tree); extern tree type_decays_to (tree); extern tree build_user_type_conversion (tree, tree, int); extern tree build_new_function_call (tree, VEC(tree,gc) **, bool, tsubst_flags_t); extern tree build_operator_new_call (tree, VEC(tree,gc) **, tree *, tree *, tree *); extern tree build_new_method_call (tree, tree, VEC(tree,gc) **, tree, int, tree *, tsubst_flags_t); extern tree build_special_member_call (tree, tree, VEC(tree,gc) **, tree, int, tsubst_flags_t); extern tree build_new_op (enum tree_code, int, tree, tree, tree, bool *, tsubst_flags_t); extern tree build_op_call (tree, VEC(tree,gc) **, tsubst_flags_t); extern tree build_op_delete_call (enum tree_code, tree, tree, bool, tree, tree); extern bool can_convert (tree, tree); extern bool can_convert_arg (tree, tree, tree, int); extern bool can_convert_arg_bad (tree, tree, tree, int); extern bool enforce_access (tree, tree, tree); extern tree convert_default_arg (tree, tree, tree, int); extern tree convert_arg_to_ellipsis (tree); extern tree build_x_va_arg (tree, tree); extern tree cxx_type_promotes_to (tree); extern tree type_passed_as (tree); extern tree convert_for_arg_passing (tree, tree); extern bool is_properly_derived_from (tree, tree); extern tree set_up_extended_ref_temp (tree, tree, tree *, tree *); extern tree initialize_reference (tree, tree, tree, tree *, tsubst_flags_t); extern tree make_temporary_var_for_ref_to_temp (tree, tree); extern tree strip_top_quals (tree); extern bool reference_related_p (tree, tree); extern tree perform_implicit_conversion (tree, tree, tsubst_flags_t); extern tree perform_implicit_conversion_flags (tree, tree, 
tsubst_flags_t, int); extern tree build_integral_nontype_arg_conv (tree, tree, tsubst_flags_t); extern tree perform_direct_initialization_if_possible (tree, tree, bool, tsubst_flags_t); extern tree in_charge_arg_for_name (tree); extern tree build_cxx_call (tree, int, tree *); extern bool is_std_init_list (tree); extern bool is_list_ctor (tree); #ifdef ENABLE_CHECKING extern void validate_conversion_obstack (void); #endif /* ENABLE_CHECKING */ /* in class.c */ extern tree build_vfield_ref (tree, tree); extern tree build_base_path (enum tree_code, tree, tree, int); extern tree convert_to_base (tree, tree, bool, bool, tsubst_flags_t); extern tree convert_to_base_statically (tree, tree); extern tree build_vtbl_ref (tree, tree); extern tree build_vfn_ref (tree, tree); extern tree get_vtable_decl (tree, int); extern void resort_type_method_vec (void *, void *, gt_pointer_operator, void *); extern bool add_method (tree, tree, tree); extern bool currently_open_class (tree); extern tree currently_open_derived_class (tree); extern tree current_nonlambda_class_type (void); extern tree finish_struct (tree, tree); extern void finish_struct_1 (tree); extern int resolves_to_fixed_type_p (tree, int *); extern void init_class_processing (void); extern int is_empty_class (tree); extern bool is_really_empty_class (tree); extern void pushclass (tree); extern void popclass (void); extern void push_nested_class (tree); extern void pop_nested_class (void); extern int current_lang_depth (void); extern void push_lang_context (tree); extern void pop_lang_context (void); extern tree instantiate_type (tree, tree, tsubst_flags_t); extern void print_class_statistics (void); extern void build_self_reference (void); extern int same_signature_p (const_tree, const_tree); extern void maybe_add_class_template_decl_list (tree, tree, int); extern void unreverse_member_declarations (tree); extern void invalidate_class_lookup_cache (void); extern void maybe_note_name_used_in_class (tree, tree); extern 
void note_name_declared_in_class (tree, tree); extern tree get_vtbl_decl_for_binfo (tree); extern void debug_class (tree); extern void debug_thunks (tree); extern tree cp_fold_obj_type_ref (tree, tree); extern void set_linkage_according_to_type (tree, tree); extern void determine_key_method (tree); extern void check_for_override (tree, tree); extern void push_class_stack (void); extern void pop_class_stack (void); extern bool type_has_user_nondefault_constructor (tree); extern tree in_class_defaulted_default_constructor (tree); extern bool user_provided_p (tree); extern bool type_has_user_provided_constructor (tree); extern bool type_has_user_provided_default_constructor (tree); extern bool synthesized_default_constructor_is_constexpr (tree); extern bool type_has_constexpr_default_constructor (tree); extern bool type_has_virtual_destructor (tree); extern bool type_has_move_constructor (tree); extern bool type_has_move_assign (tree); extern void defaulted_late_check (tree); extern bool defaultable_fn_check (tree); extern void fixup_type_variants (tree); extern void fixup_attribute_variants (tree); extern tree* decl_cloned_function_p (const_tree, bool); extern void clone_function_decl (tree, int); extern void adjust_clone_args (tree); /* in cvt.c */ extern tree convert_to_reference (tree, tree, int, int, tree); extern tree convert_from_reference (tree); extern tree force_rvalue (tree); extern tree ocp_convert (tree, tree, int, int); extern tree cp_convert (tree, tree); extern tree cp_convert_and_check (tree, tree); extern tree cp_fold_convert (tree, tree); extern tree convert_to_void (tree, impl_conv_void, tsubst_flags_t); extern tree convert_force (tree, tree, int); extern tree build_expr_type_conversion (int, tree, bool); extern tree type_promotes_to (tree); extern tree perform_qualification_conversions (tree, tree); /* in name-lookup.c */ extern tree pushdecl (tree); extern tree pushdecl_maybe_friend (tree, bool); extern void maybe_push_cleanup_level (tree); 
extern tree pushtag (tree, tree, tag_scope); extern tree make_anon_name (void); extern tree pushdecl_top_level_maybe_friend (tree, bool); extern tree pushdecl_top_level_and_finish (tree, tree); extern tree check_for_out_of_scope_variable (tree); extern void print_other_binding_stack (struct cp_binding_level *); extern tree maybe_push_decl (tree); extern tree current_decl_namespace (void); /* decl.c */ extern tree poplevel (int, int, int); extern void cxx_init_decl_processing (void); enum cp_tree_node_structure_enum cp_tree_node_structure (union lang_tree_node *); extern void finish_scope (void); extern void push_switch (tree); extern void pop_switch (void); extern tree make_lambda_name (void); extern int decls_match (tree, tree); extern tree duplicate_decls (tree, tree, bool); extern tree declare_local_label (tree); extern tree define_label (location_t, tree); extern void check_goto (tree); extern bool check_omp_return (void); extern tree make_typename_type (tree, tree, enum tag_types, tsubst_flags_t); extern tree make_unbound_class_template (tree, tree, tree, tsubst_flags_t); extern tree build_library_fn_ptr (const char *, tree); extern tree build_cp_library_fn_ptr (const char *, tree); extern tree push_library_fn (tree, tree, tree); extern tree push_void_library_fn (tree, tree); extern tree push_throw_library_fn (tree, tree); extern tree check_tag_decl (cp_decl_specifier_seq *); extern tree shadow_tag (cp_decl_specifier_seq *); extern tree groktypename (cp_decl_specifier_seq *, const cp_declarator *, bool); extern tree start_decl (const cp_declarator *, cp_decl_specifier_seq *, int, tree, tree, tree *); extern void start_decl_1 (tree, bool); extern bool check_array_initializer (tree, tree, tree); extern void cp_finish_decl (tree, tree, bool, tree, int); extern int cp_complete_array_type (tree *, tree, bool); extern tree build_ptrmemfunc_type (tree); extern tree build_ptrmem_type (tree, tree); /* the grokdeclarator prototype is in decl.h */ extern tree 
build_this_parm (tree, cp_cv_quals); extern int copy_fn_p (const_tree); extern bool move_fn_p (const_tree); extern tree get_scope_of_declarator (const cp_declarator *); extern void grok_special_member_properties (tree); extern int grok_ctor_properties (const_tree, const_tree); extern bool grok_op_properties (tree, bool); extern tree xref_tag (enum tag_types, tree, tag_scope, bool); extern tree xref_tag_from_type (tree, tree, tag_scope); extern bool xref_basetypes (tree, tree); extern tree start_enum (tree, tree, tree, bool, bool *); extern void finish_enum_value_list (tree); extern void finish_enum (tree); extern void build_enumerator (tree, tree, tree, location_t); extern tree lookup_enumerator (tree, tree); extern void start_preparsed_function (tree, tree, int); extern int start_function (cp_decl_specifier_seq *, const cp_declarator *, tree); extern tree begin_function_body (void); extern void finish_function_body (tree); extern tree outer_curly_brace_block (tree); extern tree finish_function (int); extern tree grokmethod (cp_decl_specifier_seq *, const cp_declarator *, tree); extern void maybe_register_incomplete_var (tree); extern void maybe_commonize_var (tree); extern void complete_vars (tree); extern void finish_stmt (void); extern tree static_fn_type (tree); extern void revert_static_member_fn (tree); extern void fixup_anonymous_aggr (tree); extern int check_static_variable_definition (tree, tree); extern tree compute_array_index_type (tree, tree, tsubst_flags_t); extern tree check_default_argument (tree, tree); typedef int (*walk_namespaces_fn) (tree, void *); extern int walk_namespaces (walk_namespaces_fn, void *); extern int wrapup_globals_for_namespace (tree, void *); extern tree create_implicit_typedef (tree, tree); extern int local_variable_p (const_tree); extern tree register_dtor_fn (tree); extern tmpl_spec_kind current_tmpl_spec_kind (int); extern tree cp_fname_init (const char *, tree *); extern tree cxx_builtin_function (tree decl); extern tree 
cxx_builtin_function_ext_scope (tree decl); extern tree check_elaborated_type_specifier (enum tag_types, tree, bool); extern void warn_extern_redeclared_static (tree, tree); extern tree cxx_comdat_group (tree); extern bool cp_missing_noreturn_ok_p (tree); extern void initialize_artificial_var (tree, VEC(constructor_elt,gc) *); extern tree check_var_type (tree, tree); extern tree reshape_init (tree, tree); extern tree next_initializable_field (tree); extern bool defer_mark_used_calls; extern GTY(()) VEC(tree, gc) *deferred_mark_used_calls; extern tree finish_case_label (location_t, tree, tree); extern tree cxx_maybe_build_cleanup (tree); /* in decl2.c */ extern bool check_java_method (tree); extern tree build_memfn_type (tree, tree, cp_cv_quals); extern tree change_return_type (tree, tree); extern void maybe_retrofit_in_chrg (tree); extern void maybe_make_one_only (tree); extern bool vague_linkage_p (tree); extern void grokclassfn (tree, tree, enum overload_flags); extern tree grok_array_decl (tree, tree); extern tree delete_sanity (tree, tree, bool, int); extern tree check_classfn (tree, tree, tree); extern void check_member_template (tree); extern tree grokfield (const cp_declarator *, cp_decl_specifier_seq *, tree, bool, tree, tree); extern tree grokbitfield (const cp_declarator *, cp_decl_specifier_seq *, tree, tree); extern tree cp_reconstruct_complex_type (tree, tree); extern void cplus_decl_attributes (tree *, tree, int); extern void finish_anon_union (tree); extern void cp_write_global_declarations (void); extern tree coerce_new_type (tree); extern tree coerce_delete_type (tree); extern void comdat_linkage (tree); extern void determine_visibility (tree); extern void constrain_class_visibility (tree); extern void import_export_decl (tree); extern tree build_cleanup (tree); extern tree build_offset_ref_call_from_tree (tree, VEC(tree,gc) **); extern bool decl_constant_var_p (tree); extern bool decl_maybe_constant_var_p (tree); extern void check_default_args 
(tree); extern void mark_used (tree); extern void finish_static_data_member_decl (tree, tree, bool, tree, int); extern tree cp_build_parm_decl (tree, tree); extern tree get_guard (tree); extern tree get_guard_cond (tree); extern tree set_guard (tree); extern tree cxx_callgraph_analyze_expr (tree *, int *); extern void mark_needed (tree); extern bool decl_needed_p (tree); extern void note_vague_linkage_fn (tree); extern tree build_artificial_parm (tree, tree); extern bool possibly_inlined_p (tree); extern int parm_index (tree); /* in error.c */ extern void init_error (void); extern const char *type_as_string (tree, int); extern const char *type_as_string_translate (tree, int); extern const char *decl_as_string (tree, int); extern const char *decl_as_string_translate (tree, int); extern const char *expr_as_string (tree, int); extern const char *lang_decl_name (tree, int, bool); extern const char *language_to_string (enum languages); extern const char *class_key_or_enum_as_string (tree); extern void print_instantiation_context (void); extern void maybe_warn_variadic_templates (void); extern void maybe_warn_cpp0x (cpp0x_warn_str str); extern bool pedwarn_cxx98 (location_t, int, const char *, ...) 
ATTRIBUTE_GCC_DIAG(3,4); extern location_t location_of (tree); extern void qualified_name_lookup_error (tree, tree, tree, location_t); /* in except.c */ extern void init_exception_processing (void); extern tree expand_start_catch_block (tree); extern void expand_end_catch_block (void); extern tree build_exc_ptr (void); extern tree build_throw (tree); extern int nothrow_libfn_p (const_tree); extern void check_handlers (tree); extern tree finish_noexcept_expr (tree, tsubst_flags_t); extern void perform_deferred_noexcept_checks (void); extern bool nothrow_spec_p (const_tree); extern bool type_noexcept_p (const_tree); extern bool type_throw_all_p (const_tree); extern tree build_noexcept_spec (tree, int); extern void choose_personality_routine (enum languages); extern tree eh_type_info (tree); extern tree begin_eh_spec_block (void); extern void finish_eh_spec_block (tree, tree); extern tree build_eh_type_type (tree); extern tree cp_protect_cleanup_actions (void); /* in expr.c */ extern tree cplus_expand_constant (tree); extern tree mark_rvalue_use (tree); extern tree mark_lvalue_use (tree); extern tree mark_type_use (tree); extern void mark_exp_read (tree); /* friend.c */ extern int is_friend (tree, tree); extern void make_friend_class (tree, tree, bool); extern void add_friend (tree, tree, bool); extern tree do_friend (tree, tree, tree, tree, enum overload_flags, bool); /* in init.c */ extern tree expand_member_init (tree); extern void emit_mem_initializers (tree); extern tree build_aggr_init (tree, tree, int, tsubst_flags_t); extern int is_class_type (tree, int); extern tree get_type_value (tree); extern tree build_zero_init (tree, tree, bool); extern tree build_value_init (tree, tsubst_flags_t); extern tree build_value_init_noctor (tree, tsubst_flags_t); extern tree build_offset_ref (tree, tree, bool); extern tree build_new (VEC(tree,gc) **, tree, tree, VEC(tree,gc) **, int, tsubst_flags_t); extern tree get_temp_regvar (tree, tree); extern tree build_vec_init (tree, 
tree, tree, bool, int, tsubst_flags_t); extern tree build_delete (tree, tree, special_function_kind, int, int); extern void push_base_cleanups (void); extern tree build_vec_delete (tree, tree, special_function_kind, int); extern tree create_temporary_var (tree); extern void initialize_vtbl_ptrs (tree); extern tree build_java_class_ref (tree); extern tree integral_constant_value (tree); extern int diagnose_uninitialized_cst_or_ref_member (tree, bool, bool); /* in lex.c */ extern void cxx_dup_lang_specific_decl (tree); extern void yyungetc (int, int); extern tree unqualified_name_lookup_error (tree); extern tree unqualified_fn_lookup_error (tree); extern tree build_lang_decl (enum tree_code, tree, tree); extern tree build_lang_decl_loc (location_t, enum tree_code, tree, tree); extern void retrofit_lang_decl (tree); extern tree copy_decl (tree); extern tree copy_type (tree); extern tree cxx_make_type (enum tree_code); extern tree make_class_type (enum tree_code); extern bool cxx_init (void); extern void cxx_finish (void); extern bool in_main_input_context (void); /* in method.c */ extern void init_method (void); extern tree make_thunk (tree, bool, tree, tree); extern void finish_thunk (tree); extern void use_thunk (tree, bool); extern bool trivial_fn_p (tree); extern bool maybe_explain_implicit_delete (tree); extern void synthesize_method (tree); extern tree lazily_declare_fn (special_function_kind, tree); extern tree skip_artificial_parms_for (const_tree, tree); extern int num_artificial_parms_for (const_tree); extern tree make_alias_for (tree, tree); extern tree get_copy_ctor (tree, tsubst_flags_t); extern tree get_copy_assign (tree); extern tree get_default_ctor (tree); extern tree get_dtor (tree, tsubst_flags_t); extern tree locate_ctor (tree); /* In optimize.c */ extern bool maybe_clone_body (tree); /* in pt.c */ extern bool check_template_shadow (tree); extern tree get_innermost_template_args (tree, int); extern void maybe_begin_member_template_processing 
(tree); extern void maybe_end_member_template_processing (void); extern tree finish_member_template_decl (tree); extern void begin_template_parm_list (void); extern bool begin_specialization (void); extern void reset_specialization (void); extern void end_specialization (void); extern void begin_explicit_instantiation (void); extern void end_explicit_instantiation (void); extern tree check_explicit_specialization (tree, tree, int, int); extern tree make_auto (void); extern tree do_auto_deduction (tree, tree, tree); extern tree type_uses_auto (tree); extern void append_type_to_template_for_access_check (tree, tree, tree, location_t); extern tree splice_late_return_type (tree, tree); extern bool is_auto (const_tree); extern tree process_template_parm (tree, location_t, tree, bool, bool, unsigned); extern tree end_template_parm_list (tree); void fixup_template_parms (void); extern void end_template_decl (void); extern tree maybe_update_decl_type (tree, tree); extern bool check_default_tmpl_args (tree, tree, int, int, int); extern tree push_template_decl (tree); extern tree push_template_decl_real (tree, bool); extern bool redeclare_class_template (tree, tree); extern tree lookup_template_class (tree, tree, tree, tree, int, tsubst_flags_t); extern tree lookup_template_function (tree, tree); extern int uses_template_parms (tree); extern int uses_template_parms_level (tree, int); extern tree instantiate_class_template (tree); extern tree instantiate_template (tree, tree, tsubst_flags_t); extern int fn_type_unification (tree, tree, tree, const tree *, unsigned int, tree, unification_kind_t, int); extern void mark_decl_instantiated (tree, int); extern int more_specialized_fn (tree, tree, int); extern void do_decl_instantiation (tree, tree); extern void do_type_instantiation (tree, tree, tsubst_flags_t); extern bool always_instantiate_p (tree); extern tree instantiate_decl (tree, int, bool); extern int comp_template_parms (const_tree, const_tree); extern bool 
uses_parameter_packs (tree); extern bool template_parameter_pack_p (const_tree); extern bool function_parameter_pack_p (const_tree); extern bool function_parameter_expanded_from_pack_p (tree, tree); extern tree make_pack_expansion (tree); extern bool check_for_bare_parameter_packs (tree); extern tree build_template_info (tree, tree); extern tree get_template_info (const_tree); extern VEC(qualified_typedef_usage_t,gc)* get_types_needing_access_check (tree); extern int template_class_depth (tree); extern int is_specialization_of (tree, tree); extern bool is_specialization_of_friend (tree, tree); extern tree get_pattern_parm (tree, tree); extern int comp_template_args (tree, tree); extern tree maybe_process_partial_specialization (tree); extern tree most_specialized_instantiation (tree); extern void print_candidates (tree); extern void instantiate_pending_templates (int); extern tree tsubst_default_argument (tree, tree, tree); extern tree tsubst (tree, tree, tsubst_flags_t, tree); extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t, tree, bool, bool); extern tree most_general_template (tree); extern tree get_mostly_instantiated_function_type (tree); extern int problematic_instantiation_changed (void); extern void record_last_problematic_instantiation (void); extern struct tinst_level *current_instantiation(void); extern tree maybe_get_template_decl_from_type_decl (tree); extern int processing_template_parmlist; extern bool dependent_type_p (tree); extern bool dependent_scope_p (tree); extern bool any_dependent_template_arguments_p (const_tree); extern bool dependent_template_p (tree); extern bool dependent_template_id_p (tree, tree); extern bool type_dependent_expression_p (tree); extern bool any_type_dependent_arguments_p (const VEC(tree,gc) *); extern bool type_dependent_expression_p_push (tree); extern bool value_dependent_expression_p (tree); extern bool any_value_dependent_elements_p (const_tree); extern bool dependent_omp_for_p (tree, tree, tree, 
tree); extern tree resolve_typename_type (tree, bool); extern tree template_for_substitution (tree); extern tree build_non_dependent_expr (tree); extern void make_args_non_dependent (VEC(tree,gc) *); extern bool reregister_specialization (tree, tree, tree); extern tree fold_non_dependent_expr (tree); extern bool explicit_class_specialization_p (tree); extern int push_tinst_level (tree); extern void pop_tinst_level (void); extern struct tinst_level *outermost_tinst_level(void); extern bool parameter_of_template_p (tree, tree); extern void init_template_processing (void); extern void print_template_statistics (void); bool template_template_parameter_p (const_tree); extern bool primary_template_instantiation_p (const_tree); extern tree get_primary_template_innermost_parameters (const_tree); extern tree get_template_parms_at_level (tree, int); extern tree get_template_innermost_arguments (const_tree); extern tree get_template_argument_pack_elems (const_tree); extern tree get_function_template_decl (const_tree); extern tree resolve_nondeduced_context (tree); extern hashval_t iterative_hash_template_arg (tree arg, hashval_t val); /* in repo.c */ extern void init_repo (void); extern int repo_emit_p (tree); extern bool repo_export_class_p (const_tree); extern void finish_repo (void); /* in rtti.c */ /* A vector of all tinfo decls that haven't been emitted yet. 
*/ extern GTY(()) VEC(tree,gc) *unemitted_tinfo_decls; extern void init_rtti_processing (void); extern tree build_typeid (tree); extern tree get_tinfo_decl (tree); extern tree get_typeid (tree); extern tree build_headof (tree); extern tree build_dynamic_cast (tree, tree, tsubst_flags_t); extern void emit_support_tinfos (void); extern bool emit_tinfo_decl (tree); /* in search.c */ extern bool accessible_base_p (tree, tree, bool); extern tree lookup_base (tree, tree, base_access, base_kind *); extern tree dcast_base_hint (tree, tree); extern int accessible_p (tree, tree, bool); extern tree lookup_field_1 (tree, tree, bool); extern tree lookup_field (tree, tree, int, bool); extern int lookup_fnfields_1 (tree, tree); extern tree lookup_fnfields_slot (tree, tree); extern int class_method_index_for_fn (tree, tree); extern tree lookup_fnfields (tree, tree, int); extern tree lookup_member (tree, tree, int, bool); extern int look_for_overrides (tree, tree); extern void get_pure_virtuals (tree); extern void maybe_suppress_debug_info (tree); extern void note_debug_info_needed (tree); extern void print_search_statistics (void); extern void reinit_search_statistics (void); extern tree current_scope (void); extern int at_function_scope_p (void); extern bool at_class_scope_p (void); extern bool at_namespace_scope_p (void); extern tree context_for_name_lookup (tree); extern tree lookup_conversions (tree); extern tree binfo_from_vbase (tree); extern tree binfo_for_vbase (tree, tree); extern tree look_for_overrides_here (tree, tree); #define dfs_skip_bases ((tree)1) extern tree dfs_walk_all (tree, tree (*) (tree, void *), tree (*) (tree, void *), void *); extern tree dfs_walk_once (tree, tree (*) (tree, void *), tree (*) (tree, void *), void *); extern tree binfo_via_virtual (tree, tree); extern tree build_baselink (tree, tree, tree, tree); extern tree adjust_result_of_qualified_name_lookup (tree, tree, tree); extern tree copied_binfo (tree, tree); extern tree original_binfo (tree, 
tree); extern int shared_member_p (tree); /* The representation of a deferred access check. */ typedef struct GTY(()) deferred_access_check { /* The base class in which the declaration is referenced. */ tree binfo; /* The declaration whose access must be checked. */ tree decl; /* The declaration that should be used in the error message. */ tree diag_decl; } deferred_access_check; DEF_VEC_O(deferred_access_check); DEF_VEC_ALLOC_O(deferred_access_check,gc); /* in semantics.c */ extern void push_deferring_access_checks (deferring_kind); extern void resume_deferring_access_checks (void); extern void stop_deferring_access_checks (void); extern void pop_deferring_access_checks (void); extern VEC (deferred_access_check,gc)* get_deferred_access_checks (void); extern void pop_to_parent_deferring_access_checks (void); extern void perform_access_checks (VEC (deferred_access_check,gc)*); extern void perform_deferred_access_checks (void); extern void perform_or_defer_access_check (tree, tree, tree); extern bool speculative_access_check (tree, tree, tree, bool); extern int stmts_are_full_exprs_p (void); extern void init_cp_semantics (void); extern tree do_poplevel (tree); extern void add_decl_expr (tree); extern tree finish_expr_stmt (tree); extern tree begin_if_stmt (void); extern void finish_if_stmt_cond (tree, tree); extern tree finish_then_clause (tree); extern void begin_else_clause (tree); extern void finish_else_clause (tree); extern void finish_if_stmt (tree); extern tree begin_while_stmt (void); extern void finish_while_stmt_cond (tree, tree); extern void finish_while_stmt (tree); extern tree begin_do_stmt (void); extern void finish_do_body (tree); extern void finish_do_stmt (tree, tree); extern tree finish_return_stmt (tree); extern tree begin_for_scope (tree *); extern tree begin_for_stmt (tree, tree); extern void finish_for_init_stmt (tree); extern void finish_for_cond (tree, tree); extern void finish_for_expr (tree, tree); extern void finish_for_stmt (tree); extern 
tree begin_range_for_stmt (tree, tree); extern void finish_range_for_decl (tree, tree, tree); extern void finish_range_for_stmt (tree); extern tree finish_break_stmt (void); extern tree finish_continue_stmt (void); extern tree begin_switch_stmt (void); extern void finish_switch_cond (tree, tree); extern void finish_switch_stmt (tree); extern tree finish_goto_stmt (tree); extern tree begin_try_block (void); extern void finish_try_block (tree); extern void finish_handler_sequence (tree); extern tree begin_function_try_block (tree *); extern void finish_function_try_block (tree); extern void finish_function_handler_sequence (tree, tree); extern void finish_cleanup_try_block (tree); extern tree begin_handler (void); extern void finish_handler_parms (tree, tree); extern void finish_handler (tree); extern void finish_cleanup (tree, tree); extern bool literal_type_p (tree); extern tree validate_constexpr_fundecl (tree); extern tree register_constexpr_fundef (tree, tree); extern bool check_constexpr_ctor_body (tree, tree); extern tree ensure_literal_type_for_constexpr_object (tree); extern bool potential_constant_expression (tree); extern bool potential_rvalue_constant_expression (tree); extern bool require_potential_constant_expression (tree); extern bool require_potential_rvalue_constant_expression (tree); extern tree cxx_constant_value (tree); extern tree maybe_constant_value (tree); extern tree maybe_constant_init (tree); extern bool is_sub_constant_expr (tree); extern bool reduced_constant_expression_p (tree); extern VEC(tree,heap)* cx_error_context (void); enum { BCS_NO_SCOPE = 1, BCS_TRY_BLOCK = 2, BCS_FN_BODY = 4 }; extern tree begin_compound_stmt (unsigned int); extern void finish_compound_stmt (tree); extern tree finish_asm_stmt (int, tree, tree, tree, tree, tree); extern tree finish_label_stmt (tree); extern void finish_label_decl (tree); extern tree finish_parenthesized_expr (tree); extern tree finish_non_static_data_member (tree, tree, tree); extern tree 
begin_stmt_expr (void); extern tree finish_stmt_expr_expr (tree, tree); extern tree finish_stmt_expr (tree, bool); extern tree stmt_expr_value_expr (tree); bool empty_expr_stmt_p (tree); extern tree perform_koenig_lookup (tree, VEC(tree,gc) *, bool); extern tree finish_call_expr (tree, VEC(tree,gc) **, bool, bool, tsubst_flags_t); extern tree finish_increment_expr (tree, enum tree_code); extern tree finish_this_expr (void); extern tree finish_pseudo_destructor_expr (tree, tree, tree); extern tree finish_unary_op_expr (enum tree_code, tree); extern tree finish_compound_literal (tree, tree); extern tree finish_fname (tree); extern void finish_translation_unit (void); extern tree finish_template_type_parm (tree, tree); extern tree finish_template_template_parm (tree, tree); extern tree begin_class_definition (tree, tree); extern void finish_template_decl (tree); extern tree finish_template_type (tree, tree, int); extern tree finish_base_specifier (tree, tree, bool); extern void finish_member_declaration (tree); extern tree finish_id_expression (tree, tree, tree, cp_id_kind *, bool, bool, bool *, bool, bool, bool, bool, const char **, location_t); extern tree finish_typeof (tree); extern tree finish_offsetof (tree); extern void finish_decl_cleanup (tree, tree); extern void finish_eh_cleanup (tree); extern void emit_associated_thunks (tree); extern void finish_mem_initializers (tree); extern tree check_template_template_default_arg (tree); extern bool expand_or_defer_fn_1 (tree); extern void expand_or_defer_fn (tree); extern void add_typedef_to_current_template_for_access_check (tree, tree, location_t); extern void check_accessibility_of_qualified_id (tree, tree, tree); extern tree finish_qualified_id_expr (tree, tree, bool, bool, bool, bool); extern void simplify_aggr_init_expr (tree *); extern void finalize_nrv (tree *, tree, tree); extern void note_decl_for_pch (tree); extern tree finish_omp_clauses (tree); extern void finish_omp_threadprivate (tree); extern tree 
begin_omp_structured_block (void); extern tree finish_omp_structured_block (tree); extern tree begin_omp_parallel (void); extern tree finish_omp_parallel (tree, tree); extern tree begin_omp_task (void); extern tree finish_omp_task (tree, tree); extern tree finish_omp_for (location_t, tree, tree, tree, tree, tree, tree, tree); extern void finish_omp_atomic (enum tree_code, tree, tree); extern void finish_omp_barrier (void); extern void finish_omp_flush (void); extern void finish_omp_taskwait (void); extern bool cxx_omp_create_clause_info (tree, tree, bool, bool, bool); extern tree baselink_for_fns (tree); extern void finish_static_assert (tree, tree, location_t, bool); extern tree describable_type (tree); extern tree finish_decltype_type (tree, bool, tsubst_flags_t); extern tree finish_trait_expr (enum cp_trait_kind, tree, tree); extern tree build_lambda_expr (void); extern tree build_lambda_object (tree); extern tree begin_lambda_type (tree); extern tree lambda_capture_field_type (tree); extern tree lambda_return_type (tree); extern tree lambda_function (tree); extern void apply_lambda_return_type (tree, tree); extern tree add_capture (tree, tree, tree, bool, bool); extern tree add_default_capture (tree, tree, tree); extern void register_capture_members (tree); extern tree lambda_expr_this_capture (tree); extern tree nonlambda_method_basetype (void); extern void maybe_add_lambda_conv_op (tree); /* in tree.c */ void cp_free_lang_data (tree t); extern tree force_target_expr (tree, tree); extern tree build_target_expr_with_type (tree, tree); extern void lang_check_failed (const char *, int, const char *) ATTRIBUTE_NORETURN; extern tree stabilize_expr (tree, tree *); extern void stabilize_call (tree, tree *); extern void stabilize_aggr_init (tree, tree *); extern bool stabilize_init (tree, tree *); extern tree add_stmt_to_compound (tree, tree); extern void init_tree (void); extern bool pod_type_p (const_tree); extern bool layout_pod_type_p (const_tree); extern bool 
std_layout_type_p (const_tree); extern bool trivial_type_p (const_tree); extern bool trivially_copyable_p (const_tree); extern bool type_has_nontrivial_default_init (const_tree); extern bool type_has_nontrivial_copy_init (const_tree); extern bool class_tmpl_impl_spec_p (const_tree); extern int zero_init_p (const_tree); extern tree strip_typedefs (tree); extern tree copy_binfo (tree, tree, tree, tree *, int); extern int member_p (const_tree); extern cp_lvalue_kind real_lvalue_p (const_tree); extern cp_lvalue_kind lvalue_kind (const_tree); extern bool lvalue_or_rvalue_with_address_p (const_tree); extern bool builtin_valid_in_constant_expr_p (const_tree); extern tree build_min (enum tree_code, tree, ...); extern tree build_min_nt (enum tree_code, ...); extern tree build_min_non_dep (enum tree_code, tree, ...); extern tree build_min_non_dep_call_vec (tree, tree, VEC(tree,gc) *); extern tree build_cplus_new (tree, tree); extern tree build_aggr_init_expr (tree, tree); extern tree get_target_expr (tree); extern tree build_cplus_array_type (tree, tree); extern tree build_array_of_n_type (tree, int); extern tree build_array_copy (tree); extern tree build_vec_init_expr (tree, tree); extern void diagnose_non_constexpr_vec_init (tree); extern tree hash_tree_cons (tree, tree, tree); extern tree hash_tree_chain (tree, tree); extern tree build_qualified_name (tree, tree, tree, bool); extern int is_overloaded_fn (tree); extern tree get_fns (tree); extern tree get_first_fn (tree); extern tree ovl_cons (tree, tree); extern tree build_overload (tree, tree); extern bool non_static_member_function_p (tree); extern const char *cxx_printable_name (tree, int); extern const char *cxx_printable_name_translate (tree, int); extern tree build_exception_variant (tree, tree); extern tree bind_template_template_parm (tree, tree); extern tree array_type_nelts_total (tree); extern tree array_type_nelts_top (tree); extern tree break_out_target_exprs (tree); extern tree get_type_decl (tree); extern 
tree decl_namespace_context (tree); extern bool decl_anon_ns_mem_p (const_tree); extern tree lvalue_type (tree); extern tree error_type (tree); extern int varargs_function_p (const_tree); extern bool really_overloaded_fn (tree); extern bool cp_tree_equal (tree, tree); extern tree no_linkage_check (tree, bool); extern void debug_binfo (tree); extern tree build_dummy_object (tree); extern tree maybe_dummy_object (tree, tree *); extern int is_dummy_object (const_tree); extern const struct attribute_spec cxx_attribute_table[]; extern tree make_ptrmem_cst (tree, tree); extern tree cp_build_type_attribute_variant (tree, tree); extern tree cp_build_reference_type (tree, bool); extern tree move (tree); extern tree cp_build_qualified_type_real (tree, int, tsubst_flags_t); #define cp_build_qualified_type(TYPE, QUALS) \ cp_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error) extern bool cv_qualified_p (const_tree); extern tree cv_unqualified (tree); extern special_function_kind special_function_p (const_tree); extern int count_trees (tree); extern int char_type_p (tree); extern void verify_stmt_tree (tree); extern linkage_kind decl_linkage (tree); extern duration_kind decl_storage_duration (tree); extern tree cp_walk_subtrees (tree*, int*, walk_tree_fn, void*, struct pointer_set_t*); #define cp_walk_tree(a,b,c,d) \ walk_tree_1 (a, b, c, d, cp_walk_subtrees) #define cp_walk_tree_without_duplicates(a,b,c) \ walk_tree_without_duplicates_1 (a, b, c, cp_walk_subtrees) extern tree fold_if_not_in_template (tree); extern tree rvalue (tree); extern tree convert_bitfield_to_declared_type (tree); extern tree cp_save_expr (tree); extern bool cast_valid_in_integral_constant_expression_p (tree); extern bool cxx_type_hash_eq (const_tree, const_tree); extern void cxx_print_statistics (void); /* in ptree.c */ extern void cxx_print_xnode (FILE *, tree, int); extern void cxx_print_decl (FILE *, tree, int); extern void cxx_print_type (FILE *, tree, int); extern void 
cxx_print_identifier (FILE *, tree, int); extern void cxx_print_error_function (diagnostic_context *, const char *, struct diagnostic_info *); /* in typeck.c */ extern bool cxx_mark_addressable (tree); extern int string_conv_p (const_tree, const_tree, int); extern tree cp_truthvalue_conversion (tree); extern tree condition_conversion (tree); extern tree require_complete_type (tree); extern tree require_complete_type_sfinae (tree, tsubst_flags_t); extern tree complete_type (tree); extern tree complete_type_or_else (tree, tree); extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t); extern int type_unknown_p (const_tree); enum { ce_derived, ce_normal, ce_exact }; extern bool comp_except_specs (const_tree, const_tree, int); extern bool comptypes (tree, tree, int); extern bool same_type_ignoring_top_level_qualifiers_p (tree, tree); extern bool compparms (const_tree, const_tree); extern int comp_cv_qualification (const_tree, const_tree); extern int comp_cv_qual_signature (tree, tree); extern tree cxx_sizeof_or_alignof_expr (tree, enum tree_code, bool); extern tree cxx_sizeof_or_alignof_type (tree, enum tree_code, bool); extern tree cxx_sizeof_nowarn (tree); extern tree is_bitfield_expr_with_lowered_type (const_tree); extern tree unlowered_expr_type (const_tree); extern tree decay_conversion (tree); extern tree build_class_member_access_expr (tree, tree, tree, bool, tsubst_flags_t); extern tree finish_class_member_access_expr (tree, tree, bool, tsubst_flags_t); extern tree build_x_indirect_ref (tree, ref_operator, tsubst_flags_t); extern tree cp_build_indirect_ref (tree, ref_operator, tsubst_flags_t); extern tree build_array_ref (location_t, tree, tree); extern tree cp_build_array_ref (location_t, tree, tree, tsubst_flags_t); extern tree get_member_function_from_ptrfunc (tree *, tree); extern tree cp_build_function_call (tree, tree, tsubst_flags_t); extern tree cp_build_function_call_nary (tree, tsubst_flags_t, ...) 
ATTRIBUTE_SENTINEL; extern tree cp_build_function_call_vec (tree, VEC(tree,gc) **, tsubst_flags_t); extern tree build_x_binary_op (enum tree_code, tree, enum tree_code, tree, enum tree_code, bool *, tsubst_flags_t); extern tree build_x_array_ref (tree, tree, tsubst_flags_t); extern tree build_x_unary_op (enum tree_code, tree, tsubst_flags_t); extern tree cp_build_addr_expr (tree, tsubst_flags_t); extern tree cp_build_addr_expr_strict (tree, tsubst_flags_t); extern tree cp_build_unary_op (enum tree_code, tree, int, tsubst_flags_t); extern tree unary_complex_lvalue (enum tree_code, tree); extern tree build_x_conditional_expr (tree, tree, tree, tsubst_flags_t); extern tree build_x_compound_expr_from_list (tree, expr_list_kind, tsubst_flags_t); extern tree build_x_compound_expr_from_vec (VEC(tree,gc) *, const char *); extern tree build_x_compound_expr (tree, tree, tsubst_flags_t); extern tree build_compound_expr (location_t, tree, tree); extern tree cp_build_compound_expr (tree, tree, tsubst_flags_t); extern tree build_static_cast (tree, tree, tsubst_flags_t); extern tree build_reinterpret_cast (tree, tree, tsubst_flags_t); extern tree build_const_cast (tree, tree, tsubst_flags_t); extern tree build_c_cast (location_t, tree, tree); extern tree cp_build_c_cast (tree, tree, tsubst_flags_t); extern tree build_x_modify_expr (tree, enum tree_code, tree, tsubst_flags_t); extern tree cp_build_modify_expr (tree, enum tree_code, tree, tsubst_flags_t); extern tree convert_for_initialization (tree, tree, tree, int, impl_conv_rhs, tree, int, tsubst_flags_t); extern int comp_ptr_ttypes (tree, tree); extern bool comp_ptr_ttypes_const (tree, tree); extern bool error_type_p (const_tree); extern int ptr_reasonably_similar (const_tree, const_tree); extern tree build_ptrmemfunc (tree, tree, int, bool, tsubst_flags_t); extern int cp_type_quals (const_tree); extern int type_memfn_quals (const_tree); extern tree apply_memfn_quals (tree, cp_cv_quals); extern bool cp_has_mutable_p 
(const_tree); extern bool at_least_as_qualified_p (const_tree, const_tree); extern void cp_apply_type_quals_to_decl (int, tree); extern tree build_ptrmemfunc1 (tree, tree, tree); extern void expand_ptrmemfunc_cst (tree, tree *, tree *); extern tree type_after_usual_arithmetic_conversions (tree, tree); extern tree common_pointer_type (tree, tree); extern tree composite_pointer_type (tree, tree, tree, tree, composite_pointer_operation, tsubst_flags_t); extern tree merge_types (tree, tree); extern tree strip_array_domain (tree); extern tree check_return_expr (tree, bool *); extern tree cp_build_binary_op (location_t, enum tree_code, tree, tree, tsubst_flags_t); #define cxx_sizeof(T) cxx_sizeof_or_alignof_type (T, SIZEOF_EXPR, true) extern tree build_ptrmemfunc_access_expr (tree, tree); extern tree build_address (tree); extern tree build_typed_address (tree, tree); extern tree build_nop (tree, tree); extern tree non_reference (tree); extern tree lookup_anon_field (tree, tree); extern bool invalid_nonstatic_memfn_p (const_tree, tsubst_flags_t); extern tree convert_member_func_to_ptr (tree, tree); extern tree convert_ptrmem (tree, tree, bool, bool, tsubst_flags_t); extern int lvalue_or_else (tree, enum lvalue_use, tsubst_flags_t); extern void check_template_keyword (tree); /* in typeck2.c */ extern void require_complete_eh_spec_types (tree, tree); extern void cxx_incomplete_type_diagnostic (const_tree, const_tree, diagnostic_t); #undef cxx_incomplete_type_error extern void cxx_incomplete_type_error (const_tree, const_tree); #define cxx_incomplete_type_error(V,T) \ (cxx_incomplete_type_diagnostic ((V), (T), DK_ERROR)) extern tree error_not_base_type (tree, tree); extern tree binfo_or_else (tree, tree); extern void cxx_readonly_error (tree, enum lvalue_use); extern void complete_type_check_abstract (tree); extern int abstract_virtuals_error (tree, tree); extern tree store_init_value (tree, tree, int); extern void check_narrowing (tree, tree); extern tree digest_init (tree, 
tree); extern tree digest_init_flags (tree, tree, int); extern tree build_scoped_ref (tree, tree, tree *); extern tree build_x_arrow (tree); extern tree build_m_component_ref (tree, tree); extern tree build_functional_cast (tree, tree, tsubst_flags_t); extern tree add_exception_specifier (tree, tree, int); extern tree merge_exception_specifiers (tree, tree); /* in mangle.c */ extern void init_mangle (void); extern void mangle_decl (tree); extern const char *mangle_type_string (tree); extern tree mangle_typeinfo_for_type (tree); extern tree mangle_typeinfo_string_for_type (tree); extern tree mangle_vtbl_for_type (tree); extern tree mangle_vtt_for_type (tree); extern tree mangle_ctor_vtbl_for_type (tree, tree); extern tree mangle_thunk (tree, int, tree, tree); extern tree mangle_conv_op_name_for_type (tree); extern tree mangle_guard_variable (tree); extern tree mangle_ref_init_variable (tree); /* in dump.c */ extern bool cp_dump_tree (void *, tree); /* In cp/cp-objcp-common.c. */ extern alias_set_type cxx_get_alias_set (tree); extern bool cxx_warn_unused_global_decl (const_tree); extern size_t cp_tree_size (enum tree_code); extern bool cp_var_mod_type_p (tree, tree); extern void cxx_initialize_diagnostics (diagnostic_context *); extern int cxx_types_compatible_p (tree, tree); extern void init_shadowed_var_for_decl (void); /* in cp-gimplify.c */ extern int cp_gimplify_expr (tree *, gimple_seq *, gimple_seq *); extern void cp_genericize (tree); extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree); extern tree cxx_omp_clause_default_ctor (tree, tree, tree); extern tree cxx_omp_clause_copy_ctor (tree, tree, tree); extern tree cxx_omp_clause_assign_op (tree, tree, tree); extern tree cxx_omp_clause_dtor (tree, tree); extern void cxx_omp_finish_clause (tree); extern bool cxx_omp_privatize_by_reference (const_tree); /* in name-lookup.c */ extern void suggest_alternatives_for (location_t, tree); /* -- end of C++ */ #endif /* ! GCC_CP_TREE_H */
AlgebraicTriangleCounting.h
/*
 * AlgebraicTriangleCounting.h
 *
 * Created on: Jul 12, 2016
 * Author: Michael Wegner (michael.wegner@student.kit.edu)
 */

#ifndef NETWORKIT_CPP_ALGEBRAIC_ALGORITHMS_ALGEBRAICTRIANGLECOUNTING_H_
#define NETWORKIT_CPP_ALGEBRAIC_ALGORITHMS_ALGEBRAICTRIANGLECOUNTING_H_

#include "../../base/Algorithm.h"

namespace NetworKit {

/**
 * @ingroup algebraic
 * Implements a triangle counting algorithm for nodes based on algebraic methods.
 *
 * The count is derived from the cube of the adjacency matrix: (A^3)_{ii} is the
 * number of closed walks of length 3 that start and end at node i, which run()
 * converts into a per-node triangle score (see run() below).
 */
template<class Matrix> class AlgebraicTriangleCounting : public Algorithm {
public:
    /**
     * Creates an instance of AlgebraicTriangleCounting for the given Graph @a graph.
     * Builds the adjacency matrix once in the constructor and remembers whether
     * the graph is directed (this changes the normalization applied in run()).
     * @param graph
     */
    AlgebraicTriangleCounting(const Graph& graph) : A(Matrix::adjacencyMatrix(graph)), directed(graph.isDirected()) {}

    /**
     * Computes the number of triangles each node is part of. A triangle is considered as a set of nodes (i.e. if there
     * is a triangle (u,v,w) it only counts as one triangle at each node).
     */
    void run() override;

    /**
     * Returns the score of node @a u.
     * @param u
     * @throws std::runtime_error if run() has not been called yet.
     */
    count score(node u) const {
        if (!hasRun) throw std::runtime_error("AlgebraicTriangleCounting::score(node u): Call run() method first.");
        // Debug-build sanity check: u must be a valid row index of A.
        assert(u < A.numberOfRows());
        return nodeScores[u];
    }

    /**
     * Returns the scores for all nodes of the graph. If @a moveOut is set to true (false by default) then the scores
     * are std::moved such that no copy is constructed.
     * @param moveOut
     * @throws std::runtime_error if run() has not been called yet.
     */
    std::vector<count> getScores(bool moveOut = false) {
        if (!hasRun) throw std::runtime_error("AlgebraicTriangleCounting::getScores(): Call run() method first.");
        // Moving the scores out leaves nodeScores empty, so the result state is
        // deliberately invalidated: hasRun becomes false and run() must be
        // called again before score()/getScores() can be used.
        hasRun = !moveOut;
        return moveOut? std::move(nodeScores) : nodeScores;
    }

private:
    Matrix A;                      // adjacency matrix of the input graph
    bool directed;                 // whether the input graph was directed
    std::vector<count> nodeScores; // per-node triangle counts, filled by run()
};

template<class Matrix>
void AlgebraicTriangleCounting<Matrix>::run() {
    // powA(i,i) counts closed length-3 walks at node i. In an undirected graph
    // each triangle {u,v,w} yields two such walks at each corner (one per
    // orientation), hence the division by 2.0; in a directed graph each
    // directed 3-cycle contributes exactly one walk, so the value is used as-is.
    Matrix powA = A * A * A;

    nodeScores.clear();
    nodeScores.resize(A.numberOfRows(), 0);

    // Rows are independent, so the diagonal extraction parallelizes trivially.
#pragma omp parallel for
    for (index i = 0; i < powA.numberOfRows(); ++i) {
        nodeScores[i] = directed? powA(i,i) : powA(i,i) / 2.0;
    }

    hasRun = true;
}

} /* namespace NetworKit */

#endif /* NETWORKIT_CPP_ALGEBRAIC_ALGORITHMS_ALGEBRAICTRIANGLECOUNTING_H_ */
disk.h
#pragma once class SolidDisk{ public: static PS::S32 n_init; static PS::F64 m_init; static PS::F64 p; //static PS::F64 f_in; //static PS::F64 f_out; static PS::F64 f_dust; static PS::F64 eta_ice; static PS::F64 a_in; static PS::F64 a_out; static PS::F64 a_ice; static PS::F64 ecc_hill; static PS::F64 inc_hill; static PS::F64 calcDustMass(const PS::F64 a0, const PS::F64 a1, const bool inIce) { const PS::F64 L_CGS = 14959787070000; const PS::F64 M_CGS = 1.9884e33; if ( a1 < a0 ) return 0.; if ( inIce ) { const PS::F64 coef_in = 10. * f_dust /M_CGS*L_CGS*L_CGS; return 2.*M_PI*coef_in/(2.-p) * ( pow(a1, 2.-p) - pow(a0, 2.-p) ); } else { const PS::F64 coef_out = 10. * f_dust * eta_ice /M_CGS*L_CGS*L_CGS; return 2.*M_PI*coef_out/(2.-p) * ( pow(a1, 2.-p) - pow(a0, 2.-p) ); } } static PS::F64 getSemimajorAxis(const PS::F64 a0, const PS::F64 a1) { assert ( a0 < a1 ); PS::F64 R = drand48(); if ( p != 2 ){ return pow( (pow(a1,2.-p) - pow(a0,2.-p)) * R + pow(a0,2.-p), 1./(2.-p) ); } else { return exp( (log(a1) - log(a0)) * R + log(a0) ); } } template <class Tpsys> static void createInitialCondition(Tpsys & pp){ if ( PS::Comm::getRank() == 0 ){ const PS::F64 m_sun = FP_t::m_sun; PS::F64 m_in = 0.; PS::F64 m_out = 0.; PS::S32 n_in = 0; //PS::S32 n_out = 0; //////////////////////////////////// /* Set Particle Mass & Number */ //////////////////////////////////// if ( a_out < a_ice ) { m_in = calcDustMass(a_in, a_out, true); m_out = 0.; } else if ( a_ice < a_in ) { m_in = 0.; m_out = calcDustMass(a_in, a_out, false); } else { m_in = calcDustMass(a_in, a_ice, true); m_out = calcDustMass(a_ice, a_out, false); } assert( n_init >= 0 ); assert( m_init >= 0. ); if ( m_init == 0. ) { assert( n_init > 0 ); m_init = (m_in + m_out) / n_init; } if ( n_init == 0 ){ assert( m_init > 0. 
); n_init = (m_in + m_out) / m_init; } n_in = (PS::S32)round(m_in/(m_in + m_out) * n_init); //n_out = n_init - n_in; //////////////////////////////// /* Create Particle System */ //////////////////////////////// pp.setNumberOfParticleLocal(n_init); for ( PS::S32 i=0; i<n_init; i++ ){ pp[i].id = i; pp[i].mass = m_init; // set orbital element PS::F64 ax; PS::F64 h = pow(pp[i].mass/(3.*m_sun), 1./3.); if ( a_out < a_ice || a_ice < a_in ) { ax = getSemimajorAxis(a_in, a_out); } else { if ( i < n_in ) { ax = getSemimajorAxis(a_in, a_ice); } else { ax = getSemimajorAxis(a_ice, a_out); } } PS::F64 ecc = getGaussian(ecc_hill*h); PS::F64 inc = getGaussian(inc_hill*h); PS::F64 l = 2 * M_PI * drand48(); PS::F64 u = solveKeplerEq(l, ecc); PS::F64 omg = 2 * M_PI * drand48(); PS::F64 OMG = 2 * M_PI * drand48(); PS::F64 n = sqrt(m_sun / (ax*ax*ax)); PS::F64vec P, Q; P.x = cos(omg)*cos(OMG) - sin(omg)*sin(OMG)*cos(inc); P.y = cos(omg)*sin(OMG) + sin(omg)*cos(OMG)*cos(inc); P.z = sin(omg)*sin(inc); Q.x = -sin(omg)*cos(OMG) - cos(omg)*sin(OMG)*cos(inc); Q.y = -sin(omg)*sin(OMG) + cos(omg)*cos(OMG)*cos(inc); Q.z = cos(omg)*sin(inc); orbitalElement2PosVel(pp[i].pos, pp[i].vel, m_sun, ax, ecc, n, u, P, Q); } } else { pp.setNumberOfParticleLocal(0); } } }; PS::S32 SolidDisk::n_init = 0; PS::F64 SolidDisk::m_init = 0.; PS::F64 SolidDisk::p = 1.5; PS::F64 SolidDisk::f_dust = 0.71; PS::F64 SolidDisk::eta_ice = 30./7.1; PS::F64 SolidDisk::a_in = 0.98; PS::F64 SolidDisk::a_out = 1.02; PS::F64 SolidDisk::a_ice = 2.0; PS::F64 SolidDisk::ecc_hill = 2.0; PS::F64 SolidDisk::inc_hill = 1.0; class GasDisk{ public: static PS::F64 alpha_gas; static PS::F64 beta_gas; static PS::F64 f_gas; static PS::F64 tau_gas; static PS::F64 C_d; static PS::F64 mu; PS::F64 coef_rho_gas; PS::F64 coef_cs_vk; PS::F64 coef_acc_gd; GasDisk(){ const PS::F64 L_CGS = 14959787070000; const PS::F64 M_CGS = 1.9884e33; const PS::F64 T = 365.25*24.*60.*60./(2.*M_PI); coef_rho_gas = 1.4e-9 * f_gas /M_CGS*L_CGS*L_CGS*L_CGS; const 
PS::F64 k_B = 1.380649e-16 /(M_CGS*L_CGS*L_CGS)*T*T; const PS::F64 N_A = 6.022140857e23; const PS::F64 m_H = 1./N_A /M_CGS; PS::F64 coef_cs = sqrt(k_B * 280 / (mu * m_H)); PS::F64 coef_vk = sqrt(FP_t::m_sun); coef_cs_vk = coef_cs / coef_vk; coef_acc_gd = 0.5*C_d*M_PI; if ( PS::Comm::getRank() == 0 ) { std::cout << "rho_gas at 1 AU = " << coef_rho_gas << std::endl << "cs/vk at 1 AU = " << coef_cs_vk << std::endl; } } template <class Tpsys> void calcGasDrag(Tpsys & pp, PS::F64 time, PS::F64 L=1., bool clear=true){ const PS::S32 n_loc = pp.getNumberOfParticleLocal(); #pragma omp parallel for for(PS::S32 i=0; i<n_loc; i++){ PS::F64 r2 = pp[i].pos.x*pp[i].pos.x + pp[i].pos.y*pp[i].pos.y; PS::F64 r_inv = 1./sqrt(r2); PS::F64 r = r2 * r_inv; PS::F64 rho_gas = coef_rho_gas * pow(r, -alpha_gas); if ( tau_gas != 0. ) rho_gas *= exp(-time / tau_gas); PS::F64 cs_vk = coef_cs_vk * sqrt(sqrt(r)) * pow(L, 1./8.); PS::F64vec ev(-pp[i].pos.y*r_inv, pp[i].pos.x*r_inv, 0.0); PS::F64vec vkep = sqrt(FP_t::m_sun * r_inv) * ev; PS::F64 eta = 0.5 * (alpha_gas + beta_gas) * cs_vk * cs_vk; PS::F64vec vgas = (1.0 - eta)*vkep; PS::F64vec u = pp[i].vel - vgas; //PRL(eta); //PS::F64 rplanet = cbrt(0.75*pp[i].mass/(M_PI*FP_t::dens)); if (clear) pp[i].acc_gd = 0.; if ( pp[i].mass != 0. ) { //pp[i].acc_gd += -coef_acc_gd * rplanet * rplanet * rho_gas * sqrt(u*u) * u / pp[i].mass; pp[i].acc_gd += -coef_acc_gd * pp[i].r_planet * pp[i].r_planet * rho_gas * sqrt(u*u) * u / pp[i].mass; pp[i].acc += pp[i].acc_gd; } } } }; PS::F64 GasDisk::alpha_gas = 11./4.; PS::F64 GasDisk::beta_gas = 0.5; PS::F64 GasDisk::f_gas = 0.71; PS::F64 GasDisk::tau_gas = 1.e6*2.*M_PI; PS::F64 GasDisk::C_d = 1.; PS::F64 GasDisk::mu = 2.34;
GB_unaryop__identity_fp64_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__identity_fp64_uint8
// op(A') function: GB_tran__identity_fp64_uint8

// C type: double
// A type: uint8_t
// cast: double cij = (double) aij
// unaryop: cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise identity apply: Cx [p] = (double) Ax [p] for 0 <= p < anz,
// parallelized statically over nthreads.
GrB_Info GB_unop__identity_fp64_uint8
(
    double *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the shared template GB_unaryop_transpose.c, which
// expands the GB_* macros defined above for this type combination.
GrB_Info GB_tran__identity_fp64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
region_layer.c
// YOLOv2 "region" detection layer: forward/backward loss computation and
// post-processing of predictions into boxes/detections (darknet).
#include "region_layer.h"
#include "activations.h"
#include "blas.h"
#include "box.h"
#include "dark_cuda.h"
#include "utils.h"

#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>

// When set, predicted w/h are expressed relative to the grid size (divided by
// l.w / l.h) rather than absolute anchor multiples.
#define DOABS 1

// Allocates and initializes a region layer: n anchors per cell on a w x h
// grid, each predicting coords + classes + 1 objectness value.
region_layer make_region_layer(int batch, int w, int h, int n, int classes, int coords, int max_boxes)
{
    region_layer l = { (LAYER_TYPE)0 };
    l.type = REGION;

    l.n = n;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.classes = classes;
    l.coords = coords;
    l.cost = (float*)xcalloc(1, sizeof(float));
    l.biases = (float*)xcalloc(n * 2, sizeof(float));       // anchor w,h pairs
    l.bias_updates = (float*)xcalloc(n * 2, sizeof(float));
    l.outputs = h*w*n*(classes + coords + 1);
    l.inputs = l.outputs;
    l.max_boxes = max_boxes;
    l.truths = max_boxes*(5);                               // 5 floats per ground-truth box
    l.delta = (float*)xcalloc(batch * l.outputs, sizeof(float));
    l.output = (float*)xcalloc(batch * l.outputs, sizeof(float));
    int i;
    for(i = 0; i < n*2; ++i){
        l.biases[i] = .5;   // default anchor sizes until loaded from cfg
    }

    l.forward = forward_region_layer;
    l.backward = backward_region_layer;
#ifdef GPU
    l.forward_gpu = forward_region_layer_gpu;
    l.backward_gpu = backward_region_layer_gpu;
    l.output_gpu = cuda_make_array(l.output, batch*l.outputs);
    l.delta_gpu = cuda_make_array(l.delta, batch*l.outputs);
#endif

    fprintf(stderr, "detection\n");
    srand(time(0));

    return l;
}

// Resizes output/delta buffers for a new input resolution; GPU buffers are
// only reallocated when the new grid is larger than the old one.
void resize_region_layer(layer *l, int w, int h)
{
#ifdef GPU
    int old_w = l->w;
    int old_h = l->h;
#endif
    l->w = w;
    l->h = h;
    l->outputs = h*w*l->n*(l->classes + l->coords + 1);
    l->inputs = l->outputs;
    l->output = (float*)xrealloc(l->output, l->batch * l->outputs * sizeof(float));
    l->delta = (float*)xrealloc(l->delta, l->batch * l->outputs * sizeof(float));
#ifdef GPU
    if (old_w < w || old_h < h) {
        cuda_free(l->delta_gpu);
        cuda_free(l->output_gpu);
        l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs);
        l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs);
    }
#endif
}

// Decodes one predicted box at raw-output offset `index` for cell (i,j) and
// anchor n into normalized [0,1] coordinates.
box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h)
{
    box b;
    b.x = (i + logistic_activate(x[index + 0])) / w;
    b.y = (j + logistic_activate(x[index + 1])) / h;
    b.w = exp(x[index + 2]) * biases[2*n];
    b.h = exp(x[index + 3]) * biases[2*n+1];
    if(DOABS){
        // DOABS: anchors are measured in grid cells, so scale down by grid size
        b.w = exp(x[index + 2]) * biases[2*n] / w;
        b.h = exp(x[index + 3]) * biases[2*n+1] / h;
    }
    return b;
}

// Writes the coordinate-regression gradient for one box into `delta` and
// returns the IoU between the (decoded) prediction and the truth box.
float delta_region_box(box truth, float *x, float *biases, int n, int index, int i, int j, int w, int h, float *delta, float scale)
{
    box pred = get_region_box(x, biases, n, index, i, j, w, h);
    float iou = box_iou(pred, truth);

    // targets in the same parameterization the network predicts
    float tx = (truth.x*w - i);
    float ty = (truth.y*h - j);
    float tw = log(truth.w / biases[2*n]);
    float th = log(truth.h / biases[2*n + 1]);
    if(DOABS){
        tw = log(truth.w*w / biases[2*n]);
        th = log(truth.h*h / biases[2*n + 1]);
    }

    delta[index + 0] = scale * (tx - logistic_activate(x[index + 0])) * logistic_gradient(logistic_activate(x[index + 0]));
    delta[index + 1] = scale * (ty - logistic_activate(x[index + 1])) * logistic_gradient(logistic_activate(x[index + 1]));
    delta[index + 2] = scale * (tw - x[index + 2]);
    delta[index + 3] = scale * (th - x[index + 3]);
    return iou;
}

// Writes the classification gradient starting at `index`. With a softmax tree
// the whole ancestor chain of class_id is reinforced; otherwise a flat
// one-vs-all gradient is used, optionally reweighted by focal loss.
void delta_region_class(float *output, float *delta, int index, int class_id, int classes, tree *hier, float scale, float *avg_cat, int focal_loss)
{
    int i, n;
    if(hier){
        float pred = 1;
        while(class_id >= 0){
            pred *= output[index + class_id];
            int g = hier->group[class_id];
            int offset = hier->group_offset[g];
            // push down all siblings in this group, then pull up class_id
            for(i = 0; i < hier->group_size[g]; ++i){
                delta[index + offset + i] = scale * (0 - output[index + offset + i]);
            }
            delta[index + class_id] = scale * (1 - output[index + class_id]);
            class_id = hier->parent[class_id];
        }
        *avg_cat += pred;
    } else {
        // Focal loss
        if (focal_loss) {
            // Focal Loss
            float alpha = 0.5;    // 0.25 or 0.5
            //float gamma = 2;    // hardcoded in many places of the grad-formula

            int ti = index + class_id;
            float pt = output[ti] + 0.000000000000001F;   // epsilon guards logf(0)
            // http://fooplot.com/#W3sidHlwZSI6MCwiZXEiOiItKDEteCkqKDIqeCpsb2coeCkreC0xKSIsImNvbG9yIjoiIzAwMDAwMCJ9LHsidHlwZSI6MTAwMH1d
            float grad = -(1 - pt) * (2 * pt*logf(pt) + pt - 1);    // http://blog.csdn.net/linmingan/article/details/77885832
            //float grad = (1 - pt) * (2 * pt*logf(pt) + pt - 1);    // https://github.com/unsky/focal-loss

            for (n = 0; n < classes; ++n) {
                delta[index + n] = scale * (((n == class_id) ? 1 : 0) - output[index + n]);

                delta[index + n] *= alpha*grad;

                if (n == class_id) *avg_cat += output[index + n];
            }
        }
        else {
            // default
            for (n = 0; n < classes; ++n) {
                delta[index + n] = scale * (((n == class_id) ? 1 : 0) - output[index + n]);
                if (n == class_id) *avg_cat += output[index + n];
            }
        }
    }
}

// Inverse of the logistic activation.
float logit(float x)
{
    return log(x/(1.-x));
}

// NaN test via self-comparison. NOTE(review): returns float although the
// result is boolean -- kept as-is for ABI/caller compatibility.
float tisnan(float x)
{
    return (x != x);
}

// Flat index into the (entry-major) layer output for a given batch, spatial
// location and entry channel.
static int entry_index(layer l, int batch, int location, int entry)
{
    int n = location / (l.w*l.h);
    int loc = location % (l.w*l.h);
    return batch*l.outputs + n*l.w*l.h*(l.coords + l.classes + 1) + entry*l.w*l.h + loc;
}

void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output);

// Forward pass: activates the raw network output and, when training,
// computes l.delta (gradient) and the scalar cost, printing running
// statistics for the batch.
void forward_region_layer(const region_layer l, network_state state)
{
    int i,j,b,t,n;
    int size = l.coords + l.classes + 1;   // floats per anchor prediction
    memcpy(l.output, state.input, l.outputs*l.batch*sizeof(float));
#ifndef GPU
    flatten(l.output, l.w*l.h, size*l.n, l.batch, 1);
#endif
    // logistic objectness for every anchor
    for (b = 0; b < l.batch; ++b){
        for(i = 0; i < l.h*l.w*l.n; ++i){
            int index = size*i + b*l.outputs;
            l.output[index + 4] = logistic_activate(l.output[index + 4]);
        }
    }

#ifndef GPU
    if (l.softmax_tree){
        for (b = 0; b < l.batch; ++b){
            for(i = 0; i < l.h*l.w*l.n; ++i){
                int index = size*i + b*l.outputs;
                softmax_tree(l.output + index + 5, 1, 0, 1, l.softmax_tree, l.output + index + 5);
            }
        }
    } else if (l.softmax){
        for (b = 0; b < l.batch; ++b){
            for(i = 0; i < l.h*l.w*l.n; ++i){
                int index = size*i + b*l.outputs;
                softmax(l.output + index + 5, l.classes, 1, l.output + index + 5, 1);
            }
        }
    }
#endif
    if(!state.train) return;
    memset(l.delta, 0, l.outputs * l.batch * sizeof(float));
    float avg_iou = 0;
    float recall = 0;
    float avg_cat = 0;
    float avg_obj = 0;
    float avg_anyobj = 0;
    int count = 0;
    int class_count = 0;
    *(l.cost) = 0;
    for (b = 0; b < l.batch; ++b) {
        if(l.softmax_tree){
            // classification-only samples are flagged by huge truth coordinates
            int onlyclass_id = 0;
            for(t = 0; t < l.max_boxes; ++t){
                box truth = float_to_box(state.truth + t*5 + b*l.truths);
                if(!truth.x) break; // continue;
                int class_id = state.truth[t*5 + b*l.truths + 4];
                float maxp = 0;
                int maxi = 0;
                if(truth.x > 100000 && truth.y > 100000){
                    // pick the anchor most confident in this class and train
                    // only its class probabilities
                    for(n = 0; n < l.n*l.w*l.h; ++n){
                        int index = size*n + b*l.outputs + 5;
                        float scale = l.output[index-1];
                        float p = scale*get_hierarchy_probability(l.output + index, l.softmax_tree, class_id);
                        if(p > maxp){
                            maxp = p;
                            maxi = n;
                        }
                    }
                    int index = size*maxi + b*l.outputs + 5;
                    delta_region_class(l.output, l.delta, index, class_id, l.classes, l.softmax_tree, l.class_scale, &avg_cat, l.focal_loss);
                    ++class_count;
                    onlyclass_id = 1;
                    break;
                }
            }
            if(onlyclass_id) continue;
        }
        // objectness (no-object) gradient for every anchor in every cell
        for (j = 0; j < l.h; ++j) {
            for (i = 0; i < l.w; ++i) {
                for (n = 0; n < l.n; ++n) {
                    int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs;
                    box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h);
                    float best_iou = 0;
                    int best_class_id = -1;
                    for(t = 0; t < l.max_boxes; ++t){
                        box truth = float_to_box(state.truth + t*5 + b*l.truths);
                        int class_id = state.truth[t * 5 + b*l.truths + 4];
                        if (class_id >= l.classes) continue; // if label contains class_id more than number of classes in the cfg-file
                        if(!truth.x) break; // continue;
                        float iou = box_iou(pred, truth);
                        if (iou > best_iou) {
                            best_class_id = state.truth[t*5 + b*l.truths + 4];
                            best_iou = iou;
                        }
                    }
                    avg_anyobj += l.output[index + 4];
                    l.delta[index + 4] = l.noobject_scale * ((0 - l.output[index + 4]) * logistic_gradient(l.output[index + 4]));
                    if(l.classfix == -1) l.delta[index + 4] = l.noobject_scale * ((best_iou - l.output[index + 4]) * logistic_gradient(l.output[index + 4]));
                    else{
                        // anchors overlapping a truth well are not penalized
                        if (best_iou > l.thresh) {
                            l.delta[index + 4] = 0;
                            if(l.classfix > 0){
                                delta_region_class(l.output, l.delta, index + 5, best_class_id, l.classes, l.softmax_tree, l.class_scale*(l.classfix == 2 ? l.output[index + 4] : 1), &avg_cat, l.focal_loss);
                                ++class_count;
                            }
                        }
                    }

                    // early in training, pull every box toward its anchor prior
                    if(*(state.net.seen) < 12800){
                        box truth = {0};
                        truth.x = (i + .5)/l.w;
                        truth.y = (j + .5)/l.h;
                        truth.w = l.biases[2*n];
                        truth.h = l.biases[2*n+1];
                        if(DOABS){
                            truth.w = l.biases[2*n]/l.w;
                            truth.h = l.biases[2*n+1]/l.h;
                        }
                        delta_region_box(truth, l.output, l.biases, n, index, i, j, l.w, l.h, l.delta, .01);
                    }
                }
            }
        }
        // positive gradients: match each truth box to the best anchor in its cell
        for(t = 0; t < l.max_boxes; ++t){
            box truth = float_to_box(state.truth + t*5 + b*l.truths);
            int class_id = state.truth[t * 5 + b*l.truths + 4];
            if (class_id >= l.classes) {
                printf("\n Warning: in txt-labels class_id=%d >= classes=%d in cfg-file. In txt-labels class_id should be [from 0 to %d] \n", class_id, l.classes, l.classes-1);
                getchar();
                continue; // if label contains class_id more than number of classes in the cfg-file
            }

            if(!truth.x) break; // continue;
            float best_iou = 0;
            int best_index = 0;
            int best_n = 0;
            i = (truth.x * l.w);
            j = (truth.y * l.h);
            //printf("%d %f %d %f\n", i, truth.x*l.w, j, truth.y*l.h);
            // compare shapes only (both boxes centered at the origin)
            box truth_shift = truth;
            truth_shift.x = 0;
            truth_shift.y = 0;
            //printf("index %d %d\n",i, j);
            for(n = 0; n < l.n; ++n){
                int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs;
                box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h);
                if(l.bias_match){
                    pred.w = l.biases[2*n];
                    pred.h = l.biases[2*n+1];
                    if(DOABS){
                        pred.w = l.biases[2*n]/l.w;
                        pred.h = l.biases[2*n+1]/l.h;
                    }
                }
                //printf("pred: (%f, %f) %f x %f\n", pred.x, pred.y, pred.w, pred.h);
                pred.x = 0;
                pred.y = 0;
                float iou = box_iou(pred, truth_shift);
                if (iou > best_iou){
                    best_index = index;
                    best_iou = iou;
                    best_n = n;
                }
            }
            //printf("%d %f (%f, %f) %f x %f\n", best_n, best_iou, truth.x, truth.y, truth.w, truth.h);

            float iou = delta_region_box(truth, l.output, l.biases, best_n, best_index, i, j, l.w, l.h, l.delta, l.coord_scale);
            if(iou > .5) recall += 1;
            avg_iou += iou;

            //l.delta[best_index + 4] = iou - l.output[best_index + 4];
            avg_obj += l.output[best_index + 4];
            l.delta[best_index + 4] = l.object_scale * (1 - l.output[best_index + 4]) * logistic_gradient(l.output[best_index + 4]);
            if (l.rescore) {
                // rescore: objectness target is the IoU instead of 1
                l.delta[best_index + 4] = l.object_scale * (iou - l.output[best_index + 4]) * logistic_gradient(l.output[best_index + 4]);
            }

            if (l.map) class_id = l.map[class_id];
            delta_region_class(l.output, l.delta, best_index + 5, class_id, l.classes, l.softmax_tree, l.class_scale, &avg_cat, l.focal_loss);
            ++count;
            ++class_count;
        }
    }
    //printf("\n");
#ifndef GPU
    flatten(l.delta, l.w*l.h, size*l.n, l.batch, 0);
#endif
    // cost = squared L2 norm of the gradient buffer
    *(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2);
    printf("Region Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, Avg Recall: %f, count: %d\n", avg_iou/count, avg_cat/class_count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, count);
}

// Backward pass: the gradient was fully computed in forward; just add it to
// the upstream delta.
void backward_region_layer(const region_layer l, network_state state)
{
    axpy_cpu(l.batch*l.inputs, 1, l.delta, 1, state.delta, 1);
}

// Converts raw layer output into image-space boxes and per-class
// probabilities (thresholded), one entry per anchor per cell.
void get_region_boxes(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map)
{
    int i;
    float *const predictions = l.output;
#pragma omp parallel for
    for (i = 0; i < l.w*l.h; ++i){
        int j, n;
        int row = i / l.w;
        int col = i % l.w;
        for(n = 0; n < l.n; ++n){
            int index = i*l.n + n;
            int p_index = index * (l.classes + 5) + 4;
            float scale = predictions[p_index];
            if(l.classfix == -1 && scale < .5) scale = 0;
            int box_index = index * (l.classes + 5);
            boxes[index] = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h);
            boxes[index].x *= w;
            boxes[index].y *= h;
            boxes[index].w *= w;
            boxes[index].h *= h;

            int class_index = index * (l.classes + 5) + 5;
            if(l.softmax_tree){
                hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);
                int found = 0;
                if(map){
                    // map selects 200 specific classes out of the hierarchy
                    for(j = 0; j < 200; ++j){
                        float prob = scale*predictions[class_index+map[j]];
                        probs[index][j] = (prob > thresh) ? prob : 0;
                    }
                } else {
                    // keep only the deepest confident node in the hierarchy
                    for(j = l.classes - 1; j >= 0; --j){
                        if(!found && predictions[class_index + j] > .5){
                            found = 1;
                        } else {
                            predictions[class_index + j] = 0;
                        }
                        float prob = predictions[class_index+j];
                        probs[index][j] = (scale > thresh) ? prob : 0;
                    }
                }
            } else {
                for(j = 0; j < l.classes; ++j){
                    float prob = scale*predictions[class_index+j];
                    probs[index][j] = (prob > thresh) ? prob : 0;
                }
            }
            if(only_objectness){
                probs[index][0] = scale;
            }
        }
    }
}

#ifdef GPU

// GPU forward: softmax on device, then pull to host and reuse the CPU loss
// implementation; pushes the resulting gradient back to the device.
void forward_region_layer_gpu(const region_layer l, network_state state)
{
    /*
    if(!state.train){
        copy_ongpu(l.batch*l.inputs, state.input, 1, l.output_gpu, 1);
        return;
    }
    */
    flatten_ongpu(state.input, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 1, l.output_gpu);
    if(l.softmax_tree){
        int i;
        int count = 5;
        for (i = 0; i < l.softmax_tree->groups; ++i) {
            int group_size = l.softmax_tree->group_size[i];
            softmax_gpu(l.output_gpu+count, group_size, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + count);
            count += group_size;
        }
    }else if (l.softmax){
        softmax_gpu(l.output_gpu+5, l.classes, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + 5);
    }

    float* in_cpu = (float*)xcalloc(l.batch * l.inputs, sizeof(float));
    float *truth_cpu = 0;
    if(state.truth){
        int num_truth = l.batch*l.truths;
        truth_cpu = (float*)xcalloc(num_truth, sizeof(float));
        cuda_pull_array(state.truth, truth_cpu, num_truth);
    }
    cuda_pull_array(l.output_gpu, in_cpu, l.batch*l.inputs);
    //cudaStreamSynchronize(get_cuda_stream());
    network_state cpu_state = state;
    cpu_state.train = state.train;
    cpu_state.truth = truth_cpu;
    cpu_state.input = in_cpu;
    forward_region_layer(l, cpu_state);
    //cuda_push_array(l.output_gpu, l.output, l.batch*l.outputs);
    free(cpu_state.input);
    if(!state.train) return;
    cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs);
    //cudaStreamSynchronize(get_cuda_stream());
    if(cpu_state.truth) free(cpu_state.truth);
}

// GPU backward: un-flatten the gradient straight into the upstream delta.
void backward_region_layer_gpu(region_layer l, network_state state)
{
    flatten_ongpu(l.delta_gpu, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 0, state.delta);
}
#endif

// Undoes letterbox scaling: maps network-space boxes back to the original
// image aspect ratio (absolute pixels when !relative).
void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
    int i;
    int new_w = 0;
    int new_h = 0;
    if (((float)netw / w) < ((float)neth / h)) {
        new_w = netw;
        new_h = (h * netw) / w;
    }
    else {
        new_h = neth;
        new_w = (w * neth) / h;
    }
    for (i = 0; i < n; ++i) {
        box b = dets[i].bbox;
        b.x = (b.x - (netw - new_w) / 2. / netw) / ((float)new_w / netw);
        b.y = (b.y - (neth - new_h) / 2. / neth) / ((float)new_h / neth);
        b.w *= (float)netw / new_w;
        b.h *= (float)neth / new_h;
        if (!relative) {
            b.x *= w;
            b.w *= w;
            b.y *= h;
            b.h *= h;
        }
        dets[i].bbox = b;
    }
}

// Fills the detection array from the layer output; when batch == 2 the second
// image is assumed to be a horizontal flip and the two are averaged.
void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets)
{
    int i, j, n, z;
    float *predictions = l.output;
    if (l.batch == 2) {
        float *flip = l.output + l.outputs;
        for (j = 0; j < l.h; ++j) {
            for (i = 0; i < l.w / 2; ++i) {
                for (n = 0; n < l.n; ++n) {
                    for (z = 0; z < l.classes + l.coords + 1; ++z) {
                        int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i;
                        int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1);
                        float swap = flip[i1];
                        flip[i1] = flip[i2];
                        flip[i2] = swap;
                        if (z == 0) {
                            // x offsets must be negated when mirroring
                            flip[i1] = -flip[i1];
                            flip[i2] = -flip[i2];
                        }
                    }
                }
            }
        }
        for (i = 0; i < l.outputs; ++i) {
            l.output[i] = (l.output[i] + flip[i]) / 2.;
        }
    }
    for (i = 0; i < l.w*l.h; ++i) {
        int row = i / l.w;
        int col = i % l.w;
        for (n = 0; n < l.n; ++n) {
            int index = n*l.w*l.h + i;
            for (j = 0; j < l.classes; ++j) {
                dets[index].prob[j] = 0;
            }
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
            int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
            int mask_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            float scale = l.background ? 1 : predictions[obj_index];
            dets[index].bbox = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h);// , l.w*l.h);
            dets[index].objectness = scale > thresh ? scale : 0;
            if (dets[index].mask) {
                for (j = 0; j < l.coords - 4; ++j) {
                    dets[index].mask[j] = l.output[mask_index + j*l.w*l.h];
                }
            }

            int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + !l.background);
            if (l.softmax_tree) {
                hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);// , l.w*l.h);
                if (map) {
                    for (j = 0; j < 200; ++j) {
                        int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + map[j]);
                        float prob = scale*predictions[class_index];
                        dets[index].prob[j] = (prob > thresh) ? prob : 0;
                    }
                }
                else {
                    int j = hierarchy_top_prediction(predictions + class_index, l.softmax_tree, tree_thresh, l.w*l.h);
                    dets[index].prob[j] = (scale > thresh) ? scale : 0;
                }
            }
            else {
                if (dets[index].objectness) {
                    for (j = 0; j < l.classes; ++j) {
                        int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + j);
                        float prob = scale*predictions[class_index];
                        dets[index].prob[j] = (prob > thresh) ? prob : 0;
                    }
                }
            }
        }
    }
    correct_region_boxes(dets, l.w*l.h*l.n, w, h, netw, neth, relative);
}

// Zeroes the objectness channel of every anchor (used for diagnostics).
void zero_objectness(layer l)
{
    int i, n;
    for (i = 0; i < l.w*l.h; ++i) {
        for (n = 0; n < l.n; ++n) {
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
            l.output[obj_index] = 0;
        }
    }
}
pi-v15.c
/*
 * Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
 * between 0 and 1.
 *
 * parallel version using OpenMP
 */

#include <stdio.h>
#include <stdlib.h>

#include <omp.h> /* OpenMP */

#if _DEBUG_
#define _DEBUG_ 1
#else
#define _DEBUG_ 0
#endif

int main(int argc, char *argv[])
{
  double x, sum = 0.0, pi = 0.0;
#if !_DEBUG_
  double t_begin, t_end;
#endif
  int i;
  const char usage_text[] = "Usage: pi <num_steps> (try 1000000000)\n";

  /* exactly one mandatory argument: the number of rectangles */
  if (argc < 2) {
    fprintf(stderr, usage_text);
    exit(1);
  }

  int num_steps = atoi(argv[1]);
  double step = 1.0 / (double) num_steps;

#if !_DEBUG_
  t_begin = omp_get_wtime();
#endif

  /*
   * Two explicit tasks, one per half of [0,1). Each rectangle's area is
   * accumulated into the shared variable `sum` under an atomic update, so
   * the result does not depend on how the runtime schedules the tasks.
   */
  // WARNING : correct code
#pragma omp parallel
#pragma omp single
  {
#pragma omp task private(i,x) shared(sum)
    for (i = 0; i < num_steps/2; i++) {
      x = (i + 0.5) * step;
#pragma omp atomic
      sum += 4.0 / (1.0 + x*x);
#if _DEBUG_
      int id = omp_get_thread_num();
      printf("thread id:%d it:%d\n", id, i);
#endif
    }

#pragma omp task private(i,x) shared(sum)
    for (i = num_steps/2; i < num_steps; i++) {
      x = (i + 0.5) * step;
#pragma omp atomic
      sum += 4.0 / (1.0 + x*x);
#if _DEBUG_
      int id = omp_get_thread_num();
      printf("thread id:%d it:%d\n", id, i);
#endif
    }

    /* both halves must be fully accumulated before scaling */
#pragma omp taskwait

#pragma omp task
    pi = step * sum;
  } /* implicit barrier of `single` guarantees the last task has run */

#if !_DEBUG_
  t_end = omp_get_wtime();
  printf("Wall clock execution time = %.9f seconds\n", t_end - t_begin);
#endif

  /* print results */
  printf("Value of pi = %12.10f\n", pi);

  return EXIT_SUCCESS;
}
stencil.c
/* * Simple Stencil example * Main program example * * Brian J Gravelle * gravelle@cs.uoregon.edu * */ #include "mesh.h" #ifdef USE_CALI #include <caliper/cali.h> #endif #include <stdlib.h> #include <stdio.h> #include <omp.h> #ifndef X_SIZE #define X_SIZE 10000 #endif #ifndef Y_SIZE #define Y_SIZE 20000 #endif #ifndef TIME #define TIME 0.0 #endif #ifndef STEP #define STEP 1.0 #endif #ifndef TIME_STOP #define TIME_STOP 5.0 #endif #ifndef FILE_NAME #define FILE_NAME "openmp_results.txt" #endif #if USE_AOS // create and fill the mesh with starting values int init_mesh(struct Mesh ***mesh, int x_size, int y_size) { #ifdef USE_CALI CALI_CXX_MARK_FUNCTION; #endif struct Mesh **_mesh; int err = FALSE; int i, j; double H = 100; double V = 1000; // allocate memory and verify that it worked _mesh = (struct Mesh**) malloc(x_size * sizeof(struct Mesh*)); if(_mesh == NULL) err = TRUE; for (i = 0; i < x_size; ++i) { _mesh[i] = (struct Mesh*) malloc(y_size * sizeof(struct Mesh)); if(_mesh[i] == NULL) err = TRUE; } // define starting values for (i = 0; i < x_size; i++) { V = 1000; for (j = 0; j < y_size; j++) { ACCESS_MESH(_mesh, i, j, avg) = H; ACCESS_MESH(_mesh, i, j, sum) = V; ACCESS_MESH(_mesh, i, j, pde) = i*j; ACCESS_MESH(_mesh, i, j, dep) = H+V; // _mesh[i][j].avg = H; // _mesh[i][j].sum = V; // _mesh[i][j].pde = i*j; // _mesh[i][j].dep = H+V; V += 1000; } H += 100; } *mesh = _mesh; return err; } // liberate the memory void free_mesh(MESH mesh, int x_size, int y_size) { #ifdef USE_CALI CALI_CXX_MARK_FUNCTION; #endif int i; for (i = 0; i < x_size; ++i) { free(mesh[i]); } free(mesh); } #else // USE_SOA // create and fill the mesh with starting values int init_mesh(struct Mesh *mesh, int x_size, int y_size) { #ifdef USE_CALI CALI_CXX_MARK_FUNCTION; #endif int err = FALSE; int i, j; double H = 100; double V = 1000; // allocate memory and verify that it worked mesh->avg = (double**) malloc(x_size * sizeof(double*)); mesh->sum = (double**) malloc(x_size * sizeof(double*)); 
mesh->pde = (double**) malloc(x_size * sizeof(double*)); mesh->dep = (double**) malloc(x_size * sizeof(double*)); if(mesh->avg == NULL) err = TRUE; if(mesh->sum == NULL) err = TRUE; if(mesh->pde == NULL) err = TRUE; if(mesh->dep == NULL) err = TRUE; for (i = 0; i < x_size; ++i) { mesh->avg[i] = (double*) malloc(y_size * sizeof(double)); mesh->sum[i] = (double*) malloc(y_size * sizeof(double)); mesh->pde[i] = (double*) malloc(y_size * sizeof(double)); mesh->dep[i] = (double*) malloc(y_size * sizeof(double)); if(mesh->avg[i] == NULL) err = TRUE; if(mesh->sum[i] == NULL) err = TRUE; if(mesh->pde[i] == NULL) err = TRUE; if(mesh->dep[i] == NULL) err = TRUE; } // define starting values for (i = 0; i < x_size; i++) { V = 1000; for (j = 0; j < y_size; j++) { mesh->sum[i][j] = H; mesh->avg[i][j] = V; mesh->pde[i][j] = i*j; mesh->dep[i][j] = H+V; V += 1000; } H += 100; } return err; } // liberate the memory void free_mesh(struct Mesh mesh, int x_size, int y_size){ #ifdef USE_CALI CALI_CXX_MARK_FUNCTION; #endif int i; for (i = 0; i < x_size; ++i) { free(mesh.avg[i]); free(mesh.sum[i]); free(mesh.pde[i]); free(mesh.dep[i]); } free(mesh.avg); free(mesh.sum); free(mesh.pde); free(mesh.dep); } #endif double pythag(double x1, double y1, double x2, double y2) { return (x1-x2)*(x1-x2)+(y1-y2)*(y1-y2); } // perform one iteration of the timestep void do_timestep(MESH mesh, MESH temp_mesh, int x_size, int y_size, double time, double dt) { #ifdef USE_CALI CALI_CXX_MARK_FUNCTION; #endif int thr_id; double dt2 = dt*dt; double C = 0.25, dx2 = 1.0; int _x, _y, n, i, j, t; // looping over all rows of the matrix // main source of paralleism #pragma omp parallel private(_y, n, t, thr_id, dx2) { // establish temporary mesh for this thread thr_id = omp_get_thread_num(); int neighbors[NUM_NEIGHBORS][2]; #ifdef USE_CALI CALI_MARK_BEGIN("computation"); #endif #pragma omp for for (_x = 0; _x < x_size; _x++) { // fill next temp row with starting values for (_y = 0; _y < y_size; _y++) { 
ACCESS_MESH(temp_mesh, _x, _y, avg) = 0; ACCESS_MESH(temp_mesh, _x, _y, sum) = 0; ACCESS_MESH(temp_mesh, _x, _y, pde) = -2*dt2 * ACCESS_MESH(mesh, _x, _y, pde) * C; ACCESS_MESH(temp_mesh, _x, _y, dep) = -2*dt2 * ACCESS_MESH(mesh, _x, _y, dep) * C; } // actually do some computation for (_y = 0; _y < y_size; _y++) { get_neighbors(x_size, y_size, _x, _y, neighbors); for(n = 0; n < NUM_NEIGHBORS; n++) { ACCESS_MESH(temp_mesh, _x, _y, avg) += ACCESS_MESH(mesh, neighbors[n][X], neighbors[n][Y], avg); } ACCESS_MESH(temp_mesh, _x, _y, avg) /= NUM_NEIGHBORS; for(n = 0; n < NUM_NEIGHBORS; n++) { ACCESS_MESH(temp_mesh, _x, _y, sum) += ACCESS_MESH(mesh, neighbors[n][X], neighbors[n][Y], sum)/NUM_NEIGHBORS; } for(n = 0; n < NUM_NEIGHBORS; n++){ dx2 = pythag(_x, _y, neighbors[n][X], neighbors[n][Y]); // dx^2 ACCESS_MESH(temp_mesh, _x, _y, pde) += (-2*dt2 * ACCESS_MESH(mesh, neighbors[n][X], neighbors[n][Y], pde)) / ((dx2 + 1.0) * C); } for(n = 0; n < NUM_NEIGHBORS; n++){ dx2 = pythag(_x, _y, neighbors[n][X], neighbors[n][Y]); // dx^2 ACCESS_MESH(temp_mesh, _x, _y, dep) += (ACCESS_MESH(mesh, neighbors[n][X], neighbors[n][Y], avg)*dt2 * \ ACCESS_MESH(mesh, neighbors[n][X], neighbors[n][Y], dep)) / \ ((dx2 + ACCESS_MESH(mesh, neighbors[n][X], neighbors[n][Y], sum)) * C); } } // _y loop } // _x loop #ifdef USE_CALI CALI_MARK_END("computation"); #endif } // parallel region } // do time step // print the mesh void print_mesh(MESH mesh, int x_size, int y_size) { #ifdef USE_CALI CALI_CXX_MARK_FUNCTION; #endif int i, j; for (i = 0; i < x_size; i++) { printf("x = %d\n", i); for (j = 0; j < y_size; j++) { printf("%10.2e ", ACCESS_MESH(mesh, i, j, avg)); } printf("\n"); for (j = 0; j < y_size; j++) { printf("%10.2e ", ACCESS_MESH(mesh, i, j, sum)); } printf("\n"); for (j = 0; j < y_size; j++) { printf("%10.2e ", ACCESS_MESH(mesh, i, j, pde)); } printf("\n"); for (j = 0; j < y_size; j++) { printf("%10.2e ", ACCESS_MESH(mesh, i, j, dep)); } printf("\n\n"); } } // print the mesh to file void 
output_mesh(FILE* file, MESH mesh, int x_size, int y_size) {
/* File-based twin of print_mesh: same four-lines-per-row layout, written
   to the already-open `file` (caller owns the FILE handle). */
#ifdef USE_CALI
    CALI_CXX_MARK_FUNCTION;
#endif
    int i, j;
    for (i = 0; i < x_size; i++) {
        fprintf(file, "x = %d\n", i);
        for (j = 0; j < y_size; j++) {
            fprintf(file, "%10.2e ", ACCESS_MESH(mesh, i, j, avg));
        }
        fprintf(file, "\n");
        for (j = 0; j < y_size; j++) {
            fprintf(file, "%10.2e ", ACCESS_MESH(mesh, i, j, sum));
        }
        fprintf(file, "\n");
        for (j = 0; j < y_size; j++) {
            fprintf(file, "%10.2e ", ACCESS_MESH(mesh, i, j, pde));
        }
        fprintf(file, "\n");
        for (j = 0; j < y_size; j++) {
            fprintf(file, "%10.2e ", ACCESS_MESH(mesh, i, j, dep));
        }
        fprintf(file, "\n\n");
    }
}

/* Smoke test on a tiny 5x10 mesh: init, print, one timestep, print, free.
   Returns nonzero on any init failure.
   NOTE(review): mesh_2 is allocated with TEMP_ROWS rows but do_timestep
   writes x_size (=5) rows into it -- assumes TEMP_ROWS >= 5, TODO confirm.
   The early `return 1` paths also leak whatever was already allocated. */
int test_small_mesh() {
#ifdef USE_CALI
    CALI_CXX_MARK_FUNCTION;
#endif
    int err = FALSE;
    MESH mesh_1;
    MESH mesh_2;
    int x_size = 5;
    int y_size = 10;
    double time = 0.0;

    printf("init_mesh...\n");
    err = err | init_mesh(&mesh_1, x_size, y_size);
    err = err | init_mesh(&mesh_2, TEMP_ROWS, y_size);
    /* Under USE_AOS, MESH is presumably a pointer type (NULL-comparable);
       otherwise check each field's row-pointer array individually. */
#if USE_AOS
    if(mesh_1 == NULL) return 1;
    if(mesh_2 == NULL) return 1;
#else
    if(mesh_1.avg == NULL) return 1;
    if(mesh_2.avg == NULL) return 1;
    if(mesh_1.sum == NULL) return 1;
    if(mesh_2.sum == NULL) return 1;
    if(mesh_1.pde == NULL) return 1;
    if(mesh_2.pde == NULL) return 1;
    if(mesh_1.dep == NULL) return 1;
    if(mesh_2.dep == NULL) return 1;
#endif
    printf("print_mesh...\n");
    print_mesh(mesh_1, x_size, y_size);
    printf("do_timestep...\n");
    do_timestep(mesh_1, mesh_2, x_size, y_size, time, 1.0);
    /* NOTE(review): this prints mesh_1 again (the timestep's *input*, which
       do_timestep does not modify) -- TODO confirm mesh_2 wasn't intended. */
    printf("print_mesh...\n");
    print_mesh(mesh_1, x_size, y_size);
    printf("free_mesh...\n");
    free_mesh(mesh_1, x_size, y_size);
    free_mesh(mesh_2, TEMP_ROWS, y_size);
    return err;
}

/* Full benchmark driver: allocates two x_size-by-y_size meshes, then
   ping-pongs do_timestep between them from `time` to `time_stop` in
   increments of `step` (two steps per while-iteration), timing every phase
   with omp_get_wtime. Optional file I/O under DO_IO. Returns nonzero on
   init failure (early returns leak -- see NOTE in test_small_mesh). */
int run_custom_mesh(int x_size, int y_size, double time, double step, double time_stop) {
#ifdef USE_CALI
    CALI_CXX_MARK_FUNCTION;
#endif
    int err = FALSE;
    MESH main_mesh;
    MESH temp_mesh;
    double wall_tot_start, wall_tot_end;
    double wall_init_start, wall_init_end;
    double wall_step_start, wall_step_end;
    double wall_free_start, wall_free_end;
    int num_thr;
    /* thread count is hard-coded to 4 here */
    omp_set_num_threads(4);
#pragma omp parallel
    {
/* (inside run_custom_mesh: thread 0 records the actual team size; the
   implicit barrier at the end of the parallel region makes num_thr safe
   to read afterwards) */
if(omp_get_thread_num()==0)
        num_thr = omp_get_num_threads();
    }

    printf("\n\nRunning new Stencil with \n\
x_size = %d \n\
y_size = %d \n\
num_thr = %d \n\
start time = %f \n\
time step = %f \n\
end time = %f \n\n", x_size, y_size, num_thr, time, step, time_stop);

    wall_tot_start = omp_get_wtime();

    /* --- allocation phase --- */
    wall_init_start = omp_get_wtime();
    printf("init_mesh.......");
    fflush(stdout);
    err = err | init_mesh(&main_mesh, x_size, y_size);
    err = err | init_mesh(&temp_mesh, x_size, y_size);
#if USE_AOS
    if(main_mesh == NULL) return 1;
    if(temp_mesh == NULL) return 1;
#else
    if(main_mesh.avg == NULL) return 1;
    if(temp_mesh.avg == NULL) return 1;
    if(main_mesh.sum == NULL) return 1;
    if(temp_mesh.sum == NULL) return 1;
    if(main_mesh.pde == NULL) return 1;
    if(temp_mesh.pde == NULL) return 1;
    if(main_mesh.dep == NULL) return 1;
    if(temp_mesh.dep == NULL) return 1;
#endif
    wall_init_end = omp_get_wtime();
    printf("%fs\n", (wall_init_end - wall_init_start));

#ifdef DO_IO
    /* Write the run header and the initial mesh state to FILE_NAME.
       NOTE(review): fopen's result is not checked -- a failed open would
       crash in fprintf. */
    printf("output to file.....");
    fflush(stdout);
    double io_start = omp_get_wtime();
    FILE* file = fopen(FILE_NAME, "w+");
    fprintf(file, "\n\nRunning new Stencil with \n\
x_size = %d \n\
y_size = %d \n\
start time = %f \n\
time step = %f \n\
end time = %f \n\n", x_size, y_size, time, step, time_stop);
    output_mesh(file, main_mesh, x_size, y_size);
    printf("%fs\n", (omp_get_wtime() - io_start));
#endif

    /* --- timestep phase: two steps per iteration, swapping the roles of
       main_mesh and temp_mesh so no copy-back is needed --- */
    while(time < time_stop) {
        printf("timestep %.2f...", time);
        fflush(stdout);
        wall_step_start = omp_get_wtime();
        do_timestep(main_mesh, temp_mesh, x_size, y_size, time, step);
        time += step;
        wall_step_end = omp_get_wtime();
        printf("%fs\n", (wall_step_end - wall_step_start));

        printf("timestep %.2f...", time);
        fflush(stdout);
        wall_step_start = omp_get_wtime();
        do_timestep(temp_mesh, main_mesh, x_size, y_size, time, step);
        time += step;
        wall_step_end = omp_get_wtime();
        printf("%fs\n", (wall_step_end - wall_step_start));
    }

#ifdef DO_IO
    /* Append the final mesh state to the same file. */
    io_start = omp_get_wtime();
    printf("output to file.....");
    fflush(stdout);
    fprintf(file, "\n\n");
/* (inside run_custom_mesh, still under #ifdef DO_IO: final dump and close) */
output_mesh(file, main_mesh, x_size, y_size);
    fprintf(file, "\n\n");
    fclose(file);
    printf("%fs\n", (omp_get_wtime() - io_start));
#endif

    /* --- teardown phase --- */
    printf("free_mesh.......\n");
    fflush(stdout);
    wall_free_start = omp_get_wtime();
    free_mesh(main_mesh, x_size, y_size);
    free_mesh(temp_mesh, x_size, y_size);
    wall_free_end = omp_get_wtime();
    printf("%fs\n", (wall_free_end - wall_free_start));

    wall_tot_end = omp_get_wtime();
    printf("\n total time: %fs\n", (wall_tot_end - wall_tot_start));
    return err;
}

// main function
/* Entry point: optionally tags each OpenMP thread with a Caliper
   "thread_id" attribute, then runs the benchmark with the compile-time
   X_SIZE/Y_SIZE/TIME/STEP/TIME_STOP configuration. Exit status is the
   accumulated error flag. */
int main(int argc, char **argv) {
#ifdef USE_CALI
    /* Per-thread Caliper attribute so profiles can be split by thread. */
    cali_id_t thread_attr = cali_create_attribute("thread_id", CALI_TYPE_INT, CALI_ATTR_ASVALUE | CALI_ATTR_SKIP_EVENTS);
#pragma omp parallel
    {
        cali_set_int(thread_attr, omp_get_thread_num());
    }
#endif
    int err = FALSE;
    // err = err | test_small_mesh();
    // printf("\n\n");
    err = err | run_custom_mesh(X_SIZE, Y_SIZE, TIME, STEP, TIME_STOP);
    return err;
}