source
stringlengths
3
92
c
stringlengths
26
2.25M
parallel_macros.h
// ========================================================================== // SeqAn - The Library for Sequence Analysis // ========================================================================== // Copyright (c) 2006-2010, Knut Reinert, FU Berlin // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
// // ========================================================================== // Author: Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de> // ========================================================================== // Utility macros for parallelism. // ========================================================================== #ifndef SEQAN_PARALLEL_PARALLEL_MACROS_H_ #define SEQAN_PARALLEL_PARALLEL_MACROS_H_ /** .Macro.SEQAN_OMP_PRAGMA ..summary:Portable conditional $#pragma$ issuing if OpenMP is enabled. ..cat:Parallelism ..signature:SEQAN_OMP_PRAGMA(x) ..param.x:The string to issue behind $#pragma omp$. ..remarks:This macro uses portable pragma generation, dependent on the macro $_OPENMP$ being defined (as by the OpenMP standard). ..remarks:This is useful for disabling OpenMP pragmas on compilers that do not support OpenMP to suppress warnings. ..example.text:Parallelize loop with OpenMP if OpenMP is enabled: ..example.code: SEQAN_OMP_PRAGMA(parallel for) // becomes: #pragma omp parallel for for (int i = 0; i < x; ++i) { // Do work. } ..example.text:Make an addition atomic if OpenMP is enabled: ..example.code: SEQAN_OMP_PRAGMA(parallel atomic) // becomes: #pragma omp parallel atomic i += 1; */ #ifdef _OPENMP #include <omp.h> #if defined(PLATFORM_WINDOWS_MINGW) || defined(PLATFORM_GCC) // GCC _Pragma operator #define SEQAN_DO_PRAGMA(x) _Pragma(#x) #define SEQAN_OMP_PRAGMA(x) SEQAN_DO_PRAGMA(omp x) #else // #if defined(PLATFORM_WINDOWS_MINGW) || defined(PLATFORM_GCC) // MSVC __pragma-operator #define SEQAN_OMP_PRAGMA(x) __pragma (omp x) #endif // #if defined(PLATFORM_WINDOWS_MINGW) || defined(PLATFORM_GCC) #else // #ifdef _OPENMP #define SEQAN_OMP_PRAGMA(x) #endif // #ifdef _OPENMP #endif // SEQAN_PARALLEL_PARALLEL_MACROS_H_
Reductions.h
#ifndef _REDUCTIONS_H_ #define _REDUCTIONS_H_ #include <mpi.h> #include <sys/time.h> #include <iostream> #include <iomanip> #include <functional> #include <algorithm> #include <vector> #include <string> #include <sstream> #include "CombBLAS/CombBLAS.h" #include "Glue.h" #include "CCGrid.h" namespace combblas { /*************************************************************************** * Distribute a local m/sqrt(p) x n/sqrt(p) matrix (represented by a list of tuples) across layers * so that a each processor along the third dimension receives m/sqrt(p) x n/(c*sqrt(p)) submatrices. * After receiving c submatrices, they are merged to create one m/sqrt(p) x n/(c*sqrt(p)) matrix. * Assumption: input tuples are deleted * Inputs: * fibWorld: Communicator along the third dimension * localmerged: input array of tuples, which will be distributed across layers * Output: output array of tuples, after distributing across layers and merging locally in the received processor * ***************************************************************************/ template <typename SR, typename IT, typename NT> SpTuples<IT,NT> * ParallelReduce_Alltoall_threaded(MPI_Comm & fibWorld, SpTuples<IT,NT> * & localmerged) { double comp_begin, comm_begin, comp_time=0, comm_time=0; int fprocs, fibrank; MPI_Comm_size(fibWorld,&fprocs); MPI_Comm_rank(fibWorld,&fibrank); IT mdim = localmerged->getnrow(); IT ndim = localmerged->getncol(); if(fprocs == 1) { return localmerged; } // ------------ find splitters to distributed across layers ----------- comp_begin = MPI_Wtime(); std::vector<int> send_sizes(fprocs); std::vector<int> recv_sizes(fprocs); std::vector<int> recv_offsets(fprocs); std::vector<int> send_offsets = findColSplitters<int>(localmerged, fprocs); for(int i=0; i<fprocs; i++) { send_sizes[i] = send_offsets[i+1] - send_offsets[i]; } comp_time += (MPI_Wtime() - comp_begin); // ------------ Communicate counts ----------- comm_begin = MPI_Wtime(); MPI_Alltoall( send_sizes.data(), 1, MPI_INT, 
recv_sizes.data(), 1, MPI_INT,fibWorld); comm_time += (MPI_Wtime() - comm_begin); MPI_Datatype MPI_triple; MPI_Type_contiguous(sizeof(std::tuple<IT,IT,NT>), MPI_CHAR, &MPI_triple); MPI_Type_commit(&MPI_triple); // ------------ Allocate memory to receive data ----------- comp_begin = MPI_Wtime(); int recv_count = 0; for( int i = 0; i < fprocs; i++ ) { recv_count += recv_sizes[i]; } std::tuple<IT,IT,NT> * recvbuf = static_cast<std::tuple<IT, IT, NT>*> (::operator new (sizeof(std::tuple<IT, IT, NT>[recv_count]))); recv_offsets[0] = 0; for( int i = 1; i < fprocs; i++ ) { recv_offsets[i] = recv_offsets[i-1]+recv_sizes[i-1]; } comp_time += (MPI_Wtime() - comp_begin); // ------------ Communicate split tuples ----------- comm_begin = MPI_Wtime(); MPI_Alltoallv( localmerged->tuples, send_sizes.data(), send_offsets.data(), MPI_triple, recvbuf, recv_sizes.data(), recv_offsets.data(), MPI_triple, fibWorld); // WARNING: is this big enough? comm_time += (MPI_Wtime() - comm_begin); // -------- update column indices of split tuples ---------- comp_begin = MPI_Wtime(); IT ndimSplit = ndim/fprocs; if(fibrank==(fprocs-1)) ndimSplit = ndim - ndimSplit * fibrank; IT coloffset = fibrank * ndimSplit; #pragma omp parallel for for(int k=0; k<recv_count; k++) { std::get<1>(recvbuf[k]) = std::get<1>(recvbuf[k]) - coloffset; } // -------- create vector of SpTuples for MultiwayMerge ---------- std::vector< SpTuples<IT,NT>* > lists; for(int i=0; i< fprocs; ++i) { SpTuples<IT, NT>* spTuples = new SpTuples<IT, NT> (recv_sizes[i], mdim, ndimSplit, &recvbuf[recv_offsets[i]], true); // If needed pass an empty object of proper dimension lists.push_back(spTuples); } // -------- merge received tuples ---------- SpTuples<IT,NT> * globalmerged = MultiwayMerge<SR>(lists, mdim, ndimSplit, false); comp_time += (MPI_Wtime() - comp_begin); comp_reduce_layer += comp_time; comm_reduce += comm_time; ::operator delete(recvbuf); delete localmerged; // not sure if we can call ::operator delete here return 
globalmerged; } template <typename NT, typename IT> SpDCCols<IT,NT> * ReduceAll_threaded(std::vector< SpTuples<IT,NT>* > & unreducedC, CCGrid & CMG) { typedef PlusTimesSRing<double, double> PTDD; IT mdim = unreducedC[0]->getnrow(); IT ndim = unreducedC[0]->getncol(); // ------ merge list of tuples from n/sqrt(p) stages of SUMMA ------- double loc_beg1 = MPI_Wtime(); //SpTuples<IT, NT>* localmerged = multiwayMerge(unreducedC, true); SpTuples<IT, NT>* localmerged = MultiwayMerge<PTDD>(unreducedC, mdim, ndim, true); comp_reduce += (MPI_Wtime() - loc_beg1); // scatter local tuples across layers SpTuples<IT,NT> * mergedSpTuples = ParallelReduce_Alltoall_threaded<PTDD>(CMG.fiberWorld, localmerged); loc_beg1 = MPI_Wtime(); // TODO: this is not a good constructor. Change it back to SpTuple-based constructor SpDCCols<IT,NT> * reducedC = new SpDCCols<IT,NT>(mergedSpTuples->getnrow(), mergedSpTuples->getncol(), mergedSpTuples->getnnz(), mergedSpTuples->tuples, false); comp_result += (MPI_Wtime() - loc_beg1); delete mergedSpTuples; // too expensive return reducedC; } } #endif
NeighborhoodGraph.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#ifndef _SPTAG_COMMON_NG_H_
#define _SPTAG_COMMON_NG_H_

#include "../VectorIndex.h"
#include "CommonUtils.h"
#include "Dataset.h"
#include "FineGrainedLock.h"
#include "QueryResultSet.h"

namespace SPTAG {
namespace COMMON {

// Abstract k-nearest-neighbor graph (RNG) over the vectors of a VectorIndex.
// Construction: a forest of m_iTPTNumber random-projection trees (TP-trees)
// produces candidate leaf groups; all pairs inside a leaf are brute-forced
// into per-node neighbor lists, which are then refined via index searches.
// Subclasses supply the neighbor insertion/rebuild policy and the accuracy
// estimator.
class NeighborhoodGraph
{
public:
    // Defaults: 32 trees, leaves of <=2000 points, 1000 sample points per
    // split, 5 top-variance dimensions per projection, neighborhood of 32
    // (temporarily scaled by m_iNeighborhoodScale during construction).
    NeighborhoodGraph(): m_iTPTNumber(32), m_iTPTLeafSize(2000), m_iSamples(1000), m_numTopDimensionTPTSplit(5), m_iNeighborhoodSize(32), m_iNeighborhoodScale(2), m_iCEFScale(2), m_iRefineIter(2), m_iCEF(1000), m_iMaxCheckForRefineGraph(10000) { m_pNeighborhoodGraph.SetName("Graph"); }

    ~NeighborhoodGraph() {}

    // Policy hooks implemented by concrete graph types.
    virtual void InsertNeighbors(VectorIndex* index, const SizeType node, SizeType insertNode, float insertDist) = 0;
    virtual void RebuildNeighbors(VectorIndex* index, const SizeType node, SizeType* nodes, const BasicResult* queryResults, const int numResults) = 0;
    virtual float GraphAccuracyEstimation(VectorIndex* index, const SizeType samples, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) = 0;

    // Build the full graph for every vector currently in `index`.
    // idmap optionally remaps vector ids before neighbors are recorded.
    // Small datasets (<1000 vectors) skip the TP-tree stage and go straight
    // to refinement (which brute-forces via index searches).
    template <typename T>
    void BuildGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
    {
        std::cout << "build RNG graph!" << std::endl;
        m_iGraphSize = index->GetNumSamples();
        // Temporarily widen the neighborhood; RefineGraph shrinks it back.
        m_iNeighborhoodSize = m_iNeighborhoodSize * m_iNeighborhoodScale;
        m_pNeighborhoodGraph.Initialize(m_iGraphSize, m_iNeighborhoodSize);

        if (m_iGraphSize < 1000) {
            RefineGraph<T>(index, idmap);
            std::cout << "Build RNG Graph end!" << std::endl;
            return;
        }

        {
            // Per-node candidate distances, parallel to m_pNeighborhoodGraph.
            COMMON::Dataset<float> NeighborhoodDists(m_iGraphSize, m_iNeighborhoodSize);
            std::vector<std::vector<SizeType>> TptreeDataIndices(m_iTPTNumber, std::vector<SizeType>(m_iGraphSize));
            std::vector<std::vector<std::pair<SizeType, SizeType>>> TptreeLeafNodes(m_iTPTNumber, std::vector<std::pair<SizeType, SizeType>>());
            for (SizeType i = 0; i < m_iGraphSize; i++)
                for (DimensionType j = 0; j < m_iNeighborhoodSize; j++)
                    (NeighborhoodDists)[i][j] = MaxDist;

            std::cout << "Parallel TpTree Partition begin " << std::endl;
#pragma omp parallel for schedule(dynamic)
            for (int i = 0; i < m_iTPTNumber; i++)
            {
                // Staggered sleep + clock() seed tries to give each tree a
                // different random sequence.
                // NOTE(review): Sleep() is Windows-flavored (presumably shimmed
                // elsewhere), srand/rand are process-global so concurrent trees
                // share RNG state, and std::random_shuffle was removed in C++17
                // — confirm the project pins an earlier standard.
                Sleep(i * 100); std::srand(clock());
                for (SizeType j = 0; j < m_iGraphSize; j++) TptreeDataIndices[i][j] = j;
                std::random_shuffle(TptreeDataIndices[i].begin(), TptreeDataIndices[i].end());
                PartitionByTptree<T>(index, TptreeDataIndices[i], 0, m_iGraphSize - 1, TptreeLeafNodes[i]);
                std::cout << "Finish Getting Leaves for Tree " << i << std::endl;
            }
            std::cout << "Parallel TpTree Partition done" << std::endl;

            // Brute-force all pairs within each leaf of each tree, feeding
            // both endpoints' neighbor lists (leaf ranges are inclusive).
            for (int i = 0; i < m_iTPTNumber; i++)
            {
#pragma omp parallel for schedule(dynamic)
                for (SizeType j = 0; j < (SizeType)TptreeLeafNodes[i].size(); j++)
                {
                    SizeType start_index = TptreeLeafNodes[i][j].first;
                    SizeType end_index = TptreeLeafNodes[i][j].second;
                    if (omp_get_thread_num() == 0) std::cout << "\rProcessing Tree " << i << ' ' << j * 100 / TptreeLeafNodes[i].size() << '%';
                    for (SizeType x = start_index; x < end_index; x++)
                    {
                        for (SizeType y = x + 1; y <= end_index; y++)
                        {
                            SizeType p1 = TptreeDataIndices[i][x];
                            SizeType p2 = TptreeDataIndices[i][y];
                            float dist = index->ComputeDistance(index->GetSample(p1), index->GetSample(p2));
                            if (idmap != nullptr) {
                                p1 = (idmap->find(p1) == idmap->end()) ? p1 : idmap->at(p1);
                                p2 = (idmap->find(p2) == idmap->end()) ? p2 : idmap->at(p2);
                            }
                            COMMON::Utils::AddNeighbor(p2, dist, (m_pNeighborhoodGraph)[p1], (NeighborhoodDists)[p1], m_iNeighborhoodSize);
                            COMMON::Utils::AddNeighbor(p1, dist, (m_pNeighborhoodGraph)[p2], (NeighborhoodDists)[p2], m_iNeighborhoodSize);
                        }
                    }
                }
                TptreeDataIndices[i].clear();
                TptreeLeafNodes[i].clear();
                std::cout << std::endl;
            }
            TptreeDataIndices.clear();
            TptreeLeafNodes.clear();
        }

        if (m_iMaxCheckForRefineGraph > 0) {
            RefineGraph<T>(index, idmap);
        }
    }

    // Refine every node's neighbor list by re-querying the index.
    // Runs m_iRefineIter passes: the first (m_iRefineIter - 1) at widened
    // CEF/neighborhood, the last after shrinking both back to final size.
    // Negative keys in idmap encode crosslinks written into the last slot.
    template <typename T>
    void RefineGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
    {
        m_iCEF *= m_iCEFScale;
        for (int iter = 0; iter < m_iRefineIter - 1; iter++)
        {
#pragma omp parallel for schedule(dynamic)
            for (SizeType i = 0; i < m_iGraphSize; i++)
            {
                RefineNode<T>(index, i, false, false);
                if (i % 1000 == 0) std::cout << "\rRefine " << iter << " " << static_cast<int>(i * 1.0 / m_iGraphSize * 100) << "%";
            }
            std::cout << "Refine RNG, graph acc:" << GraphAccuracyEstimation(index, 100, idmap) << std::endl;
        }
        m_iCEF /= m_iCEFScale;
        m_iNeighborhoodSize /= m_iNeighborhoodScale;
#pragma omp parallel for schedule(dynamic)
        for (SizeType i = 0; i < m_iGraphSize; i++)
        {
            RefineNode<T>(index, i, false, false);
            if (i % 1000 == 0) std::cout << "\rRefine " << (m_iRefineIter - 1) << " " << static_cast<int>(i * 1.0 / m_iGraphSize * 100) << "%";
        }
        std::cout << "Refine RNG, graph acc:" << GraphAccuracyEstimation(index, 100, idmap) << std::endl;

        if (idmap != nullptr) {
            for (auto iter = idmap->begin(); iter != idmap->end(); iter++)
                if (iter->first < 0)
                {
                    m_pNeighborhoodGraph[-1 - iter->first][m_iNeighborhoodSize - 1] = -2 - iter->second;
                }
        }
    }

    // Refine only the nodes in `indices`, translating neighbor ids through
    // reverseIndices, optionally writing the result into `newGraph` and/or
    // serializing it to `output` (same binary layout as SaveGraph).
    template <typename T>
    ErrorCode RefineGraph(VectorIndex* index, std::vector<SizeType>& indices, std::vector<SizeType>& reverseIndices, std::ostream* output, NeighborhoodGraph* newGraph, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
    {
        SizeType R = (SizeType)indices.size();
        if (newGraph != nullptr) {
            newGraph->m_pNeighborhoodGraph.Initialize(R, m_iNeighborhoodSize);
            newGraph->m_iGraphSize = R;
            newGraph->m_iNeighborhoodSize = m_iNeighborhoodSize;
        }
#pragma omp parallel for schedule(dynamic)
        for (SizeType i = 0; i < R; i++)
        {
            RefineNode<T>(index, indices[i], false, false);
            SizeType *nodes, *outnodes;
            nodes = outnodes = m_pNeighborhoodGraph[indices[i]];
            if (newGraph != nullptr) outnodes = newGraph->m_pNeighborhoodGraph[i];
            std::unordered_map<SizeType, SizeType>::const_iterator iter;
            for (DimensionType j = 0; j < m_iNeighborhoodSize; j++)
            {
                // NOTE(review): nodes[j] < reverseIndices.size() mixes signed
                // SizeType with size_t — fine once nodes[j] >= 0 holds, which
                // the preceding test guarantees.
                if (nodes[j] >= 0 && nodes[j] < reverseIndices.size()) outnodes[j] = reverseIndices[nodes[j]];
                if (idmap != nullptr && (iter = idmap->find(outnodes[j])) != idmap->end()) outnodes[j] = iter->second;
            }
            if (idmap != nullptr && (iter = idmap->find(-1 - i)) != idmap->end())
                outnodes[m_iNeighborhoodSize - 1] = -2 - iter->second;
        }
        if (output != nullptr)
        {
            output->write((char*)&R, sizeof(SizeType));
            output->write((char*)&m_iNeighborhoodSize, sizeof(DimensionType));
            for (SizeType i = 0; i < R; i++) {
                output->write((char*)m_pNeighborhoodGraph[indices[i]], sizeof(SizeType) * m_iNeighborhoodSize);
            }
            std::cout << "Save Refine " << m_pNeighborhoodGraph.Name() << " (" << R << ", " << m_iNeighborhoodSize << ") Finish!" << std::endl;
        }
        return ErrorCode::Success;
    }

    // Re-query the index around `node` (m_iCEF+1 results) and rebuild its
    // neighbor list; optionally push `node` into its neighbors' lists too.
    template <typename T>
    void RefineNode(VectorIndex* index, const SizeType node, bool updateNeighbors, bool searchDeleted)
    {
        COMMON::QueryResultSet<T> query((const T*)index->GetSample(node), m_iCEF + 1);
        index->SearchIndex(query, searchDeleted);
        RebuildNeighbors(index, node, m_pNeighborhoodGraph[node], query.GetResults(), m_iCEF + 1);
        if (updateNeighbors) {
            // update neighbors
            for (int j = 0; j <= m_iCEF; j++)
            {
                BasicResult* item = query.GetResult(j);
                if (item->VID < 0) break;
                if (item->VID == node) continue;
                InsertNeighbors(index, item->VID, node, item->Dist);
            }
        }
    }

    // Recursively split indices[first..last] (inclusive) with random
    // hyperplanes over the top-variance dimensions; emits inclusive
    // [first,last] leaf ranges of at most m_iTPTLeafSize+1 points.
    template <typename T>
    void PartitionByTptree(VectorIndex* index, std::vector<SizeType>& indices, const SizeType first, const SizeType last, std::vector<std::pair<SizeType, SizeType>> & leaves)
    {
        if (last - first <= m_iTPTLeafSize)
        {
            leaves.push_back(std::make_pair(first, last));
        }
        else
        {
            std::vector<float> Mean(index->GetFeatureDim(), 0);
            int iIteration = 100;
            // Statistics are estimated on at most m_iSamples points.
            // NOTE(review): unqualified min — presumably a macro or
            // using-declaration from CommonUtils; confirm.
            SizeType end = min(first + m_iSamples, last);
            SizeType count = end - first + 1;
            // calculate the mean of each dimension
            for (SizeType j = first; j <= end; j++)
            {
                const T* v = (const T*)index->GetSample(indices[j]);
                for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
                {
                    Mean[k] += v[k];
                }
            }
            for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
            {
                Mean[k] /= count;
            }
            std::vector<BasicResult> Variance;
            Variance.reserve(index->GetFeatureDim());
            for (DimensionType j = 0; j < index->GetFeatureDim(); j++)
            {
                Variance.push_back(BasicResult(j, 0));
            }
            // calculate the variance of each dimension
            for (SizeType j = first; j <= end; j++)
            {
                const T* v = (const T*)index->GetSample(indices[j]);
                for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
                {
                    float dist = v[k] - Mean[k];
                    Variance[k].Dist += dist*dist;
                }
            }
            // After sorting, the highest-variance dimensions sit at the back.
            std::sort(Variance.begin(), Variance.end(), COMMON::Compare);
            std::vector<SizeType> indexs(m_numTopDimensionTPTSplit);
            std::vector<float> weight(m_numTopDimensionTPTSplit), bestweight(m_numTopDimensionTPTSplit);
            float bestvariance = Variance[index->GetFeatureDim() - 1].Dist;
            for (int i = 0; i < m_numTopDimensionTPTSplit; i++)
            {
                indexs[i] = Variance[index->GetFeatureDim() - 1 - i].VID;
                bestweight[i] = 0;
            }
            // Initial candidate: axis-aligned split on the top dimension.
            bestweight[0] = 1;
            float bestmean = Mean[indexs[0]];
            std::vector<float> Val(count);
            // Try iIteration random unit-norm projections; keep the one with
            // the largest projected variance.
            for (int i = 0; i < iIteration; i++)
            {
                float sumweight = 0;
                for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                {
                    weight[j] = float(rand() % 10000) / 5000.0f - 1.0f;
                    sumweight += weight[j] * weight[j];
                }
                sumweight = sqrt(sumweight);
                for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                {
                    weight[j] /= sumweight;
                }
                float mean = 0;
                for (SizeType j = 0; j < count; j++)
                {
                    Val[j] = 0;
                    const T* v = (const T*)index->GetSample(indices[first + j]);
                    for (int k = 0; k < m_numTopDimensionTPTSplit; k++)
                    {
                        Val[j] += weight[k] * v[indexs[k]];
                    }
                    mean += Val[j];
                }
                mean /= count;
                float var = 0;
                for (SizeType j = 0; j < count; j++)
                {
                    float dist = Val[j] - mean;
                    var += dist * dist;
                }
                if (var > bestvariance)
                {
                    bestvariance = var;
                    bestmean = mean;
                    for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                    {
                        bestweight[j] = weight[j];
                    }
                }
            }
            SizeType i = first;
            SizeType j = last;
            // decide which child one point belongs
            while (i <= j)
            {
                float val = 0;
                const T* v = (const T*)index->GetSample(indices[i]);
                for (int k = 0; k < m_numTopDimensionTPTSplit; k++)
                {
                    val += bestweight[k] * v[indexs[k]];
                }
                if (val < bestmean)
                {
                    i++;
                }
                else
                {
                    std::swap(indices[i], indices[j]);
                    j--;
                }
            }
            // if all the points in the node are equal, equally split the node into 2
            if ((i == first) || (i == last + 1))
            {
                i = (first + last + 1) / 2;
            }
            Mean.clear();
            Variance.clear();
            Val.clear();
            indexs.clear();
            weight.clear();
            bestweight.clear();
            PartitionByTptree<T>(index, indices, first, i - 1, leaves);
            PartitionByTptree<T>(index, indices, i, last, leaves);
        }
    }

    inline std::uint64_t BufferSize() const { return m_pNeighborhoodGraph.BufferSize(); }

    // Load graph + dimensions from file; false on failure.
    bool LoadGraph(std::string sGraphFilename)
    {
        if (!m_pNeighborhoodGraph.Load(sGraphFilename)) return false;
        m_iGraphSize = m_pNeighborhoodGraph.R();
        m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
        return true;
    }

    // Load graph from an in-memory image.
    // NOTE(review): unlike the file overload, the Load() result is ignored.
    bool LoadGraph(char* pGraphMemFile)
    {
        m_pNeighborhoodGraph.Load(pGraphMemFile);
        m_iGraphSize = m_pNeighborhoodGraph.R();
        m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
        return true;
    }

    bool SaveGraph(std::string sGraphFilename) const
    {
        std::cout << "Save " << m_pNeighborhoodGraph.Name() << " To " << sGraphFilename << std::endl;
        std::ofstream output(sGraphFilename, std::ios::binary);
        if (!output.is_open()) return false;
        SaveGraph(output);
        output.close();
        return true;
    }

    // Binary layout: SizeType rows, DimensionType cols, then row-major ids.
    bool SaveGraph(std::ostream& output) const
    {
        output.write((char*)&m_iGraphSize, sizeof(SizeType));
        output.write((char*)&m_iNeighborhoodSize, sizeof(DimensionType));
        for (SizeType i = 0; i < m_iGraphSize; i++)
            output.write((char*)m_pNeighborhoodGraph[i], sizeof(SizeType) * m_iNeighborhoodSize);
        std::cout << "Save " << m_pNeighborhoodGraph.Name() << " (" << m_iGraphSize << ", " << m_iNeighborhoodSize << ") Finish!" << std::endl;
        return true;
    }

    // Grow the graph by `num` uninitialized rows.
    inline ErrorCode AddBatch(SizeType num)
    {
        ErrorCode ret = m_pNeighborhoodGraph.AddBatch(num);
        if (ret != ErrorCode::Success) return ret;
        m_iGraphSize += num;
        return ErrorCode::Success;
    }

    inline SizeType* operator[](SizeType index) { return m_pNeighborhoodGraph[index]; }

    inline const SizeType* operator[](SizeType index) const { return m_pNeighborhoodGraph[index]; }

    // Single-cell update, serialized by m_dataUpdateLock.
    void Update(SizeType row, DimensionType col, SizeType val)
    {
        std::lock_guard<std::mutex> lock(m_dataUpdateLock);
        m_pNeighborhoodGraph[row][col] = val;
    }

    inline void SetR(SizeType rows) { m_pNeighborhoodGraph.SetR(rows); m_iGraphSize = rows; }

    inline SizeType R() const { return m_iGraphSize; }

    static std::shared_ptr<NeighborhoodGraph> CreateInstance(std::string type);

protected:
    // Graph structure
    SizeType m_iGraphSize;                           // number of nodes (rows)
    COMMON::Dataset<SizeType> m_pNeighborhoodGraph;  // row i = neighbor ids of node i
    std::mutex m_dataUpdateLock;                     // guards Update()

public:
    int m_iTPTNumber, m_iTPTLeafSize, m_iSamples, m_numTopDimensionTPTSplit;
    DimensionType m_iNeighborhoodSize;
    int m_iNeighborhoodScale, m_iCEFScale, m_iRefineIter, m_iCEF, m_iMaxCheckForRefineGraph;
};

} // namespace COMMON
} // namespace SPTAG

#endif
GI.h
#include <parse.h>

// Compile-time switches for this problem.
#define SELF_GRAVITY
#define FLAG_GI

// The giant-impact setup is inherently three-dimensional.
#ifdef PARTICLE_SIMULATOR_TWO_DIMENSION
#error
#endif

// Giant Impact (GI) problem: builds SPH initial conditions for a
// differentiated (iron core + granite mantle) target and impactor on a
// cubic lattice, distributes the particles across MPI ranks, and applies a
// velocity-damping external force during the relaxation phase.
template <class Ptcl> class GI : public Problem<Ptcl>{
    public:
    static const double END_TIME;

    // Generate initial particle placement for target and impactor.
    // Particles are laid on a [-1,1]^3 lattice (spacing dx), kept if they
    // fall inside the relevant spherical shell, and assigned round-robin to
    // ranks via id / NptclIn1Node.
    static void setupIC(PS::ParticleSystem<Ptcl>& sph_system, system_t& sysinfo, PS::DomainInfo& dinfo){
        const bool createTarget = true;//set false if you make an impactor.
        const double Corr = .98;//Correction Term
        /////////
        //place ptcls
        /////////
        std::vector<Ptcl> ptcl;
        std::vector<Ptcl> tar;//Target
        std::vector<Ptcl> imp;//Impactor
        /////////
        // Use parameters from input file, or defaults if none provided
        // TODO: Currently the input file has to be in the same directory as the executable
        // Change this into a command-line parameter.
        ParameterFile parameter_file("input.txt");
        PS::F64 UnitMass = parameter_file.getValueOf("UnitMass", 6.0e+24);
        PS::F64 UnitRadi = parameter_file.getValueOf("UnitRadi", 6400e+3);
        PS::F64 coreFracRadi = parameter_file.getValueOf("coreFracRadi", 3500.0e+3 / 6400.0e+3);
        PS::F64 coreFracMass = parameter_file.getValueOf("coreFracMass", 0.3);
        PS::F64 imptarMassRatio = parameter_file.getValueOf("imptarMassRatio", 0.1);
        /////////
        // Derived target/impactor dimensions; the impactor lattice is
        // scaled by Expand and its radius by the cube-root mass ratio.
        const PS::F64 Expand = 1.1;
        const PS::F64 tarMass = UnitMass;
        const PS::F64 tarRadi = UnitRadi;
        const PS::F64 tarCoreMass = tarMass * coreFracMass;
        const PS::F64 tarCoreRadi = tarRadi * coreFracRadi;
        const PS::F64 impMass = imptarMassRatio * tarMass;
        const PS::F64 impRadi = Expand * cbrt(impMass / tarMass) * UnitRadi;
        const PS::F64 impCoreMass = impMass * coreFracMass;
        const PS::F64 impCoreRadi = impRadi * coreFracRadi;
        const double offset = 5.0 * UnitRadi;   // impactor center x-offset
        const PS::F64 dx = 1.0 / 39;            // lattice spacing (unit cube)
        const PS::F64 Grav = 6.67e-11;
        std::cout << impRadi / tarRadi << std::endl;
        std::cout << impCoreRadi / impRadi << std::endl;
        ///////////////////
        //Dummy put to determine # of ptcls
        ///////////////////
        //target
        int tarNmntl = 0;
        for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
            for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
                for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
                    const PS::F64 r = sqrt(x*x + y*y + z*z) * UnitRadi;
                    if(r >= tarRadi || r <= tarCoreRadi) continue;
                    ++ tarNmntl;
                }
            }
        }
        int tarNcore;
        double tarCoreShrinkFactor = 1.0;
        // Shrink the core lattice until the core particle-count fraction
        // exceeds coreFracMass.
        // NOTE(review): the loop condition is the nonzero shrink factor
        // itself, so termination relies entirely on the break below —
        // confirm this always fires for sane parameters.
        while(tarCoreShrinkFactor *= 0.99){
            tarNcore = 0;
            for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
                for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
                    for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
                        const PS::F64 r = tarCoreShrinkFactor * sqrt(x*x + y*y + z*z) * UnitRadi;
                        if(r >= Corr * tarCoreRadi) continue;
                        ++ tarNcore;
                    }
                }
            }
            if((double)(tarNcore) / (double)(tarNcore + tarNmntl) > coreFracMass) break;
        }
        //imp
        int impNmntl = 0;
        for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
            for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
                for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
                    const PS::F64 r = Expand * sqrt(x*x + y*y + z*z) * UnitRadi;
                    if(r >= impRadi || r <= impCoreRadi) continue;
                    ++ impNmntl;
                }
            }
        }
        double impCoreShrinkFactor = 1.0;
        int impNcore;
        while(impCoreShrinkFactor *= 0.99){
            impNcore = 0;
            for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
                for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
                    for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
                        const PS::F64 r = Expand * impCoreShrinkFactor * sqrt(x*x + y*y + z*z) * UnitRadi;
                        if(r >= Corr * impCoreRadi) continue;
                        ++ impNcore;
                    }
                }
            }
            if((double)(impNcore) / (double)(impNcore + impNmntl) > coreFracMass) break;
        }
        ///////////////////
        //Dummy end
        ///////////////////
        const int tarNptcl = tarNcore + tarNmntl;
        const int impNptcl = impNcore + impNmntl;
        const int Nptcl = tarNptcl + impNptcl;
        std::cout << "Target :" << tarNptcl << std::endl;
        std::cout << " radius : " << tarRadi << std::endl;
        std::cout << " total-to-core : " << (double)(tarNcore) / (double)(tarNptcl) << std::endl;
        std::cout << " # of core ptcls : " << tarNcore << std::endl;
        std::cout << " # of mantle ptcls: " << tarNmntl << std::endl;
        std::cout << " core density : " << tarCoreMass / (4.0 * math::pi / 3.0 * tarCoreRadi * tarCoreRadi * tarCoreRadi * Corr * Corr * Corr) << std::endl;
        std::cout << " mantle density : " << (tarMass - tarCoreMass) / (4.0 * math::pi / 3.0 * (tarRadi * tarRadi * tarRadi - tarCoreRadi * tarCoreRadi * tarCoreRadi)) << std::endl;
        std::cout << " mean density : " << tarMass / (4.0 * math::pi / 3.0 * tarRadi * tarRadi * tarRadi) << std::endl;
        std::cout << "Impactor:" << impNptcl << std::endl;
        std::cout << " radius : " << impRadi << std::endl;
        std::cout << " total-to-core : " << (double)(impNcore) / (double)(impNptcl) << std::endl;
        std::cout << " # of core ptcls : " << impNcore << std::endl;
        std::cout << " # of mantle ptcls: " << impNmntl << std::endl;
        std::cout << " core density : " << impCoreMass / (4.0 * math::pi / 3.0 * impCoreRadi * impCoreRadi * impCoreRadi * Corr * Corr * Corr) << std::endl;
        std::cout << " mantle density : " << (impMass - impCoreMass) / (4.0 * math::pi / 3.0 * (impRadi * impRadi * impRadi - impCoreRadi * impCoreRadi * impCoreRadi)) << std::endl;
        std::cout << " mean density : " << impMass / (4.0 * math::pi / 3.0 * impRadi * impRadi * impRadi) << std::endl;
        std::cout << "Total:" << Nptcl << std::endl;
        std::cout << "Tar-to-Imp mass ratio: " << (double)(impNmntl) / (double)(tarNmntl) << std::endl;
        const int NptclIn1Node = Nptcl / PS::Comm::getNumberOfProc();
        ///////////////////
        //Real put
        ///////////////////
        PS::S32 id = 0;
        //Put Tar.
        // Target mantle (tag 0, granite EoS).  ith.mass is set to the total
        // system mass here and normalized by Nptcl below.
        for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
            for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
                for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
                    const PS::F64 r = sqrt(x*x + y*y + z*z) * UnitRadi;
                    if(r >= tarRadi || r <= tarCoreRadi) continue;
                    Ptcl ith;
                    ith.pos.x = UnitRadi * x;
                    ith.pos.y = UnitRadi * y;
                    ith.pos.z = UnitRadi * z;
                    ith.dens = (tarMass - tarCoreMass) / (4.0 / 3.0 * math::pi * (tarRadi * tarRadi * tarRadi - tarCoreRadi * tarCoreRadi * tarCoreRadi));
                    ith.mass = tarMass + impMass;
                    ith.eng = 0.1 * Grav * tarMass / tarRadi;
                    ith.id = id++;
                    ith.setPressure(&Granite);
                    ith.tag = 0;
                    if(ith.id / NptclIn1Node == PS::Comm::getRank()) tar.push_back(ith);
                }
            }
        }
        // Target core (tag 1, iron EoS), lattice shrunk by the factor found above.
        for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
            for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
                for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
                    const PS::F64 r = tarCoreShrinkFactor * sqrt(x*x + y*y + z*z) * UnitRadi;
                    if(r >= Corr * tarCoreRadi) continue;
                    Ptcl ith;
                    ith.pos.x = tarCoreShrinkFactor * UnitRadi * x;
                    ith.pos.y = tarCoreShrinkFactor * UnitRadi * y;
                    ith.pos.z = tarCoreShrinkFactor * UnitRadi * z;
                    ith.dens = tarCoreMass / (4.0 / 3.0 * math::pi * tarCoreRadi * tarCoreRadi * tarCoreRadi * Corr * Corr * Corr);
                    ith.mass = tarMass + impMass;
                    ith.eng = 0.1 * Grav * tarMass / tarRadi;
                    ith.id = id++;
                    ith.setPressure(&Iron);
                    ith.tag = 1;
                    if(ith.id / NptclIn1Node == PS::Comm::getRank()) tar.push_back(ith);
                }
            }
        }
        //imp
        // Impactor mantle (tag 2, granite EoS), offset along +x.
        for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
            for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
                for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
                    const PS::F64 r = Expand * sqrt(x*x + y*y + z*z) * UnitRadi;
                    if(r >= impRadi || r <= impCoreRadi) continue;
                    Ptcl ith;
                    ith.pos.x = Expand * UnitRadi * x + offset;
                    ith.pos.y = Expand * UnitRadi * y;
                    ith.pos.z = Expand * UnitRadi * z;
                    ith.dens = (impMass - impCoreMass) / (4.0 / 3.0 * math::pi * (impRadi * impRadi * impRadi - impCoreRadi * impCoreRadi * impCoreRadi));
                    ith.mass = tarMass + impMass;
                    ith.eng = 0.1 * Grav * tarMass / tarRadi;
                    ith.id = id++;
                    ith.setPressure(&Granite);
                    ith.tag = 2;
                    if(ith.id / NptclIn1Node == PS::Comm::getRank()) imp.push_back(ith);
                }
            }
        }
        // Impactor core (tag 3, iron EoS).
        // NOTE(review): the radius test here is `r >= impCoreRadi`, without
        // the Corr factor used in the dummy count above — the placed core
        // count can therefore differ slightly from impNcore; confirm intended.
        for(PS::F64 x = -1.0 ; x <= 1.0 ; x += dx){
            for(PS::F64 y = -1.0 ; y <= 1.0 ; y += dx){
                for(PS::F64 z = -1.0 ; z <= 1.0 ; z += dx){
                    const PS::F64 r = Expand * impCoreShrinkFactor * sqrt(x*x + y*y + z*z) * UnitRadi;
                    if(r >= impCoreRadi) continue;
                    Ptcl ith;
                    ith.pos.x = Expand * impCoreShrinkFactor * UnitRadi * x + offset;
                    ith.pos.y = Expand * impCoreShrinkFactor * UnitRadi * y;
                    ith.pos.z = Expand * impCoreShrinkFactor * UnitRadi * z;
                    ith.dens = impCoreMass / (4.0 / 3.0 * math::pi * impCoreRadi * impCoreRadi * impCoreRadi * Corr * Corr * Corr);
                    ith.mass = tarMass + impMass;
                    ith.eng = 0.1 * Grav * tarMass / tarRadi;
                    ith.id = id++;
                    ith.setPressure(&Iron);
                    ith.tag = 3;
                    if(ith.id / NptclIn1Node == PS::Comm::getRank()) imp.push_back(ith);
                }
            }
        }
        // Normalize every particle mass: equal-mass particles summing to the
        // total system mass.
        for(PS::U32 i = 0 ; i < tar.size() ; ++ i){
            tar[i].mass /= (PS::F64)(Nptcl);
        }
        for(PS::U32 i = 0 ; i < imp.size() ; ++ i){
            imp[i].mass /= (PS::F64)(Nptcl);
        }
        // Only one body is actually simulated per run (relaxation mode).
        if(createTarget == true){
            for(PS::U32 i = 0 ; i < tar.size() ; ++ i){
                ptcl.push_back(tar[i]);
            }
        }else{
            for(PS::U32 i = 0 ; i < imp.size() ; ++ i){
                ptcl.push_back(imp[i]);
            }
        }
        const PS::S32 numPtclLocal = ptcl.size();
        sph_system.setNumberOfParticleLocal(numPtclLocal);
        for(PS::U32 i = 0 ; i < ptcl.size() ; ++ i){
            sph_system[i] = ptcl[i];
        }
        //Fin.
        std::cout << "# of ptcls = " << ptcl.size() << std::endl;
        std::cout << "setup..." << std::endl;
    }

    // Re-bind equation-of-state pointers after restart/transfer:
    // even tags (mantle) -> Granite, odd tags (core) -> Iron.
    static void setEoS(PS::ParticleSystem<Ptcl>& sph_system){
        for(PS::U64 i = 0 ; i < sph_system.getNumberOfParticleLocal() ; ++ i){
            if(sph_system[i].tag % 2 == 0){
                sph_system[i].setPressure(&Granite);
            }else{
                sph_system[i].setPressure(&Iron);
            }
        }
    }

    // Velocity damping during relaxation (first 5000 time units only):
    // acc -= vel * 0.05 / dt drains kinetic energy each step.
    static void addExternalForce(PS::ParticleSystem<Ptcl>& sph_system, system_t& sysinfo){
        if(sysinfo.time >= 5000) return;
        std::cout << "Add Ext. Force!!!" << std::endl;
#pragma omp parallel for
        for(PS::S32 i = 0 ; i < sph_system.getNumberOfParticleLocal() ; ++ i){
            sph_system[i].acc += - sph_system[i].vel * 0.05 / sph_system[i].dt;
        }
    }
};

template <class Ptcl> const double GI<Ptcl>::END_TIME = 1.0e+4;
enhance.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE N N H H AAA N N CCCC EEEEE % % E NN N H H A A NN N C E % % EEE N N N HHHHH AAAAA N N N C EEE % % E N NN H H A A N NN C E % % EEEEE N N H H A A N N CCCC EEEEE % % % % % % MagickCore Image Enhancement Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resource_.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/token.h" #include "magick/xml-tree.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoGammaImage() extract the 'mean' from the image and adjust the image % to try make set its gamma appropriatally. % % The format of the AutoGammaImage method is: % % MagickBooleanType AutoGammaImage(Image *image) % MagickBooleanType AutoGammaImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: The image to auto-level % % o channel: The channels to auto-level. If the special 'SyncChannels' % flag is set all given channels is adjusted in the same way using the % mean average of those channels. 
% */
MagickExport MagickBooleanType AutoGammaImage(Image *image)
{
  return(AutoGammaImageChannel(image,DefaultChannels));
}

MagickExport MagickBooleanType AutoGammaImageChannel(Image *image,
  const ChannelType channel)
{
  double
    gamma,
    mean,
    logmean,
    sans;     /* receives GetImageChannelMean()'s second statistic; discarded */

  MagickStatusType
    status;

  /*
    The gamma that maps the channel mean to mid-gray is
    log(mean*QuantumScale)/log(0.5).
  */
  logmean=log(0.5);
  if ((channel & SyncChannels) != 0)
    {
      /*
        Apply gamma correction equally across all given channels, using the
        mean of those channels.
      */
      (void) GetImageChannelMean(image,channel,&mean,&sans,&image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      return(LevelImageChannel(image,channel,0.0,(double) QuantumRange,gamma));
    }
  /*
    Auto-gamma each channel separately; status accumulates the per-channel
    results (bitwise AND, so any failure clears it).
  */
  status = MagickTrue;
  if ((channel & RedChannel) != 0)
    {
      (void) GetImageChannelMean(image,RedChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,RedChannel,0.0,(double) QuantumRange,
        gamma);
    }
  if ((channel & GreenChannel) != 0)
    {
      (void) GetImageChannelMean(image,GreenChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,GreenChannel,0.0,(double) QuantumRange,
        gamma);
    }
  if ((channel & BlueChannel) != 0)
    {
      (void) GetImageChannelMean(image,BlueChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,BlueChannel,0.0,(double) QuantumRange,
        gamma);
    }
  if (((channel & OpacityChannel) != 0) &&
      (image->matte == MagickTrue))
    {
      (void) GetImageChannelMean(image,OpacityChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,OpacityChannel,0.0,(double) QuantumRange,
        gamma);
    }
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      (void) GetImageChannelMean(image,IndexChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,IndexChannel,0.0,(double) QuantumRange,
        gamma);
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o L e v e l I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoLevelImage() adjusts the levels of a particular image channel by
%  scaling the minimum and maximum values to the full quantum range.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType AutoLevelImage(Image *image)
%      MagickBooleanType AutoLevelImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-level
%
%    o channel: The channels to auto-level.  If the special 'SyncChannels'
%      flag is set the min/max/mean value of all given channels is used, so
%      all given channels are stretched in the same way.
%
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image)
{
  return(AutoLevelImageChannel(image,DefaultChannels));
}

MagickExport MagickBooleanType AutoLevelImageChannel(Image *image,
  const ChannelType channel)
{
  /*
    This is simply a convenience function around a Min/Max Histogram Stretch
    (black point and white point both 0.0, i.e. no clipping).
  */
  return MinMaxStretchImage(image, channel, 0.0, 0.0);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B r i g h t n e s s C o n t r a s t I m a g e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BrightnessContrastImage() changes the brightness and/or contrast of an
%  image.  It converts the brightness and contrast parameters into slope and
%  intercept and calls a polynomial function to apply to the image.
%
%  The format of the BrightnessContrastImage method is:
%
%      MagickBooleanType BrightnessContrastImage(Image *image,
%        const double brightness,const double contrast)
%      MagickBooleanType BrightnessContrastImageChannel(Image *image,
%        const ChannelType channel,const double brightness,
%        const double contrast)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o brightness: the brightness percent (-100 .. 100).
%
%    o contrast: the contrast percent (-100 .. 100).
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast)
{
  MagickBooleanType
    status;

  status=BrightnessContrastImageChannel(image,DefaultChannels,brightness,
    contrast);
  return(status);
}

MagickExport MagickBooleanType BrightnessContrastImageChannel(Image *image,
  const ChannelType channel,const double brightness,const double contrast)
{
/*
  NOTE(review): macro name and tag string are misspelled ("Contast"); the
  macro is not referenced in this function, so it is left unchanged in case
  later code in this file uses it.
*/
#define BrightnessContastImageTag  "BrightnessContast/Image"

  double
    alpha,
    intercept,
    coefficients[2],
    slope;

  MagickBooleanType
    status;

  /*
    Compute slope and intercept.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Map contrast (-100..100) to a slope via tan(pi*(c/100+1)/4): 0 at -100,
    1 at 0, increasing steeply toward +100; negative tan results clamp to 0.
  */
  alpha=contrast;
  slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  /*
    Apply as the linear polynomial slope*u+intercept over the channel values.
  */
  coefficients[0]=slope;
  coefficients[1]=intercept;
  status=FunctionImageChannel(image,channel,PolynomialFunction,2,coefficients,
    &image->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r D e c i s i o n L i s t I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorDecisionListImage() accepts a lightweight Color Correction Collection
%  (CCC) file which solely contains one or more color corrections and applies
%  the correction to the image.  Here is a sample CCC file:
%
%    <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
%          <ColorCorrection id="cc03345">
%                <SOPNode>
%                     <Slope> 0.9 1.2 0.5 </Slope>
%                     <Offset> 0.4 -0.5 0.6 </Offset>
%                     <Power> 1.0 0.8 1.5 </Power>
%                </SOPNode>
%                <SATNode>
%                     <Saturation> 0.85 </Saturation>
%                </SATNode>
%          </ColorCorrection>
%    </ColorCorrectionCollection>
%
%  which includes the slope, offset, and power for each of the RGB channels
%  as well as the saturation.
%
%  The format of the ColorDecisionListImage method is:
%
%      MagickBooleanType ColorDecisionListImage(Image *image,
%        const char *color_correction_collection)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color_correction_collection: the color correction collection in XML.
% */ MagickExport MagickBooleanType ColorDecisionListImage(Image *image, const char *color_correction_collection) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView *image_view; char token[MaxTextExtent]; ColorCorrection color_correction; const char *content, *p; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; PixelPacket *cdl_map; register ssize_t i; ssize_t y; XMLTreeInfo *cc, *ccc, *sat, *sop; /* Allocate and initialize cdl maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (color_correction_collection == (const char *) NULL) return(MagickFalse); ccc=NewXMLTree((const char *) color_correction_collection,&image->exception); if (ccc == (XMLTreeInfo *) NULL) return(MagickFalse); cc=GetXMLTreeChild(ccc,"ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc=DestroyXMLTree(ccc); return(MagickFalse); } color_correction.red.slope=1.0; color_correction.red.offset=0.0; color_correction.red.power=1.0; color_correction.green.slope=1.0; color_correction.green.offset=0.0; color_correction.green.power=1.0; color_correction.blue.slope=1.0; color_correction.blue.offset=0.0; color_correction.blue.power=1.0; color_correction.saturation=0.0; sop=GetXMLTreeChild(cc,"SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo *offset, *power, *slope; slope=GetXMLTreeChild(sop,"Slope"); if (slope != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(slope); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetMagickToken(p,&p,token); if (*token == ',') GetMagickToken(p,&p,token); switch (i) { case 0: { color_correction.red.slope=StringToDouble(token,(char **) NULL); break; } case 1: { 
color_correction.green.slope=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.slope=StringToDouble(token, (char **) NULL); break; } } } } offset=GetXMLTreeChild(sop,"Offset"); if (offset != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(offset); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetMagickToken(p,&p,token); if (*token == ',') GetMagickToken(p,&p,token); switch (i) { case 0: { color_correction.red.offset=StringToDouble(token, (char **) NULL); break; } case 1: { color_correction.green.offset=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.offset=StringToDouble(token, (char **) NULL); break; } } } } power=GetXMLTreeChild(sop,"Power"); if (power != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(power); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetMagickToken(p,&p,token); if (*token == ',') GetMagickToken(p,&p,token); switch (i) { case 0: { color_correction.red.power=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.power=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.power=StringToDouble(token, (char **) NULL); break; } } } } } sat=GetXMLTreeChild(cc,"SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo *saturation; saturation=GetXMLTreeChild(sat,"Saturation"); if (saturation != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(saturation); p=(const char *) content; GetMagickToken(p,&p,token); color_correction.saturation=StringToDouble(token,(char **) NULL); } } ccc=DestroyXMLTree(ccc); if (image->debug != MagickFalse) { (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Color Correction Collection:"); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.slope: %g",color_correction.red.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.offset: %g",color_correction.red.offset); (void) 
LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.power: %g",color_correction.red.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.slope: %g",color_correction.green.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.offset: %g",color_correction.green.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.power: %g",color_correction.green.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.slope: %g",color_correction.blue.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.offset: %g",color_correction.blue.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.power: %g",color_correction.blue.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.saturation: %g",color_correction.saturation); } cdl_map=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map)); if (cdl_map == (PixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); for (i=0; i <= (ssize_t) MaxMap; i++) { cdl_map[i].red=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+ color_correction.red.offset,color_correction.red.power))))); cdl_map[i].green=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+ color_correction.green.offset,color_correction.green.power))))); cdl_map[i].blue=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+ color_correction.blue.offset,color_correction.blue.power))))); } if (image->storage_class == PseudoClass) { /* Apply transfer function to colormap. 
*/ for (i=0; i < (ssize_t) image->colors; i++) { double luma; luma=0.21267f*image->colormap[i].red+0.71516f*image->colormap[i].green+ 0.07217f*image->colormap[i].blue; image->colormap[i].red=ClampToQuantum(luma+color_correction.saturation* cdl_map[ScaleQuantumToMap(image->colormap[i].red)].red-luma); image->colormap[i].green=ClampToQuantum(luma+ color_correction.saturation*cdl_map[ScaleQuantumToMap( image->colormap[i].green)].green-luma); image->colormap[i].blue=ClampToQuantum(luma+color_correction.saturation* cdl_map[ScaleQuantumToMap(image->colormap[i].blue)].blue-luma); } } /* Apply transfer function to image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double luma; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { luma=0.21267f*GetPixelRed(q)+0.71516f*GetPixelGreen(q)+ 0.07217f*GetPixelBlue(q); SetPixelRed(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelRed(q))].red-luma))); SetPixelGreen(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelGreen(q))].green-luma))); SetPixelBlue(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelBlue(q))].blue-luma))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ColorDecisionListImageChannel) #endif 
proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag, progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); cdl_map=(PixelPacket *) RelinquishMagickMemory(cdl_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClutImage() replaces each color value in the given image, by using it as an % index to lookup a replacement color value in a Color Look UP Table in the % form of an image. The values are extracted along a diagonal of the CLUT % image so either a horizontal or vertial gradient image can be used. % % Typically this is used to either re-color a gray-scale image according to a % color gradient in the CLUT image, or to perform a freeform histogram % (level) adjustment according to the (typically gray-scale) gradient in the % CLUT image. % % When the 'channel' mask includes the matte/alpha transparency channel but % one image has no such channel it is assumed that that image is a simple % gray-scale image that will effect the alpha channel values, either for % gray-scale coloring (with transparent or semi-transparent colors), or % a histogram adjustment of existing alpha channel values. If both images % have matte channels, direct and normal indexing is applied, which is rarely % used. % % The format of the ClutImage method is: % % MagickBooleanType ClutImage(Image *image,Image *clut_image) % MagickBooleanType ClutImageChannel(Image *image, % const ChannelType channel,Image *clut_image) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o clut_image: the color lookup table image for replacement color values. % % o channel: the channel. 
% */ MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image) { return(ClutImageChannel(image,DefaultChannels,clut_image)); } MagickExport MagickBooleanType ClutImageChannel(Image *image, const ChannelType channel,const Image *clut_image) { #define ClutImageTag "Clut/Image" CacheView *clut_view, *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket *clut_map; register ssize_t i; ssize_t adjust, y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clut_image != (Image *) NULL); assert(clut_image->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,RGBColorspace); clut_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*clut_map)); if (clut_map == (MagickPixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Clut image. */ status=MagickTrue; progress=0; adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 
0 : 1); exception=(&image->exception); clut_view=AcquireAuthenticCacheView(clut_image,exception); for (i=0; i <= (ssize_t) MaxMap; i++) { GetMagickPixelPacket(clut_image,clut_map+i); (void) InterpolateMagickPixelPacket(clut_image,clut_view, UndefinedInterpolatePixel,QuantumScale*i*(clut_image->columns-adjust), QuantumScale*i*(clut_image->rows-adjust),clut_map+i,exception); } clut_view=DestroyCacheView(clut_view); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); GetMagickPixelPacket(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampPixelRed(clut_map+ ScaleQuantumToMap(GetPixelRed(q)))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampPixelGreen(clut_map+ ScaleQuantumToMap(GetPixelGreen(q)))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampPixelBlue(clut_map+ ScaleQuantumToMap(GetPixelBlue(q)))); if ((channel & OpacityChannel) != 0) { if (clut_image->matte == MagickFalse) SetPixelAlpha(q,MagickPixelIntensityToQuantum(clut_map+ ScaleQuantumToMap((Quantum) GetPixelAlpha(q)))); else if (image->matte == MagickFalse) SetPixelOpacity(q,ClampPixelOpacity(clut_map+ ScaleQuantumToMap((Quantum) MagickPixelIntensity(&pixel)))); else SetPixelOpacity(q,ClampPixelOpacity( clut_map+ScaleQuantumToMap(GetPixelOpacity(q)))); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) 
SetPixelIndex(indexes+x,ClampToQuantum((clut_map+(ssize_t) GetPixelIndex(indexes+x))->index)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ClutImageChannel) #endif proceed=SetImageProgress(image,ClutImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); clut_map=(MagickPixelPacket *) RelinquishMagickMemory(clut_map); if ((clut_image->matte != MagickFalse) && ((channel & OpacityChannel) != 0)) (void) SetImageAlphaChannel(image,ActivateAlphaChannel); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastImage() enhances the intensity differences between the lighter and % darker elements of the image. Set sharpen to a MagickTrue to increase the % image contrast otherwise the contrast is reduced. % % The format of the ContrastImage method is: % % MagickBooleanType ContrastImage(Image *image, % const MagickBooleanType sharpen) % % A description of each parameter follows: % % o image: the image. % % o sharpen: Increase or decrease image contrast. % */ static void Contrast(const int sign,Quantum *red,Quantum *green,Quantum *blue) { double brightness, hue, saturation; /* Enhance contrast: dark color become darker, light color become lighter. 
*/
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  hue=0.0;
  saturation=0.0;
  brightness=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  /*
    Push brightness toward the nearer extreme (sign=+1) or toward mid-gray
    (sign=-1) along a sine S-curve centered on 0.5, then clamp to [0,1].
  */
  brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)-
    brightness);
  if (brightness > 1.0)
    brightness=1.0;
  else
    if (brightness < 0.0)
      brightness=0.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}

MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen)
{
#define ContrastImageTag  "Contrast/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  int
    sign;     /* +1 sharpens (more contrast), -1 dulls (less contrast) */

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
        Contrast(sign,&image->colormap[i].red,&image->colormap[i].green,
          &image->colormap[i].blue);
    }
  /*
    Contrast enhance image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      blue,
      green,
      red;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Contrast() works in-place on local copies; write the results back.
      */
      red=GetPixelRed(q);
      green=GetPixelGreen(q);
      blue=GetPixelBlue(q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(q,red);
      SetPixelGreen(q,green);
      SetPixelBlue(q,blue);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ContrastImage)
#endif
        proceed=SetImageProgress(image,ContrastImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n t r a s t S t r e t c h I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ContrastStretchImage() is a simple image enhancement technique that attempts
%  to improve the contrast in an image by `stretching' the range of intensity
%  values it contains to span a desired range of values. It differs from the
%  more sophisticated histogram equalization in that it can only apply a
%  linear scaling function to the image pixel values.  As a result the
%  `enhancement' is less harsh.
% % The format of the ContrastStretchImage method is: % % MagickBooleanType ContrastStretchImage(Image *image, % const char *levels) % MagickBooleanType ContrastStretchImageChannel(Image *image, % const size_t channel,const double black_point, % const double white_point) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_point: the black point. % % o white_point: the white point. % % o levels: Specify the levels where the black and white points have the % range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.). % */ MagickExport MagickBooleanType ContrastStretchImage(Image *image, const char *levels) { double black_point, white_point; GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; /* Parse levels. */ if (levels == (char *) NULL) return(MagickFalse); flags=ParseGeometry(levels,&geometry_info); black_point=geometry_info.rho; white_point=(double) image->columns*image->rows; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; if ((flags & PercentValue) != 0) { black_point*=(double) QuantumRange/100.0; white_point*=(double) QuantumRange/100.0; } if ((flags & SigmaValue) == 0) white_point=(double) image->columns*image->rows-black_point; status=ContrastStretchImageChannel(image,DefaultChannels,black_point, white_point); return(status); } MagickExport MagickBooleanType ContrastStretchImageChannel(Image *image, const ChannelType channel,const double black_point,const double white_point) { #define MaxRange(color) ((MagickRealType) ScaleQuantumToMap((Quantum) (color))) #define ContrastStretchImageTag "ContrastStretch/Image" CacheView *image_view; double intensity; ExceptionInfo *exception; MagickBooleanType linear, status; MagickOffsetType progress; MagickPixelPacket black, *histogram, white; QuantumPixelPacket *stretch_map; register ssize_t i; ssize_t y; /* Allocate histogram and stretch map. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); stretch_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*stretch_map)); if ((histogram == (MagickPixelPacket *) NULL) || (stretch_map == (QuantumPixelPacket *) NULL)) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Form histogram. */ linear=MagickFalse; if (image->colorspace == sRGBColorspace) { linear=MagickTrue; (void) TransformImageColorspace(image,RGBColorspace); } status=MagickTrue; exception=(&image->exception); (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram)); image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict indexes; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); if ((channel & SyncChannels) != 0) for (x=0; x < (ssize_t) image->columns; x++) { Quantum intensity; intensity=PixelIntensityToQuantum(image,p); histogram[ScaleQuantumToMap(intensity)].red++; histogram[ScaleQuantumToMap(intensity)].green++; histogram[ScaleQuantumToMap(intensity)].blue++; histogram[ScaleQuantumToMap(intensity)].index++; p++; } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) histogram[ScaleQuantumToMap(GetPixelRed(p))].red++; if ((channel & GreenChannel) != 0) histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++; if ((channel & BlueChannel) != 0) histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++; if ((channel & OpacityChannel) != 0) 
histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++; p++; } } /* Find the histogram boundaries by locating the black/white levels. */ black.red=0.0; white.red=MaxRange(QuantumRange); if ((channel & RedChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].red; if (intensity > black_point) break; } black.red=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].red; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.red=(MagickRealType) i; } black.green=0.0; white.green=MaxRange(QuantumRange); if ((channel & GreenChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].green; if (intensity > black_point) break; } black.green=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].green; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.green=(MagickRealType) i; } black.blue=0.0; white.blue=MaxRange(QuantumRange); if ((channel & BlueChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].blue; if (intensity > black_point) break; } black.blue=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].blue; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.blue=(MagickRealType) i; } black.opacity=0.0; white.opacity=MaxRange(QuantumRange); if ((channel & OpacityChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].opacity; if (intensity > black_point) break; } black.opacity=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].opacity; if (intensity > ((double) image->columns*image->rows-white_point)) 
break; } white.opacity=(MagickRealType) i; } black.index=0.0; white.index=MaxRange(QuantumRange); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].index; if (intensity > black_point) break; } black.index=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].index; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.index=(MagickRealType) i; } histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); /* Stretch the histogram to create the stretched image mapping. */ (void) ResetMagickMemory(stretch_map,0,(MaxMap+1)*sizeof(*stretch_map)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & RedChannel) != 0) { if (i < (ssize_t) black.red) stretch_map[i].red=(Quantum) 0; else if (i > (ssize_t) white.red) stretch_map[i].red=QuantumRange; else if (black.red != white.red) stretch_map[i].red=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.red)/(white.red-black.red))); } if ((channel & GreenChannel) != 0) { if (i < (ssize_t) black.green) stretch_map[i].green=0; else if (i > (ssize_t) white.green) stretch_map[i].green=QuantumRange; else if (black.green != white.green) stretch_map[i].green=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.green)/(white.green-black.green))); } if ((channel & BlueChannel) != 0) { if (i < (ssize_t) black.blue) stretch_map[i].blue=0; else if (i > (ssize_t) white.blue) stretch_map[i].blue= QuantumRange; else if (black.blue != white.blue) stretch_map[i].blue=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.blue)/(white.blue-black.blue))); } if ((channel & OpacityChannel) != 0) { if (i < (ssize_t) black.opacity) stretch_map[i].opacity=0; else if (i > (ssize_t) white.opacity) stretch_map[i].opacity=QuantumRange; else if (black.opacity != white.opacity) stretch_map[i].opacity=ScaleMapToQuantum((MagickRealType) (MaxMap* 
(i-black.opacity)/(white.opacity-black.opacity))); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if (i < (ssize_t) black.index) stretch_map[i].index=0; else if (i > (ssize_t) white.index) stretch_map[i].index=QuantumRange; else if (black.index != white.index) stretch_map[i].index=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.index)/(white.index-black.index))); } } /* Stretch the image. */ if (((channel & OpacityChannel) != 0) || (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))) image->storage_class=DirectClass; if (image->storage_class == PseudoClass) { /* Stretch colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) { if (black.red != white.red) image->colormap[i].red=stretch_map[ ScaleQuantumToMap(image->colormap[i].red)].red; } if ((channel & GreenChannel) != 0) { if (black.green != white.green) image->colormap[i].green=stretch_map[ ScaleQuantumToMap(image->colormap[i].green)].green; } if ((channel & BlueChannel) != 0) { if (black.blue != white.blue) image->colormap[i].blue=stretch_map[ ScaleQuantumToMap(image->colormap[i].blue)].blue; } if ((channel & OpacityChannel) != 0) { if (black.opacity != white.opacity) image->colormap[i].opacity=stretch_map[ ScaleQuantumToMap(image->colormap[i].opacity)].opacity; } } } /* Stretch image. 
*/ status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) { if (black.red != white.red) SetPixelRed(q,stretch_map[ ScaleQuantumToMap(GetPixelRed(q))].red); } if ((channel & GreenChannel) != 0) { if (black.green != white.green) SetPixelGreen(q,stretch_map[ ScaleQuantumToMap(GetPixelGreen(q))].green); } if ((channel & BlueChannel) != 0) { if (black.blue != white.blue) SetPixelBlue(q,stretch_map[ ScaleQuantumToMap(GetPixelBlue(q))].blue); } if ((channel & OpacityChannel) != 0) { if (black.opacity != white.opacity) SetPixelOpacity(q,stretch_map[ ScaleQuantumToMap(GetPixelOpacity(q))].opacity); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if (black.index != white.index) SetPixelIndex(indexes+x,stretch_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].index); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ContrastStretchImageChannel) #endif proceed=SetImageProgress(image,ContrastStretchImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map); if (linear != MagickFalse) (void) 
TransformImageColorspace(image,sRGBColorspace);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E n h a n c e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EnhanceImage() applies a digital filter that improves the quality of a
%  noisy image.
%
%  The format of the EnhanceImage method is:
%
%      Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
/*
  Fold the neighbor pixel at r into the running weighted average.  The
  neighbor contributes only when its channel-weighted color distance from
  the center pixel (in `pixel`) is below (QuantumRange^2)/25; r is always
  advanced to the next neighbor regardless.
*/
#define Enhance(weight) \
  mean=((MagickRealType) GetPixelRed(r)+pixel.red)/2; \
  distance=(MagickRealType) GetPixelRed(r)-(MagickRealType) pixel.red; \
  distance_squared=QuantumScale*(2.0*((MagickRealType) QuantumRange+1.0)+ \
    mean)*distance*distance; \
  mean=((MagickRealType) GetPixelGreen(r)+pixel.green)/2; \
  distance=(MagickRealType) GetPixelGreen(r)-(MagickRealType) pixel.green; \
  distance_squared+=4.0*distance*distance; \
  mean=((MagickRealType) GetPixelBlue(r)+pixel.blue)/2; \
  distance=(MagickRealType) GetPixelBlue(r)-(MagickRealType) pixel.blue; \
  distance_squared+=QuantumScale*(3.0*((MagickRealType) QuantumRange+1.0)-1.0- \
    mean)*distance*distance; \
  mean=((MagickRealType) r->opacity+pixel.opacity)/2; \
  distance=(MagickRealType) r->opacity-(MagickRealType) pixel.opacity; \
  distance_squared+=QuantumScale*(3.0*((MagickRealType) QuantumRange+1.0)-1.0- \
    mean)*distance*distance; \
  if (distance_squared < ((MagickRealType) QuantumRange*(MagickRealType) \
      QuantumRange/25.0f)) \
    { \
      aggregate.red+=(weight)*GetPixelRed(r); \
      aggregate.green+=(weight)*GetPixelGreen(r); \
      aggregate.blue+=(weight)*GetPixelBlue(r); \
      aggregate.opacity+=(weight)*GetPixelOpacity(r); \
      total_weight+=(weight); \
    } \
  r++;
#define EnhanceImageTag  "Enhance/Image"

  CacheView
    *enhance_view,
    *image_view;

  Image
    *enhance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* The filter needs a full 5x5 neighborhood; refuse smaller images. */
  if ((image->columns < 5) || (image->rows < 5))
    return((Image *) NULL);
  enhance_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&enhance_image->exception);
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image.
  */
  status=MagickTrue;
  progress=0;
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  image_view=AcquireAuthenticCacheView(image,exception);
  enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,enhance_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /*
      Read another scan line: a 5-row window centered on y (virtual pixels
      supply the 2-pixel border).
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        aggregate;

      MagickRealType
        distance,
        distance_squared,
        mean,
        total_weight;

      PixelPacket
        pixel;

      register const PixelPacket
        *restrict r;

      /*
        Compute weighted average of target pixel color components.
        The center pixel of the 5x5 window sets the reference color;
        each row of the window is visited with its own weights.
      */
      aggregate=zero;
      total_weight=0.0;
      r=p+2*(image->columns+4)+2;
      pixel=(*r);
      r=p;
      Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0);
      r=p+(image->columns+4);
      Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0);
      r=p+2*(image->columns+4);
      Enhance(10.0); Enhance(40.0); Enhance(80.0); Enhance(40.0); Enhance(10.0);
      r=p+3*(image->columns+4);
      Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0);
      r=p+4*(image->columns+4);
      Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0);
      /*
        total_weight is always > 0 here: the center pixel has zero distance
        from itself, so at least the weight-80 tap is accumulated.
      */
      SetPixelRed(q,(aggregate.red+(total_weight/2)-1)/total_weight);
      SetPixelGreen(q,(aggregate.green+(total_weight/2)-1)/total_weight);
      SetPixelBlue(q,(aggregate.blue+(total_weight/2)-1)/total_weight);
      SetPixelOpacity(q,(aggregate.opacity+(total_weight/2)-1)/total_weight);
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_EnhanceImage)
#endif
        proceed=SetImageProgress(image,EnhanceImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    enhance_image=DestroyImage(enhance_image);
  return(enhance_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E q u a l i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EqualizeImage() applies a histogram equalization to the image.
%
%  The format of the EqualizeImage method is:
%
%      MagickBooleanType EqualizeImage(Image *image)
%      MagickBooleanType EqualizeImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
*/
MagickExport MagickBooleanType EqualizeImage(Image *image)
{
  return(EqualizeImageChannel(image,DefaultChannels));
}

MagickExport MagickBooleanType EqualizeImageChannel(Image *image,
  const ChannelType channel)
{
#define EqualizeImageTag  "Equalize/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    linear,
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    black,
    *histogram,
    intensity,
    *map,
    white;

  QuantumPixelPacket
    *equalize_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize histogram arrays.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  equalize_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*equalize_map));
  histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*map));
  if ((equalize_map == (QuantumPixelPacket *) NULL) ||
      (histogram == (MagickPixelPacket *) NULL) ||
      (map == (MagickPixelPacket *) NULL))
    {
      /* Release whichever allocations succeeded before throwing. */
      if (map != (MagickPixelPacket *) NULL)
        map=(MagickPixelPacket *) RelinquishMagickMemory(map);
      if (histogram != (MagickPixelPacket *) NULL)
        histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
      if (equalize_map != (QuantumPixelPacket *) NULL)
        equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(
          equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  linear=MagickFalse;
  if (image->colorspace == sRGBColorspace)
    {
      /* Equalize in linear RGB; the original colorspace is restored below. */
      linear=MagickTrue;
      (void) TransformImageColorspace(image,RGBColorspace);
    }
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
  exception=(&image->exception);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if ((channel & SyncChannels) != 0)
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        /*
          Synchronized channels: a single intensity histogram is kept in
          the .red slot.  (This local `intensity` shadows the outer
          accumulator of the same name.)
        */
        MagickRealType intensity=GetPixelIntensity(image,p);
        histogram[ScaleQuantumToMap(ClampToQuantum(intensity))].red++;
        p++;
      }
    else
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
        if ((channel & GreenChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
        if ((channel & BlueChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
        if ((channel & OpacityChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++;
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
        p++;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Integrate the histogram to get the equalization map.
  */
  (void) ResetMagickMemory(&intensity,0,sizeof(intensity));
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    if ((channel & SyncChannels) != 0)
      {
        intensity.red+=histogram[i].red;
        map[i]=intensity;
        continue;
      }
    if ((channel & RedChannel) != 0)
      intensity.red+=histogram[i].red;
    if ((channel & GreenChannel) != 0)
      intensity.green+=histogram[i].green;
    if ((channel & BlueChannel) != 0)
      intensity.blue+=histogram[i].blue;
    if ((channel & OpacityChannel) != 0)
      intensity.opacity+=histogram[i].opacity;
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      intensity.index+=histogram[i].index;
    map[i]=intensity;
  }
  black=map[0];
  white=map[(int) MaxMap];
  (void) ResetMagickMemory(equalize_map,0,(MaxMap+1)*sizeof(*equalize_map));
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    if ((channel & SyncChannels) != 0)
      {
        /* Synchronized path: only the .red map slot carries the mapping. */
        if (white.red != black.red)
          equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap*
            (map[i].red-black.red))/(white.red-black.red)));
        continue;
      }
    if (((channel & RedChannel) != 0) && (white.red != black.red))
      equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].red-black.red))/(white.red-black.red)));
    if (((channel & GreenChannel) != 0) && (white.green != black.green))
      equalize_map[i].green=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].green-black.green))/(white.green-black.green)));
    if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
      equalize_map[i].blue=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].blue-black.blue))/(white.blue-black.blue)));
    if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity))
      equalize_map[i].opacity=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].opacity-black.opacity))/(white.opacity-black.opacity)));
    if ((((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace)) &&
        (white.index != black.index))
      equalize_map[i].index=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].index-black.index))/(white.index-black.index)));
  }
  histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
  map=(MagickPixelPacket *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      /*
        Equalize colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & SyncChannels) != 0)
          {
            /* All components share the intensity mapping in .red. */
            if (white.red != black.red)
              {
                image->colormap[i].red=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].red)].red;
                image->colormap[i].green=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].green)].red;
                image->colormap[i].blue=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].blue)].red;
                image->colormap[i].opacity=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].opacity)].red;
              }
            continue;
          }
        if (((channel & RedChannel) != 0) && (white.red != black.red))
          image->colormap[i].red=equalize_map[
            ScaleQuantumToMap(image->colormap[i].red)].red;
        if (((channel & GreenChannel) != 0) && (white.green != black.green))
          image->colormap[i].green=equalize_map[
            ScaleQuantumToMap(image->colormap[i].green)].green;
        if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
          image->colormap[i].blue=equalize_map[
            ScaleQuantumToMap(image->colormap[i].blue)].blue;
        if (((channel & OpacityChannel) != 0) &&
            (white.opacity != black.opacity))
          image->colormap[i].opacity=equalize_map[
            ScaleQuantumToMap(image->colormap[i].opacity)].opacity;
      }
    }
  /*
    Equalize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & SyncChannels) != 0)
        {
          if (white.red != black.red)
            {
              SetPixelRed(q,equalize_map[
                ScaleQuantumToMap(GetPixelRed(q))].red);
              SetPixelGreen(q,equalize_map[
                ScaleQuantumToMap(GetPixelGreen(q))].red);
              SetPixelBlue(q,equalize_map[
                ScaleQuantumToMap(GetPixelBlue(q))].red);
              SetPixelOpacity(q,equalize_map[
                ScaleQuantumToMap(GetPixelOpacity(q))].red);
              if (image->colorspace == CMYKColorspace)
                SetPixelIndex(indexes+x,equalize_map[
                  ScaleQuantumToMap(GetPixelIndex(indexes+x))].red);
            }
          q++;
          continue;
        }
      if (((channel & RedChannel) != 0) && (white.red != black.red))
        SetPixelRed(q,equalize_map[
          ScaleQuantumToMap(GetPixelRed(q))].red);
      if (((channel & GreenChannel) != 0) && (white.green != black.green))
        SetPixelGreen(q,equalize_map[
          ScaleQuantumToMap(GetPixelGreen(q))].green);
      if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
        SetPixelBlue(q,equalize_map[
          ScaleQuantumToMap(GetPixelBlue(q))].blue);
      if (((channel & OpacityChannel) != 0) &&
          (white.opacity != black.opacity))
        SetPixelOpacity(q,equalize_map[
          ScaleQuantumToMap(GetPixelOpacity(q))].opacity);
      if ((((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace)) &&
          (white.index != black.index))
        SetPixelIndex(indexes+x,equalize_map[
          ScaleQuantumToMap(GetPixelIndex(indexes+x))].index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_EqualizeImageChannel)
#endif
        proceed=SetImageProgress(image,EqualizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(equalize_map);
  if (linear != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a m m a I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GammaImage() gamma-corrects a particular image channel.  The same
%  image viewed on different devices will have perceptual differences in the
%  way the image's intensities are represented on the screen.  Specify
%  individual gamma levels for the red, green, and blue channels, or adjust
%  all three with the gamma parameter.  Values typically range from 0.8 to 2.3.
%
%  You can also reduce the influence of a particular channel with a gamma
%  value of 0.
%
%  The format of the GammaImage method is:
%
%      MagickBooleanType GammaImage(Image *image,const char *level)
%      MagickBooleanType GammaImageChannel(Image *image,
%        const ChannelType channel,const double gamma)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o level: the image gamma as a string (e.g. 1.6,1.2,1.0).
%
%    o gamma: the image gamma.
%
*/
MagickExport MagickBooleanType GammaImage(Image *image,const char *level)
{
  GeometryInfo
    geometry_info;

  MagickPixelPacket
    gamma;

  MagickStatusType
    flags,
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (level == (char *) NULL)
    return(MagickFalse);
  /*
    Parse the level string into per-channel gammas; any omitted value
    defaults to the first (red) gamma.
  */
  flags=ParseGeometry(level,&geometry_info);
  gamma.red=geometry_info.rho;
  gamma.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    gamma.green=gamma.red;
  gamma.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    gamma.blue=gamma.red;
  if ((gamma.red == 1.0) && (gamma.green == 1.0) && (gamma.blue == 1.0))
    return(MagickTrue);
  if ((gamma.red == gamma.green) && (gamma.green == gamma.blue))
    status=GammaImageChannel(image,(ChannelType) (RedChannel | GreenChannel |
      BlueChannel),(double) gamma.red);
  else
    {
      status=GammaImageChannel(image,RedChannel,(double) gamma.red);
      status|=GammaImageChannel(image,GreenChannel,(double) gamma.green);
      status|=GammaImageChannel(image,BlueChannel,(double) gamma.blue);
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}

MagickExport MagickBooleanType GammaImageChannel(Image *image,
  const ChannelType channel,const double gamma)
{
#define GammaCorrectImageTag  "GammaCorrect/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (gamma == 1.0)
    return(MagickTrue);
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* A gamma of 0.0 leaves the map all zeros, zeroing the channel. */
  (void) ResetMagickMemory(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
        MagickRealType) (MaxMap*pow((double) i/MaxMap,1.0/gamma))));
  if (image->storage_class == PseudoClass)
    {
      /*
        Gamma-correct colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=gamma_map[ScaleQuantumToMap(
            image->colormap[i].red)];
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=gamma_map[ScaleQuantumToMap(
            image->colormap[i].green)];
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=gamma_map[ScaleQuantumToMap(
            image->colormap[i].blue)];
        if ((channel & OpacityChannel) != 0)
          {
            /* With a matte channel, gamma is applied to alpha (inverted). */
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=gamma_map[ScaleQuantumToMap(
                image->colormap[i].opacity)];
            else
              image->colormap[i].opacity=QuantumRange-gamma_map[
                ScaleQuantumToMap((Quantum) (QuantumRange-
                image->colormap[i].opacity))];
          }
      }
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & SyncChannels) != 0)
        {
          SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]);
          SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]);
        }
      else
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]);
          if ((channel & OpacityChannel) != 0)
            {
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,gamma_map[ScaleQuantumToMap(
                  GetPixelOpacity(q))]);
              else
                SetPixelAlpha(q,gamma_map[ScaleQuantumToMap((Quantum)
                  GetPixelAlpha(q))]);
            }
        }
      q++;
    }
    /* CMYK black (index) channel is corrected in a separate pass. */
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) image->columns; x++)
        SetPixelIndex(indexes+x,gamma_map[ScaleQuantumToMap(
          GetPixelIndex(indexes+x))]);
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GammaImageChannel)
#endif
        proceed=SetImageProgress(image,GammaCorrectImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     H a l d C l u t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  HaldClutImage() applies a Hald color lookup table to the image.  A Hald
%  color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
%  Create it with the HALD coder.  You can apply any color transformation to
%  the Hald image and then use this method to apply the transform to the
%  image.
%
%  The format of the HaldClutImage method is:
%
%      MagickBooleanType HaldClutImage(Image *image,Image *hald_image)
%      MagickBooleanType HaldClutImageChannel(Image *image,
%        const ChannelType channel,Image *hald_image)
%
%  A description of each parameter follows:
%
%    o image: the image, which is replaced by indexed CLUT values
%
%    o hald_image: the color lookup table image for replacement color values.
%
%    o channel: the channel.
% */ static inline size_t MagickMin(const size_t x,const size_t y) { if (x < y) return(x); return(y); } MagickExport MagickBooleanType HaldClutImage(Image *image, const Image *hald_image) { return(HaldClutImageChannel(image,DefaultChannels,hald_image)); } MagickExport MagickBooleanType HaldClutImageChannel(Image *image, const ChannelType channel,const Image *hald_image) { #define HaldClutImageTag "Clut/Image" typedef struct _HaldInfo { MagickRealType x, y, z; } HaldInfo; CacheView *hald_view, *image_view; double width; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; size_t cube_size, length, level; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(hald_image != (Image *) NULL); assert(hald_image->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,RGBColorspace); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Hald clut image. 
*/ status=MagickTrue; progress=0; length=MagickMin(hald_image->columns,hald_image->rows); for (level=2; (level*level*level) < length; level++) ; level*=level; cube_size=level*level; width=(double) hald_image->columns; GetMagickPixelPacket(hald_image,&zero); exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); hald_view=AcquireAuthenticCacheView(hald_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,hald_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double offset; HaldInfo point; MagickPixelPacket pixel, pixel1, pixel2, pixel3, pixel4; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(hald_view); pixel=zero; pixel1=zero; pixel2=zero; pixel3=zero; pixel4=zero; for (x=0; x < (ssize_t) image->columns; x++) { point.x=QuantumScale*(level-1.0)*GetPixelRed(q); point.y=QuantumScale*(level-1.0)*GetPixelGreen(q); point.z=QuantumScale*(level-1.0)*GetPixelBlue(q); offset=(double) (point.x+level*floor(point.y)+cube_size*floor(point.z)); point.x-=floor(point.x); point.y-=floor(point.y); point.z-=floor(point.z); (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width), &pixel1,exception); (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/ width),&pixel2,exception); MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2, pixel2.opacity,point.y,&pixel3); offset+=cube_size; (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width), &pixel1,exception); (void) 
InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/ width),&pixel2,exception); MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2, pixel2.opacity,point.y,&pixel4); MagickPixelCompositeAreaBlend(&pixel3,pixel3.opacity,&pixel4, pixel4.opacity,point.z,&pixel); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(pixel.blue)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(pixel.index)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_HaldClutImageChannel) #endif proceed=SetImageProgress(image,HaldClutImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } hald_view=DestroyCacheView(hald_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImage() adjusts the levels of a particular image channel by % scaling the colors falling between specified white and black points to % the full available quantum range. % % The parameters provided represent the black, and white points. The black % point specifies the darkest color in the image. Colors darker than the % black point are set to zero. White point specifies the lightest color in % the image. 
Colors brighter than the white point are set to the maximum
%  quantum value.
%
%  If a '!' flag is given, map black and white colors to the given levels
%  rather than mapping those levels to black and white.  See
%  LevelizeImageChannel() and LevelizeImageChannel(), below.
%
%  Gamma specifies a gamma correction to apply to the image.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType LevelImage(Image *image,const char *levels)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o levels: Specify the levels where the black and white points have the
%      range of 0-QuantumRange, and gamma has the range 0-10 (e.g. 10x90%+2).
%      A '!' flag inverts the re-mapping.
%
*/
MagickExport MagickBooleanType LevelImage(Image *image,const char *levels)
{
  double
    black_point,
    gamma,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse levels.
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=(double) QuantumRange;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  gamma=1.0;
  if ((flags & XiValue) != 0)
    gamma=geometry_info.xi;
  if ((flags & PercentValue) != 0)
    {
      /* Percent form: scale both points by the total pixel count / 100. */
      black_point*=(double) image->columns*image->rows/100.0;
      white_point*=(double) image->columns*image->rows/100.0;
    }
  /* Without an explicit white point, mirror the black point. */
  if ((flags & SigmaValue) == 0)
    white_point=(double) QuantumRange-black_point;
  /* AspectValue is set by the '!' flag: apply the inverse (levelize) op. */
  if ((flags & AspectValue ) == 0)
    status=LevelImageChannel(image,DefaultChannels,black_point,white_point,
      gamma);
  else
    status=LevelizeImage(image,black_point,white_point,gamma);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelizeImage() applies the normal level operation to the image, spreading
%  out the values between the black and white points over the entire range of
% values.  Gamma correction is also applied after the values have been mapped.
%
%  It is typically used to improve image contrast, or to provide a controlled
%  linear threshold for the image.  If the black and white points are set to
%  the minimum and maximum values found in the image, the image can be
%  normalized.  or by swapping black and white values, negate the image.
%
%  The format of the LevelizeImage method is:
%
%      MagickBooleanType LevelizeImage(Image *image,const double black_point,
%        const double white_point,const double gamma)
%      MagickBooleanType LevelizeImageChannel(Image *image,
%        const ChannelType channel,const double black_point,
%        const double white_point,const double gamma)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_point: The level which is to be mapped to zero (black)
%
%    o white_point: The level which is to be mapped to QuantumRange (white)
%
%    o gamma: adjust gamma by this factor before mapping values.
%      use 1.0 for purely linear stretching of image color values
%
*/

/*
  Map one quantum through the level transform: linearly stretch the
  [black_point,white_point] interval onto [0,QuantumRange], then apply the
  1/gamma power.  The result is NOT clamped here; every caller wraps it in
  ClampToQuantum().
*/
static inline MagickRealType LevelPixel(const double black_point,
  const double white_point,const double gamma,const MagickRealType pixel)
{
  double
    level_pixel,
    scale;

  /* Guard a degenerate (zero-width) level range against division by zero. */
  scale=(white_point != black_point) ? 1.0/(white_point-black_point) : 1.0;
  level_pixel=(MagickRealType) QuantumRange*pow(scale*((double) pixel-
    black_point),1.0/gamma);
  return(level_pixel);
}

/*
  Level the selected channels of the image in place.  PseudoClass images
  have their colormap leveled first; then every pixel row is processed,
  in parallel when OpenMP is available.
*/
MagickExport MagickBooleanType LevelImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelImageTag  "Level/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=(Quantum) ClampToQuantum(LevelPixel(
          black_point,white_point,gamma,(MagickRealType)
          image->colormap[i].red));
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=(Quantum) ClampToQuantum(LevelPixel(
          black_point,white_point,gamma,(MagickRealType)
          image->colormap[i].green));
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=(Quantum) ClampToQuantum(LevelPixel(
          black_point,white_point,gamma,(MagickRealType)
          image->colormap[i].blue));
      /*
        Opacity is leveled through its complement so that the operation
        acts on alpha (transparency is stored inverted in PixelPacket).
      */
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(Quantum) (QuantumRange-(Quantum)
          ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) (QuantumRange-image->colormap[i].opacity))));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /* A failure on any row aborts the remaining rows cheaply. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelRed(q))));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(LevelPixel(black_point,white_point,
          gamma,(MagickRealType) GetPixelGreen(q))));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelBlue(q))));
      /* Alpha is only leveled when the image actually has a matte channel. */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte == MagickTrue))
        SetPixelAlpha(q,ClampToQuantum(LevelPixel(black_point,white_point,
          gamma,(MagickRealType) GetPixelAlpha(q))));
      /* The index channel doubles as black (K) for CMYK images. */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) GetPixelIndex(indexes+x))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_LevelImageChannel)
#endif
        proceed=SetImageProgress(image,LevelImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l i z e I m a g e C h a n n e l                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelizeImageChannel() applies the reversed LevelImage() operation to just
%  the specific channels specified.  It compresses the full range of color
%  values, so that they lie between the given black and white points.  Gamma is
%  applied before the values are mapped.
%
%  LevelizeImageChannel() can be called by using a +level command line
%  API option, or using a '!' on a -level or LevelImage() geometry string.
%
%  It can be used, for example, to de-contrast a greyscale image to the exact
%  levels specified.  Or by using specific levels for each channel of an image
%  you can convert a gray-scale image to any linear color gradient, according
%  to those levels.
%
%  The format of the LevelizeImageChannel method is:
%
%      MagickBooleanType LevelizeImageChannel(Image *image,
%        const ChannelType channel,const char *levels)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o gamma: adjust gamma by this factor before mapping values
%
*/

/*
  Convenience wrapper: levelize all default channels of the image.
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma)
{
  MagickBooleanType
    status;

  status=LevelizeImageChannel(image,DefaultChannels,black_point,white_point,
    gamma);
  return(status);
}

MagickExport MagickBooleanType LevelizeImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelizeImageTag  "Levelize/Image"
/*
  Inverse of LevelPixel(): gamma correct the normalized value, then
  compress the full quantum range into [black_point,white_point].
*/
#define LevelizeValue(x) (ClampToQuantum(((MagickRealType) \
  pow((double)(QuantumScale*(x)),1.0/gamma))*(white_point-black_point)+ \
  black_point))

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Gray images are promoted to RGB so per-channel levelizing is meaningful.
  */
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,RGBColorspace);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=LevelizeValue(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=LevelizeValue(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=LevelizeValue(image->colormap[i].blue);
      /* Opacity is levelized via its complement (alpha). */
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(Quantum) (QuantumRange-LevelizeValue(
          QuantumRange-image->colormap[i].opacity));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,LevelizeValue(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,LevelizeValue(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,LevelizeValue(GetPixelBlue(q)));
      /* Alpha only when the image has a matte channel. */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte == MagickTrue))
        SetPixelAlpha(q,LevelizeValue(GetPixelAlpha(q)));
      /* Index channel is black (K) for CMYK images. */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,LevelizeValue(
          GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_LevelizeImageChannel)
#endif
        proceed=SetImageProgress(image,LevelizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImageColor() maps the given color to "black" and "white" values,
%  linearly spreading out
the colors, and level values on a channel by channel % bases, as per LevelImage(). The given colors allows you to specify % different level ranges for each of the color channels separately. % % If the boolean 'invert' is set true the image values will modifyed in the % reverse direction. That is any existing "black" and "white" colors in the % image will become the color values given, with all other values compressed % appropriatally. This effectivally maps a greyscale gradient into the given % color gradient. % % The format of the LevelColorsImageChannel method is: % % MagickBooleanType LevelColorsImage(Image *image, % const MagickPixelPacket *black_color, % const MagickPixelPacket *white_color,const MagickBooleanType invert) % MagickBooleanType LevelColorsImageChannel(Image *image, % const ChannelType channel,const MagickPixelPacket *black_color, % const MagickPixelPacket *white_color,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_color: The color to map black to/from % % o white_point: The color to map white to/from % % o invert: if true map the colors (levelize), rather than from (level) % */ MagickExport MagickBooleanType LevelColorsImage(Image *image, const MagickPixelPacket *black_color,const MagickPixelPacket *white_color, const MagickBooleanType invert) { MagickBooleanType status; status=LevelColorsImageChannel(image,DefaultChannels,black_color,white_color, invert); return(status); } MagickExport MagickBooleanType LevelColorsImageChannel(Image *image, const ChannelType channel,const MagickPixelPacket *black_color, const MagickPixelPacket *white_color,const MagickBooleanType invert) { MagickStatusType status; /* Allocate and initialize levels map. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickFalse; if (invert == MagickFalse) { if ((channel & RedChannel) != 0) status|=LevelImageChannel(image,RedChannel, black_color->red,white_color->red,(double) 1.0); if ((channel & GreenChannel) != 0) status|=LevelImageChannel(image,GreenChannel, black_color->green,white_color->green,(double) 1.0); if ((channel & BlueChannel) != 0) status|=LevelImageChannel(image,BlueChannel, black_color->blue,white_color->blue,(double) 1.0); if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue)) status|=LevelImageChannel(image,OpacityChannel, black_color->opacity,white_color->opacity,(double) 1.0); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) status|=LevelImageChannel(image,IndexChannel, black_color->index,white_color->index,(double) 1.0); } else { if ((channel & RedChannel) != 0) status|=LevelizeImageChannel(image,RedChannel, black_color->red,white_color->red,(double) 1.0); if ((channel & GreenChannel) != 0) status|=LevelizeImageChannel(image,GreenChannel, black_color->green,white_color->green,(double) 1.0); if ((channel & BlueChannel) != 0) status|=LevelizeImageChannel(image,BlueChannel, black_color->blue,white_color->blue,(double) 1.0); if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue)) status|=LevelizeImageChannel(image,OpacityChannel, black_color->opacity,white_color->opacity,(double) 1.0); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) status|=LevelizeImageChannel(image,IndexChannel, black_color->index,white_color->index,(double) 1.0); } return(status == 0 ? 
MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i n e a r S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LinearStretchImage() discards any pixels below the black point and above % the white point and levels the remaining pixels. % % The format of the LinearStretchImage method is: % % MagickBooleanType LinearStretchImage(Image *image, % const double black_point,const double white_point) % % A description of each parameter follows: % % o image: the image. % % o black_point: the black point. % % o white_point: the white point. % */ MagickExport MagickBooleanType LinearStretchImage(Image *image, const double black_point,const double white_point) { #define LinearStretchImageTag "LinearStretch/Image" ExceptionInfo *exception; MagickBooleanType status; MagickRealType *histogram, intensity; ssize_t black, white, y; /* Allocate histogram and linear map. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); histogram=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); if (histogram == (MagickRealType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Form histogram. */ (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram)); exception=(&image->exception); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *restrict p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=(ssize_t) image->columns-1; x >= 0; x--) { histogram[ScaleQuantumToMap(PixelIntensityToQuantum(image,p))]++; p++; } } /* Find the histogram boundaries by locating the black and white point levels. 
*/ intensity=0.0; for (black=0; black < (ssize_t) MaxMap; black++) { intensity+=histogram[black]; if (intensity >= black_point) break; } intensity=0.0; for (white=(ssize_t) MaxMap; white != 0; white--) { intensity+=histogram[white]; if (intensity >= white_point) break; } histogram=(MagickRealType *) RelinquishMagickMemory(histogram); status=LevelImageChannel(image,DefaultChannels,(double) black,(double) white, 1.0); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d u l a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModulateImage() lets you control the brightness, saturation, and hue % of an image. Modulate represents the brightness, saturation, and hue % as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the % modulation is lightness, saturation, and hue. For HWB, use blackness, % whiteness, and hue. And for HCL, use chrome, luma, and hue. % % The format of the ModulateImage method is: % % MagickBooleanType ModulateImage(Image *image,const char *modulate) % % A description of each parameter follows: % % o image: the image. % % o modulate: Define the percent change in brightness, saturation, and % hue. % */ static inline void ModulateHCL(const double percent_hue, const double percent_chroma,const double percent_luma,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. 
*/ ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue > 1.0) hue-=1.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHSB(const double percent_hue, const double percent_saturation,const double percent_brightness, Quantum *red,Quantum *green,Quantum *blue) { double brightness, hue, saturation; /* Increase or decrease color brightness, saturation, or hue. */ ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue > 1.0) hue-=1.0; saturation*=0.01*percent_saturation; brightness*=0.01*percent_brightness; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } static inline void ModulateHSL(const double percent_hue, const double percent_saturation,const double percent_lightness, Quantum *red,Quantum *green,Quantum *blue) { double hue, lightness, saturation; /* Increase or decrease color lightness, saturation, or hue. */ ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue > 1.0) hue-=1.0; saturation*=0.01*percent_saturation; lightness*=0.01*percent_lightness; ConvertHSLToRGB(hue,saturation,lightness,red,green,blue); } static inline void ModulateHWB(const double percent_hue, const double percent_whiteness,const double percent_blackness,Quantum *red, Quantum *green,Quantum *blue) { double blackness, hue, whiteness; /* Increase or decrease color blackness, whiteness, or hue. 
*/ ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue > 1.0) hue-=1.0; blackness*=0.01*percent_blackness; whiteness*=0.01*percent_whiteness; ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue); } MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate) { #define ModulateImageTag "Modulate/Image" CacheView *image_view; ColorspaceType colorspace; const char *artifact; double percent_brightness, percent_hue, percent_saturation; ExceptionInfo *exception; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; register ssize_t i; ssize_t y; /* Initialize modulate table. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (modulate == (char *) NULL) return(MagickFalse); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace); flags=ParseGeometry(modulate,&geometry_info); percent_brightness=geometry_info.rho; percent_saturation=geometry_info.sigma; if ((flags & SigmaValue) == 0) percent_saturation=100.0; percent_hue=geometry_info.xi; if ((flags & XiValue) == 0) percent_hue=100.0; colorspace=UndefinedColorspace; artifact=GetImageArtifact(image,"modulate:colorspace"); if (artifact != (const char *) NULL) colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse,artifact); if (image->storage_class == PseudoClass) { /* Modulate colormap. 
*/ for (i=0; i < (ssize_t) image->colors; i++) { Quantum blue, green, red; red=image->colormap[i].red; green=image->colormap[i].green; blue=image->colormap[i].blue; switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } } } } /* Modulate image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum blue, green, red; red=GetPixelRed(q); green=GetPixelGreen(q); blue=GetPixelBlue(q); switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } } SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == 
MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ModulateImage) #endif proceed=SetImageProgress(image,ModulateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e g a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NegateImage() negates the colors in the reference image. The grayscale % option means that only grayscale values within the image are negated. % % The format of the NegateImageChannel method is: % % MagickBooleanType NegateImage(Image *image, % const MagickBooleanType grayscale) % MagickBooleanType NegateImageChannel(Image *image, % const ChannelType channel,const MagickBooleanType grayscale) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o grayscale: If MagickTrue, only negate grayscale pixels within the image. % */ MagickExport MagickBooleanType NegateImage(Image *image, const MagickBooleanType grayscale) { MagickBooleanType status; status=NegateImageChannel(image,DefaultChannels,grayscale); return(status); } MagickExport MagickBooleanType NegateImageChannel(Image *image, const ChannelType channel,const MagickBooleanType grayscale) { #define NegateImageTag "Negate/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { /* Negate colormap. 
*/ for (i=0; i < (ssize_t) image->colors; i++) { if (grayscale != MagickFalse) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((channel & RedChannel) != 0) image->colormap[i].red=QuantumRange- image->colormap[i].red; if ((channel & GreenChannel) != 0) image->colormap[i].green=QuantumRange- image->colormap[i].green; if ((channel & BlueChannel) != 0) image->colormap[i].blue=QuantumRange- image->colormap[i].blue; } } /* Negate image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); if (grayscale != MagickFalse) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRed(q) != GetPixelGreen(q)) || (GetPixelGreen(q) != GetPixelBlue(q))) { q++; continue; } if ((channel & RedChannel) != 0) SetPixelRed(q,QuantumRange-GetPixelRed(q)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,QuantumRange-GetPixelGreen(q)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,QuantumRange-GetPixelBlue(q)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,QuantumRange- GetPixelOpacity(q)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,QuantumRange- GetPixelIndex(indexes+x)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != 
(MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_NegateImageChannel) #endif proceed=SetImageProgress(image,NegateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(MagickTrue); } /* Negate image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,QuantumRange-GetPixelRed(q)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,QuantumRange-GetPixelGreen(q)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,QuantumRange-GetPixelBlue(q)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,QuantumRange-GetPixelOpacity(q)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_NegateImageChannel) #endif proceed=SetImageProgress(image,NegateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N o r m a l i z e I m a g 
e                                                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  The NormalizeImage() method enhances the contrast of a color image by
%  mapping the darkest pixels to black and the brightest to white
%  (NOTE(review): the historic description said "darkest 2 percent" and
%  "brightest 1 percent"; the constants below clip 0.15% / 0.05% — confirm
%  which is intended).
%
%  The format of the NormalizeImage method is:
%
%      MagickBooleanType NormalizeImage(Image *image)
%      MagickBooleanType NormalizeImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
*/
MagickExport MagickBooleanType NormalizeImage(Image *image)
{
  MagickBooleanType
    status;

  /* Convenience wrapper: normalize all default channels. */
  status=NormalizeImageChannel(image,DefaultChannels);
  return(status);
}

MagickExport MagickBooleanType NormalizeImageChannel(Image *image,
  const ChannelType channel)
{
  double
    black_point,
    white_point;

  /*
    Thresholds are cumulative pixel counts handed to
    ContrastStretchImageChannel(): clip the darkest 0.15% of pixels and
    keep up to the 99.95th percentile before stretching.
  */
  black_point=(double) image->columns*image->rows*0.0015;
  white_point=(double) image->columns*image->rows*0.9995;
  return(ContrastStretchImageChannel(image,channel,black_point,white_point));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S i g m o i d a l C o n t r a s t I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
%  sigmoidal contrast algorithm.  Increase the contrast of the image using a
%  sigmoidal transfer function without saturating highlights or shadows.
%  Contrast indicates how much to increase the contrast (0 is none; 3 is
%  typical; 20 is pushing it); mid-point indicates where midtones fall in the
%  resultant image (0 is white; 50% is middle-gray; 100% is black).  Set
%  sharpen to MagickTrue to increase the image contrast otherwise the contrast
%  is reduced.
%
%  The format of the SigmoidalContrastImage method is:
%
%      MagickBooleanType SigmoidalContrastImage(Image *image,
%        const MagickBooleanType sharpen,const char *levels)
%      MagickBooleanType SigmoidalContrastImageChannel(Image *image,
%        const ChannelType channel,const MagickBooleanType sharpen,
%        const double contrast,const double midpoint)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o sharpen: Increase or decrease image contrast.
%
%    o contrast: strength of the contrast, the larger the number the more
%      'threshold-like' it becomes.
%
%    o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
*/

/*
  ImageMagick 7 has a version of this function which does not use LUTs.
*/

/*
  Sigmoidal function Sigmoidal with inflexion point moved to b and "slope
  constant" set to a.

  The first version, based on the hyperbolic tangent tanh, when combined with
  the scaling step, is an exact arithmetic clone of the the sigmoid function
  based on the logistic curve.  The equivalence is based on the identity

    1/(1+exp(-t)) = (1+tanh(t/2))/2

  (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the
  scaled sigmoidal derivation is invariant under affine transformations of
  the ordinate.

  The tanh version is almost certainly more accurate and cheaper.  The 0.5
  factor in the argument is to clone the legacy ImageMagick behavior.  The
  reason for making the define depend on atanh even though it only uses tanh
  has to do with the construction of the inverse of the scaled sigmoidal.
*/
#if defined(MAGICKCORE_HAVE_ATANH)
#define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) )
#else
#define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) )
#endif
/*
  Scaled sigmoidal function:

    ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) /
    ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) )

  See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and
  http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf.

  The limit of ScaledSigmoidal as a->0 is the identity, but a=0 gives a
  division by zero.  This is fixed below by exiting immediately when contrast
  is small, leaving the image (or colormap) unmodified.  This appears to be
  safe because the series expansion of the logistic sigmoidal function
  around x=b is 1/2-a*(b-x)/4+...  so that the key denominator s(1)-s(0) is
  about a/4 (a/2 with tanh).
*/
#define ScaledSigmoidal(a,b,x) (                    \
  (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \
  (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) )
/*
  Inverse of ScaledSigmoidal, used for +sigmoidal-contrast.  Because b
  may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic
  sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even
  when creating a LUT from in gamut values, hence the branching.  In
  addition, HDRI may have out of gamut values.
  InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal:
  It is only a right inverse.  This is unavoidable.
*/
static inline double InverseScaledSigmoidal(const double a,const double b,
  const double x)
{
  const double sig0=Sigmoidal(a,b,0.0);
  const double sig1=Sigmoidal(a,b,1.0);
  const double argument=(sig1-sig0)*x+sig0;
  /* Clamp the argument into the open domain of atanh (resp. log). */
  const double clamped=
    (
#if defined(MAGICKCORE_HAVE_ATANH)
      argument < -1+MagickEpsilon
      ?
      -1+MagickEpsilon
      :
      (
        argument > 1-MagickEpsilon
        ?
        1-MagickEpsilon
        :
        argument
      )
    );
  return(b+(2.0/a)*atanh(clamped));
#else
      argument < MagickEpsilon
      ?
      MagickEpsilon
      :
      (
        argument > 1-MagickEpsilon
        ?
        1-MagickEpsilon
        :
        argument
      )
    );
  return(b-log(1.0/clamped-1.0)/a);
#endif
}

/*
  Parse the levels geometry ("contrast[,midpoint[%]]") and delegate to
  SigmoidalContrastImageChannel() on the default channels.  An omitted
  midpoint defaults to mid-gray; a percent suffix scales it to the
  quantum range.
*/
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const char *levels)
{
  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  flags=ParseGeometry(levels,&geometry_info);
  if ((flags & SigmaValue) == 0)
    geometry_info.sigma=1.0*QuantumRange/2.0;
  if ((flags & PercentValue) != 0)
    geometry_info.sigma=1.0*QuantumRange*geometry_info.sigma/100.0;
  status=SigmoidalContrastImageChannel(image,DefaultChannels,sharpen,
    geometry_info.rho,geometry_info.sigma);
  return(status);
}

/*
  Apply sigmoidal contrast via a precomputed MaxMap-entry LUT: sharpen
  uses ScaledSigmoidal(), the inverse path uses InverseScaledSigmoidal().
*/
MagickExport MagickBooleanType SigmoidalContrastImageChannel(Image *image,
  const ChannelType channel,const MagickBooleanType sharpen,
  const double contrast,const double midpoint)
{
#define SigmoidalContrastImageTag  "SigmoidalContrast/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *sigmoidal_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Side effect: clamps values unless contrast<MagickEpsilon, in which
    case nothing is done (avoids the a=0 division in ScaledSigmoidal).
  */
  if (contrast < MagickEpsilon)
    return(MagickTrue);
  /*
    Allocate and initialize sigmoidal maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sigmoidal_map=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*sigmoidal_map));
  if (sigmoidal_map == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(sigmoidal_map,0,(MaxMap+1)*sizeof(*sigmoidal_map));
  if (sharpen != MagickFalse)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType)
        (MaxMap*ScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/
        MaxMap)));
  else
    for (i=0; i <= (ssize_t) MaxMap; i++)
      sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) (
        MaxMap*InverseScaledSigmoidal(contrast,QuantumScale*midpoint,(double)
        i/MaxMap)));
  /*
    Sigmoidal-contrast enhance colormap.
  */
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(image->colormap[i].red)]);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(image->colormap[i].green)]);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(image->colormap[i].blue)]);
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(image->colormap[i].opacity)]);
    }
  /*
    Sigmoidal-contrast enhance image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelRed(q))]));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelGreen(q))]));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelBlue(q))]));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelOpacity(q))]));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(GetPixelIndex(indexes+x))]));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SigmoidalContrastImageChannel)
#endif
        proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  sigmoidal_map=(MagickRealType *) RelinquishMagickMemory(sigmoidal_map);
  return(status);
}
vector_addition.h
//==============================================================================
//
// Copyright 2018 The InsideLoop Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//==============================================================================

#ifndef IL_VECTOR_ADDITION_H
#define IL_VECTOR_ADDITION_H

#include <il/Array.h>
#include <il/benchmark/tools/timer/Benchmark.h>

#include <cstdio>

#ifdef IL_TBB
#include <tbb/tbb.h>
#endif
#ifdef IL_CILK
#include <cilk/cilk.h>
#endif

// Benchmarks element-wise vector addition (v2[k] += v1[k]) for a range of
// array sizes, comparing a serial loop against parallel variants compiled in
// when the corresponding backend macro is defined (IL_OPENMP, IL_TBB,
// IL_CILK).  Prints per-element timings and serial/parallel speedup ratios
// to stdout.
// NOTE(review): il::benchmark presumably returns the best/average wall time
// of the callable in seconds, so time_* below is seconds per element —
// confirm against il/benchmark/tools/timer/Benchmark.h.
void vector_addition() {
  std::printf(
      "****************************************************************"
      "****************\n");
  std::printf("* Vector addition\n");
  std::printf(
      "****************************************************************"
      "****************\n");
  // Sizes span 1e2..1e8 elements so both cache-resident and
  // memory-bound regimes are measured.
  il::Array<il::int_t> size{
      il::value, {100, 1000, 10000, 100000, 1000000, 10000000, 100000000}};
  for (il::int_t n : size) {
    std::printf("Size of array: %td\n", n);
    // Baseline: plain serial loop.  The arrays are allocated inside the
    // lambda so each benchmark run times only the addition loop, not setup.
    auto vector_addition_serial = [&n](il::io_t, il::BState& state) {
      il::Array<double> v1{n, 0.0};
      il::Array<double> v2{n, 0.0};
      while (state.keep_running()) {
        for (il::int_t k = 0; k < v2.size(); ++k) {
          v2[k] += v1[k];
        }
      }
    };
    double time_serial{il::benchmark(vector_addition_serial) / n};
    std::printf("Serial: %7.3e s\n", time_serial);

#ifdef IL_OPENMP
    // Same loop parallelized with an OpenMP parallel-for.
    auto vector_addition_openmp = [&n](il::io_t, il::BState& state) {
      il::Array<double> v1{n, 0.0};
      il::Array<double> v2{n, 0.0};
      while (state.keep_running()) {
#pragma omp parallel for
        for (il::int_t k = 0; k < v2.size(); ++k) {
          v2[k] += v1[k];
        }
      }
    };
    double time_openmp{il::benchmark(vector_addition_openmp) / n};
    std::printf("OpenMP: %7.3e s, Ratio: %5.3f\n", time_openmp,
                time_serial / time_openmp);
#endif

#ifdef IL_TBB
    // TBB range-based parallel_for; v1/v2 are captured by reference so the
    // inner lambda updates the same arrays.
    auto vector_addition_tbb = [&n](il::io_t, il::BState& state) {
      il::Array<double> v1{n, 0.0};
      il::Array<double> v2{n, 0.0};
      while (state.keep_running()) {
        tbb::parallel_for(
            tbb::blocked_range<il::int_t>(0, v2.size()),
            [=, &v1, &v2](const tbb::blocked_range<il::int_t>& range) {
              for (il::int_t k{range.begin()}; k < range.end(); ++k) {
                v2[k] += v1[k];
              }
            });
      }
    };
    double time_tbb{il::benchmark(vector_addition_tbb) / n};
    std::printf(" TBB: %7.3e s, Ratio: %5.3f\n", time_tbb,
                time_serial / time_tbb);
#endif

#ifdef IL_CILK
    // Cilk Plus parallel loop.
    auto vector_addition_cilk = [&n](il::io_t, il::BState& state) {
      il::Array<double> v1{n, 0.0};
      il::Array<double> v2{n, 0.0};
      while (state.keep_running()) {
        cilk_for(il::int_t k = 0; k < n; ++k) { v2[k] += v1[k]; }
      }
    };
    double time_cilk{il::benchmark(vector_addition_cilk) / n};
    std::printf(" Cilk: %7.3e s, Ratio: %5.3f\n", time_cilk,
                time_serial / time_cilk);
#endif
    std::printf("\n");
  }
}

#endif  // IL_VECTOR_ADDITION_H
GB_binop__max_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__max_fp32
// A.*B function (eWiseMult):       GB_AemultB__max_fp32
// A*D function (colscale):         GB_AxD__max_fp32
// D*A function (rowscale):         GB_DxB__max_fp32
// C+=B function (dense accum):     GB_Cdense_accumB__max_fp32
// C+=b function (dense accum):     GB_Cdense_accumb__max_fp32
// C+=A+B function (dense ewise3):  GB_Cdense_ewise3_accum__max_fp32
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__max_fp32
// C=scalar+B                       GB_bind1st__max_fp32
// C=scalar+B'                      GB_bind1st_tran__max_fp32
// C=A+scalar                       GB_bind2nd__max_fp32
// C=A'+scalar                      GB_bind2nd_tran__max_fp32

// C type:   float
// A type:   float
// B,b type: float
// BinaryOp: cij = fmaxf (aij, bij)

// The macros below are the only operator-specific part of this file: they
// configure the generic kernel bodies pulled in via #include of the
// GB_*_template.c / GB_*_meta.c files in the functions that follow.

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    float bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = fmaxf (x, y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MAX || GxB_NO_FP32 || GxB_NO_MAX_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Kernel bodies below live in the #include'd template files; the macros
// defined earlier in this file specialize them for z = fmaxf (x, y) on
// float data.

void GB_Cdense_ewise3_accum__max_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__max_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__max_fp32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__max_fp32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__max_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__max_fp32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Frees the ek_slice workspaces declared in GB_AaddB / GB_AemultB below;
// also invoked from within the included templates on early exit.
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__max_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__max_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__max_fp32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    // entries absent from the bitmap Bb (if any) are skipped
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        float bij = Bx [p] ;
        Cx [p] = fmaxf (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__max_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = Ax [p] ; Cx [p] = fmaxf (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = fmaxf (x, aij) ; \ } GrB_Info GB_bind1st_tran__max_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = fmaxf (aij, y) ; \ } GrB_Info GB_bind2nd_tran__max_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__le_fp64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__le_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_08__le_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__le_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_04__le_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__le_fp64)
// A*D function (colscale):         GB (_AxD__le_fp64)
// D*A function (rowscale):         GB (_DxB__le_fp64)
// C+=B function (dense accum):     GB (_Cdense_accumB__le_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__le_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__le_fp64)
// C=scalar+B                       GB (_bind1st__le_fp64)
// C=scalar+B'                      GB (_bind1st_tran__le_fp64)
// C=A+scalar                       GB (_bind2nd__le_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__le_fp64)

// C type:     bool
// A type:     double
// A pattern?  0
// B type:     double
// B pattern?  0

// BinaryOp: cij = (aij <= bij)

// The macros below are the only operator-specific part of this file: they
// configure the generic kernel bodies pulled in via #include of the
// GB_*_template.c / GB_*_meta.c files in the functions that follow.

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    double bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x <= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LE || GxB_NO_FP64 || GxB_NO_LE_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// LE is not one of the dense-accum ops, so this kernel is compiled out.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__le_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__le_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // accum kernel not applicable for this operator (compiled out)
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__le_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // accum kernel not applicable for this operator (compiled out)
    #if 0
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__le_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__le_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__le_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // the alpha/beta fill scalars are only read when is_eWiseUnion is true
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__le_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__le_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__le_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__le_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__le_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    // entries absent from the bitmap Bb (if any) are skipped
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__le_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    // entries absent from the bitmap Ab (if any) are skipped
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    double aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = (x <= aij) ;          \
}

GrB_Info GB (_bind1st_tran__le_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code after this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    double aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = (aij <= y) ;          \
}

GrB_Info GB (_bind2nd_tran__le_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
gbdt.h
#ifndef LIGHTGBM_BOOSTING_GBDT_H_ #define LIGHTGBM_BOOSTING_GBDT_H_ #include <LightGBM/boosting.h> #include <LightGBM/objective_function.h> #include <LightGBM/prediction_early_stop.h> #include "score_updater.hpp" #include <cstdio> #include <vector> #include <string> #include <fstream> #include <memory> #include <mutex> #include <map> namespace LightGBM { /*! * \brief GBDT algorithm implementation. including Training, prediction, bagging. */ class GBDT : public GBDTBase { public: /*! * \brief Constructor */ GBDT(); /*! * \brief Destructor */ ~GBDT(); /*! * \brief Initialization logic * \param gbdt_config Config for boosting * \param train_data Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void Init(const BoostingConfig* gbdt_config, const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Merge model from other boosting object. Will insert to the front of current boosting object * \param other */ void MergeFrom(const Boosting* other) override { auto other_gbdt = reinterpret_cast<const GBDT*>(other); // tmp move to other vector auto original_models = std::move(models_); models_ = std::vector<std::unique_ptr<Tree>>(); // push model from other first for (const auto& tree : other_gbdt->models_) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; // push model in current object for (const auto& tree : original_models) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; } /*! 
* \brief Reset the training data * \param train_data New Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Reset Boosting Config * \param gbdt_config Config for boosting */ void ResetConfig(const BoostingConfig* gbdt_config) override; /*! * \brief Adding a validation dataset * \param valid_data Validation dataset * \param valid_metrics Metrics for validation dataset */ void AddValidDataset(const Dataset* valid_data, const std::vector<const Metric*>& valid_metrics) override; /*! * \brief Perform a full training procedure * \param snapshot_freq frequence of snapshot * \param model_output_path path of model file */ void Train(int snapshot_freq, const std::string& model_output_path) override; void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override; /*! * \brief Training logic * \param gradients nullptr for using default objective, otherwise use self-defined boosting * \param hessians nullptr for using default objective, otherwise use self-defined boosting * \return True if cannot train any more */ virtual bool TrainOneIter(const score_t* gradients, const score_t* hessians) override; /*! * \brief Rollback one iteration */ void RollbackOneIter() override; /*! * \brief Get current iteration */ int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; } /*! * \brief Can use early stopping for prediction or not * \return True if cannot use early stopping for prediction */ bool NeedAccuratePrediction() const override { if (objective_function_ == nullptr) { return true; } else { return objective_function_->NeedAccuratePrediction(); } } /*! 
* \brief Get evaluation result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return evaluation result */ std::vector<double> GetEvalAt(int data_idx) const override; /*! * \brief Get current training score * \param out_len length of returned score * \return training score */ virtual const double* GetTrainingScore(int64_t* out_len) override; /*! * \brief Get size of prediction at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return The size of prediction */ virtual int64_t GetNumPredictAt(int data_idx) const override { CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size())); data_size_t num_data = train_data_->num_data(); if (data_idx > 0) { num_data = valid_score_updater_[data_idx - 1]->num_data(); } return num_data * num_class_; } /*! * \brief Get prediction result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \param result used to store prediction result, should allocate memory before call this function * \param out_len length of returned score */ void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override; /*! 
* \brief Get number of prediction for one data * \param num_iteration number of used iterations * \param is_pred_leaf True if predicting leaf index * \param is_pred_contrib True if predicting feature contribution * \return number of prediction */ inline int NumPredictOneRow(int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override { int num_preb_in_one_row = num_class_; if (is_pred_leaf) { int max_iteration = GetCurrentIteration(); if (num_iteration > 0) { num_preb_in_one_row *= static_cast<int>(std::min(max_iteration, num_iteration)); } else { num_preb_in_one_row *= max_iteration; } } else if (is_pred_contrib) { num_preb_in_one_row = max_feature_idx_ + 2; // +1 for 0-based indexing, +1 for baseline } return num_preb_in_one_row; } void PredictRaw(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictRawByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void Predict(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void PredictLeafIndex(const double* features, double* output) const override; void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override; void PredictContrib(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; /*! * \brief Dump model to json format string * \param num_iteration Number of iterations that want to dump, -1 means dump all * \return Json format string of model */ std::string DumpModel(int num_iteration) const override; /*! 
* \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \return if-else format codes of model */ std::string ModelToIfElse(int num_iteration) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToIfElse(int num_iteration, const char* filename) const override; /*! * \brief Save model to file * \param num_iterations Number of model that want to save, -1 means save all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ virtual bool SaveModelToFile(int num_iterations, const char* filename) const override; /*! * \brief Save model to string * \param num_iterations Number of model that want to save, -1 means save all * \return Non-empty string if succeeded */ virtual std::string SaveModelToString(int num_iterations) const override; /*! * \brief Restore from a serialized buffer */ bool LoadModelFromString(const char* buffer, size_t len) override; /*! * \brief Calculate feature importances * \param num_iteration Number of model that want to use for feature importance, -1 means use all * \param importance_type: 0 for split, 1 for gain * \return vector of feature_importance */ std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override; /*! * \brief Get max feature index of this model * \return Max feature index of this model */ inline int MaxFeatureIdx() const override { return max_feature_idx_; } /*! * \brief Get feature names of this model * \return Feature names of this model */ inline std::vector<std::string> FeatureNames() const override { return feature_names_; } /*! 
* \brief Get index of label column * \return index of label column */ inline int LabelIdx() const override { return label_idx_; } /*! * \brief Get number of weak sub-models * \return Number of weak sub-models */ inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); } /*! * \brief Get number of tree per iteration * \return number of tree per iteration */ inline int NumModelPerIteration() const override { return num_tree_per_iteration_; } /*! * \brief Get number of classes * \return Number of classes */ inline int NumberOfClasses() const override { return num_class_; } inline void InitPredict(int num_iteration, bool is_pred_contrib) override { num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; if (num_iteration > 0) { num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_); } if (is_pred_contrib) { #pragma omp parallel for schedule(static) for (int i = 0; i < static_cast<int>(models_.size()); ++i) { models_[i]->RecomputeMaxDepth(); } } } inline double GetLeafValue(int tree_idx, int leaf_idx) const override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); return models_[tree_idx]->LeafOutput(leaf_idx); } inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); models_[tree_idx]->SetLeafOutput(leaf_idx, val); } /*! * \brief Get Type name of this boosting object */ virtual const char* SubModelName() const override { return "tree"; } protected: /*! * \brief Print eval result and check early stopping */ bool EvalAndCheckEarlyStopping(); /*! * \brief reset config for bagging */ void ResetBaggingConfig(const BoostingConfig* config, bool is_change_dataset); /*! 
* \brief Implement bagging logic * \param iter Current interation */ virtual void Bagging(int iter); /*! * \brief Helper function for bagging, used for multi-threading optimization * \param start start indice of bagging * \param cnt count * \param buffer output buffer * \return count of left size */ data_size_t BaggingHelper(Random& cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer); /*! * \brief calculate the object function */ virtual void Boosting(); /*! * \brief updating score after tree was trained * \param tree Trained tree of this iteration * \param cur_tree_id Current tree for multiclass training */ virtual void UpdateScore(const Tree* tree, const int cur_tree_id); /*! * \brief eval results for one metric */ virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const; /*! * \brief Print metric result of current iteration * \param iter Current interation * \return best_msg if met early_stopping */ std::string OutputMetric(int iter); double BoostFromAverage(); /*! \brief current iteration */ int iter_; /*! \brief Pointer to training data */ const Dataset* train_data_; /*! \brief Config of gbdt */ std::unique_ptr<BoostingConfig> gbdt_config_; /*! \brief Tree learner, will use this class to learn trees */ std::unique_ptr<TreeLearner> tree_learner_; /*! \brief Objective function */ const ObjectiveFunction* objective_function_; /*! \brief Store and update training data's score */ std::unique_ptr<ScoreUpdater> train_score_updater_; /*! \brief Metrics for training data */ std::vector<const Metric*> training_metrics_; /*! \brief Store and update validation data's scores */ std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_; /*! \brief Metric for validation data */ std::vector<std::vector<const Metric*>> valid_metrics_; /*! \brief Number of rounds for early stopping */ int early_stopping_round_; /*! \brief Best iteration(s) for early stopping */ std::vector<std::vector<int>> best_iter_; /*! 
\brief Best score(s) for early stopping */ std::vector<std::vector<double>> best_score_; /*! \brief output message of best iteration */ std::vector<std::vector<std::string>> best_msg_; /*! \brief Trained models(trees) */ std::vector<std::unique_ptr<Tree>> models_; /*! \brief Max feature index of training data*/ int max_feature_idx_; /*! \brief First order derivative of training data */ std::vector<score_t> gradients_; /*! \brief Secend order derivative of training data */ std::vector<score_t> hessians_; /*! \brief Store the indices of in-bag data */ std::vector<data_size_t> bag_data_indices_; /*! \brief Number of in-bag data */ data_size_t bag_data_cnt_; /*! \brief Store the indices of in-bag data */ std::vector<data_size_t> tmp_indices_; /*! \brief Number of training data */ data_size_t num_data_; /*! \brief Number of trees per iterations */ int num_tree_per_iteration_; /*! \brief Number of class */ int num_class_; /*! \brief Index of label column */ data_size_t label_idx_; /*! \brief number of used model */ int num_iteration_for_pred_; /*! \brief Shrinkage rate for one iteration */ double shrinkage_rate_; /*! \brief Number of loaded initial models */ int num_init_iteration_; /*! \brief Feature names */ std::vector<std::string> feature_names_; std::vector<std::string> feature_infos_; /*! \brief number of threads */ int num_threads_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> offsets_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> left_cnts_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> right_cnts_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> left_write_pos_buf_; /*! 
\brief Buffer for multi-threading bagging */ std::vector<data_size_t> right_write_pos_buf_; std::unique_ptr<Dataset> tmp_subset_; bool is_use_subset_; std::vector<bool> class_need_train_; std::vector<double> class_default_output_; bool is_constant_hessian_; std::unique_ptr<ObjectiveFunction> loaded_objective_; bool average_output_; bool need_re_bagging_; }; } // namespace LightGBM #endif // LightGBM_BOOSTING_GBDT_H_
log.h
/**
 * Copyright (c) 2017 rxi
 *
 * This library is free software; you can redistribute it and/or modify it
 * under the terms of the MIT license. See `log.c` for details.
 */
#pragma once

#ifdef USE_LOG
#include <stdio.h>
#include <stdarg.h>
#include <cstring>
#include <string>

#define LOG_VERSION "0.1.0"

// Callback used to serialize access to the log sink (lock=1 acquire, 0 release).
typedef void (*log_LockFn)(void *udata, int lock);

// Severity levels, ascending.
enum { LOG_TRACE, LOG_DEBUG, LOG_INFO, LOG_WARN, LOG_ERROR, LOG_FATAL };

// Strip the directory part of __FILE__ so log lines show only the basename.
#define __FILENAME__ (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define RAPIDS_FILE (__FILENAME__)

#define log_trace(...) log_log(LOG_TRACE, RAPIDS_FILE, __LINE__, __VA_ARGS__)
#define log_debug(...) log_log(LOG_DEBUG, RAPIDS_FILE, __LINE__, __VA_ARGS__)
#define log_info(...) log_log(LOG_INFO, RAPIDS_FILE, __LINE__, __VA_ARGS__)
#define log_warn(...) log_log(LOG_WARN, RAPIDS_FILE, __LINE__, __VA_ARGS__)
#define log_error(...) log_log(LOG_ERROR, RAPIDS_FILE, __LINE__, __VA_ARGS__)
#define log_fatal(...) log_log(LOG_FATAL, RAPIDS_FILE, __LINE__, __VA_ARGS__)

void log_set_udata(void *udata);
void log_set_lock(log_LockFn fn);
void log_set_fp(FILE *fp);
void log_set_level(int level);
void log_set_quiet(int enable);

void log_log(int level, const char *file, int line, const char *fmt, ...);

// Log `str` at INFO level, at most once per thread (DEBUG builds only).
// NOTE(review): the `#pragma omp single` here only has effect when called
// from inside an active parallel region — confirm call sites.
inline void print_str(std::string str) {
#ifdef DEBUG
  static thread_local bool is_first = true;
  if (is_first) {
#pragma omp single
    log_info("%s", str.c_str());
    is_first = false;
  }
#endif
}

#else  // USE_LOG not defined: all logging compiles away to nothing.
#include <string>

#define log_trace(...)
#define log_debug(...)
#define log_info(...)
#define log_warn(...)
#define log_error(...)
#define log_fatal(...)

// No-op counterpart of print_str so callers still compile with logging
// disabled. (Previously missing: any translation unit calling print_str
// without USE_LOG failed to build.)
inline void print_str(std::string) {}

#endif  // USE_LOG
matrix.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "pix.h"

/* Return a malloc'ed copy of the vlen-byte buffer v, or NULL when v is NULL
 * or vlen is 0.  On allocation failure calls pstop() (declared in pix.h;
 * presumably aborts the program -- TODO confirm it does not return). */
void *vdup(void *v, int vlen)
{
	void *r;

	if (v == NULL || vlen == 0) return NULL;
	if ((r = malloc(vlen)) == NULL)
		pstop("!!! vdup: not enough memory.\n");
	memcpy(r, v, vlen);
	return r;
}

/* Element-wise combine of two short vectors of length vlen into r:
 *   op == 1 : r = v1 + v2
 *   op == 0 : r = v1 - v2, clamped at 0 (saturating subtract)
 *   else    : r = v1 - v2
 * Each r[i] depends only on v1[i]/v2[i], so in-place use (r aliasing an
 * input) and the parallel loops are safe. */
void vadd_s(int vlen, int op, short *v1, short *v2, short *r)
{
	int i;

	if (op == 1) {
#pragma omp parallel for private(i)
		for (i=0; i < vlen; i++)
			r[i] = (short)(v1[i] + v2[i]);
	}
	else if (op == 0) {
#pragma omp parallel for private(i)
		for (i=0; i < vlen; i++)
			r[i] = (short)((v1[i] > v2[i]) ? (v1[i] - v2[i]) : 0);
	}
	else {
#pragma omp parallel for private(i)
		for (i=0; i < vlen; i++)
			r[i] = (short)(v1[i] - v2[i]);
	}
}

/* Return the maximum value of v[0..vlen-1]; if i_max is non-NULL, also
 * store the index of the (first) maximum.  Requires vlen >= 1. */
int vmax_s(int vlen, short *v, int *i_max)
{
	int i, im;
	short max;

	im = 0;
	max = v[0];
	for (i=1; i < vlen; i++) {
		if (max < v[i]) { im = i; max = v[i]; }
	}
	if (i_max != NULL) *i_max = im;
	return (int)max;
}

/* Find both the maximum and minimum of v[0..vlen-1] in a single pass.
 * *vmax / *vmin always receive the extremes; i_max / i_min optionally
 * receive the index of the first occurrence (pass NULL to skip).
 * Requires vlen >= 1. */
void vmaxmin_i(int vlen, int *v, int *vmax, int *i_max, int *vmin, int *i_min)
{
	int i, im1, im2;
	int max, min;

	im1 = 0; im2 = 0;
	max = v[0]; min = v[0];
	for (i=1; i < vlen; i++) {
		if (max < v[i]) { im1 = i; max = v[i]; }
		if (min > v[i]) { im2 = i; min = v[i]; }
	}
	if (i_max != NULL) *i_max = im1;
	if (i_min != NULL) *i_min = im2;
	*vmax = max;
	*vmin = min;
}

/* Copy the inclusive window sdim = {x1, x2, y1, y2} of the row-major
 * dim_x-wide matrix mx into the densely packed window-sized matrix mr.
 * No bounds checking against dim_x/dim_y is performed here. */
void mx_sub_s(int dim_x, int dim_y, int *sdim, short *mx, short *mr)
{
	int x1, x2, y1, y2, sdim_x;
	int i, j, xx, yy;

	x1 = sdim[0];
	x2 = sdim[1];
	y1 = sdim[2];
	y2 = sdim[3];
	sdim_x = x2-x1+1;

#pragma omp parallel for private(i,j,xx,yy)
	for (j=y1; j <= y2; j++) {
	for (i=x1; i <= x2; i++) {
		yy = (i-x1) + (j-y1)*sdim_x;
		xx = i + j*dim_x;
		mr[yy] = mx[xx];
	}}
}

// void mx_subT_s(int dim_x, int dim_y, int *sdim, short *mx, short *mr)
// {
//	int x1, x2, y1, y2, sdim_y;
//	int i, j, xx, yy;
//
//	x1 = sdim[0];
//	x2 = sdim[1];
//	y1 = sdim[2];
//	y2 = sdim[3];
//	sdim_y = y2-y1+1;
//
// #pragma omp parallel for private(i,j,xx,yy)
//	for (j=y1; j <= y2; j++) {
//	for (i=x1; i <= x2; i++) {
//		yy = (i-x1)*sdim_y + (j-y1);
//		if (i >= 0 && j >= 0 && i < dim_x && j < dim_y) {
//			xx = i + j*dim_x;
//			mr[yy] = mx[xx];
//		}
//		else
//			mr[yy] = 0;
//	}}
// }

/* Copy the inclusive window sdim = {x1, x2, y1, y2} of mx into mr, keeping
 * the elements at their original offsets (mr must be dim_x-wide like mx);
 * elements outside the window are left untouched. */
void mx_rsub_s(int dim_x, int dim_y, int *sdim, short *mx, short *mr)
{
	int x1, x2, y1, y2;
	int i, j, xx;

	x1 = sdim[0];
	x2 = sdim[1];
	y1 = sdim[2];
	y2 = sdim[3];

#pragma omp parallel for private(i,j,xx)
	for (j=y1; j <= y2; j++) {
	for (i=x1; i <= x2; i++) {
		xx = i + j*dim_x;
		mr[xx] = mx[xx];
	}}
}

/* Solve the N x N symmetric system A * X = B (NRHS right-hand sides) with
 * LAPACK dsysv_, using the upper triangle of A ("U").  A is factorized in
 * place and B is overwritten with the solution.  Returns the LAPACK info
 * code (0 == success).
 * NOTE(review): despite the name, this solves a linear system rather than
 * explicitly inverting A.  The trailing `1` is the hidden Fortran length
 * of the "U" character argument -- assumes the dsysv_ prototype (from
 * pix.h, presumably) declares it; TODO confirm the calling convention. */
int mx_inv(int N, double *A, int NRHS, double *B)
{
	double *work;
	int *ipiv, lwork, NB=16, info;
	char *uplo="U";

	lwork = N*NB;  /* workspace sized N * blocksize */
	work = malloc(lwork*sizeof(double));
	ipiv = malloc(N*sizeof(int));
	if (work==NULL || ipiv==NULL)
		pstop("!!! mx_inv: not enough memory.\n");

	dsysv_(uplo, &N, &NRHS, A, &N, ipiv, B, &N, work, &lwork, &info, 1);

	free(work);
	free(ipiv);
	return info;
}
omp_single_copyprivate.c
<ompts:test>
<ompts:testdescription>Test which checks the omp single copyprivate directive.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp single copyprivate</ompts:directive>
<ompts:dependences>omp parllel,omp critical</ompts:dependences>
<ompts:testcode>
#include "omp_testsuite.h"

/* j is threadprivate: each thread has its own copy.  copyprivate(j) must
 * broadcast the single-executing thread's value of j to all other threads. */
int j;
#pragma omp threadprivate(j)

int <ompts:testcode:functionname>omp_single_copyprivate</ompts:testcode:functionname>(FILE * logFile)
{
    <ompts:orphan:vars>
    int result;
    int nr_iterations;
    </ompts:orphan:vars>

    result = 0;
    nr_iterations = 0;
#pragma omp parallel
    {
	<ompts:orphan>
	int i;
	for (i = 0; i < LOOPCOUNT; i++)
	{
	    /*
	    int thread;
	    thread = omp_get_thread_num ();
	    */
	    /* Exactly one thread assigns j = i; copyprivate then copies that
	     * value into every thread's threadprivate j before they proceed. */
#pragma omp single <ompts:check>copyprivate(j)</ompts:check>
	    {
		nr_iterations++;
		j = i;
		/*printf ("thread %d assigns, j = %d, i = %d\n", thread, j, i);*/
	    }
	    /*	    #pragma omp barrier*/
	    /* If the broadcast worked, j == i in every thread, so each
	     * contribution (j - i) is zero and result stays 0. */
#pragma omp critical
	    {
		/*printf ("thread = %d, j = %d, i = %d\n", thread, j, i);*/
		result = result + j - i;
	    }
#pragma omp barrier
	} /* end of for */
	</ompts:orphan>
    } /* end of parallel */
    /* Pass iff no mismatch was observed and the single block ran exactly
     * once per loop iteration. */
    return ((result == 0) && (nr_iterations == LOOPCOUNT));
}
</ompts:testcode>
</ompts:test>
particleSimulator-special.c
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <omp.h>
#include <math.h>
#include <unistd.h>
#include <sys/types.h>
#include "parseFlags.h"

#define maxlength 1024

/* 2-D point / vector. */
typedef struct {
  double x;
  double y;
} coordinates;

/* One simulated body: position, velocity, mass, and display size. */
typedef struct {
  coordinates position;
  coordinates velocity;
  double mass;
  double size;
} particle;

/* Timestep-size change taking effect at a given step number. */
typedef struct {
  float dt;
  int timestep;
} timestepChange;

void usage ( char *progName ) {
  printf ( "usage: %s input particle.01.data dt [file] numSteps 50 output tmp/timestep threads 10 time [0|1]\n", progName );
}

/* N-body particle simulator: reads particles and a dt schedule, integrates
 * numsteps steps with OpenMP, and writes one snapshot file per step. */
int main(int argc, char **argv)
{
  int i,j,k,size,numsteps,filesize;
  int numthreads,timecheck,numFlags;
  double tmp,currentDT,nextDT;  /* NOTE(review): nextDT is never used */
  double start,end;
  double fx, fy;
  double dx, dy, dist, f;
  float px,py,vx,vy,mass;
  char *input, *output, *outputnumbered, *delta;
  int numDT;
  timestepChange *dt;
  particle *p;
  coordinates *force;
  FILE *infile,*outfile,*deltaT;

  /* get all the command-line arguments and put them on the books */
  commandLineFlagType flag[] = {
    /* stringToBeMatched, variableType, pointerToVariable */
    {"input", _string, &input },
    {"dt", _string, &delta },
    {"time", _int, &timecheck },
    {"numSteps", _int, &numsteps },
    {"output", _string, &output },
    {"threads", _int, &numthreads }
  };
  numFlags = sizeof ( flag ) / sizeof ( commandLineFlagType );
  usageErrorType parseErrorCode = parseArgs ( argc, argv, numFlags, flag ) ;
  if ( parseErrorCode == argError || parseErrorCode == parseError || argc < 7 ){
    usage( argv[0] );
    return ( 1 );
  }

  /* Load the dt schedule.  filesize/4 over-estimates the record count
   * (assumes each "%d %f" text record is at least 4 bytes -- TODO confirm
   * the input format guarantees this). */
  deltaT = fopen(delta,"r");
  if(deltaT == NULL){
    perror(delta);
    exit(1);
  }
  filesize = lseek(fileno(deltaT),0,SEEK_END);
  lseek(fileno(deltaT),-filesize,SEEK_CUR);
  dt = malloc(sizeof(timestepChange) * (int)(filesize/4));
  numDT = 0;
  while(fscanf(deltaT,"%d %f",&(dt[numDT].timestep),&(dt[numDT].dt)) > 0)
    numDT++;
  fclose(deltaT);

  /* Load the particles.  filesize/44 assumes one 44-byte text line per
   * particle (five floats) -- TODO confirm; a longer line under-allocates. */
  infile = fopen(input,"r");
  if(infile == NULL){
    perror(input);
    exit(1);
  }
  filesize = lseek(fileno(infile),0,SEEK_END);
  lseek(fileno(infile),-filesize,SEEK_CUR);
  p = malloc(sizeof(particle)*(filesize/44));
  size = 0;
  while(fscanf(infile,"%f %f %f %f %f",&px,&py,&vx,&vy,&mass) > 0) {
    p[size].position.x = px;
    p[size].position.y = py;
    p[size].velocity.x = vx;
    p[size].velocity.y = vy;
    p[size].mass = mass;
    p[size].size = sqrt(p[size].mass);
    size++;
  };
  fclose(infile);

  outputnumbered = malloc(sizeof(char)*maxlength);
  force = malloc(sizeof(coordinates)*size);
  omp_set_num_threads(numthreads);
  if(timecheck)
    start = omp_get_wtime();

  for(i = 0; i < numsteps; i++) { /* Each timeStep */
    /* Pick up a dt change scheduled for this step (j = numDT breaks the
     * scan).  NOTE(review): currentDT is read uninitialized unless the
     * schedule contains an entry for timestep 0 -- confirm the input
     * always provides one. */
    for(j = 0; j < numDT; j++) {
      if(dt[j].timestep == i) {
        currentDT = dt[j].dt;
        j = numDT;
      }
    }

    /* Pairwise force accumulation.  NOTE(review): the inner pragma opens a
     * nested parallel region; unless nested parallelism is enabled it runs
     * serially inside each outer thread. */
#pragma omp parallel for shared(p,force,size) private(fx,fy)
    for(j = 0; j < size; j++){ /* Each Particle */
      fx = 0;
      fy = 0;
#pragma omp parallel for shared(p) private(dist,f,dx,dy) reduction(+:fx,fy)
      for(k = 0; k < size; k++){ /* calculate the forces of each kth particle on the jth particle */
        if (j != k) {
          dx = (p[j].position.x - p[k].position.x);
          dy = (p[j].position.y - p[k].position.y);
          dist = hypot(dx,dy);
          /* skip very close pairs to avoid blow-up of the 1/dist term */
          if(dist > .03){
            f = -.02 * p[j].mass * p[k].mass;
            fx += dx/dist*f;
            fy += dy/dist*f;
          }
        }
      }
      force[j].x = fx;
      force[j].y = fy;
    }

    /* Integrate one step; particles leaving [-3, 3] bounce back with the
     * velocity divided by -300 (reverse direction plus heavy damping --
     * presumably intentional; TODO confirm the 300 factor). */
#pragma omp parallel for shared(p,force,dt) private(tmp)
    for(j = 0; j < size; j++){
      p[j].velocity.x += force[j].x/p[j].mass * currentDT;
      p[j].velocity.y += force[j].y/p[j].mass * currentDT;
      tmp = p[j].position.x + p[j].velocity.x * currentDT;
      if(tmp <= -3 || tmp >= 3) {
        p[j].velocity.x /= -300;
        tmp = p[j].position.x + p[j].velocity.x * currentDT;
      }
      p[j].position.x = tmp;
      tmp = p[j].position.y + p[j].velocity.y * currentDT;
      if(tmp <= -3|| tmp >= 3) {
        p[j].velocity.y /= -300;
        tmp = p[j].position.y + p[j].velocity.y * currentDT;
      }
      p[j].position.y = tmp;
    }

    /* Write the snapshot for this step: "<output>.<step>.txt". */
    snprintf(outputnumbered,maxlength,"%s.%d.txt",output,i);
    outfile = fopen(outputnumbered,"w");
    for(j = 0; j < size; j++){
      fprintf(outfile,"%f %f %f %f %f\n",p[j].position.x,p[j].position.y,p[j].velocity.x,p[j].velocity.y,p[j].mass);
    }
    fclose(outfile);
  }

  if(timecheck){
    end = omp_get_wtime();
    printf("%d %f\n",numthreads,end-start);
  }
}
compiled.c
/* Differential evolution MCMC stepper. */
#define _GNU_SOURCE // sincos isn't standard?
#include <math.h>
#include <stdlib.h>
#include <stdio.h> // for debugging

// Random library with a separate generator for each thread of
// an OpenMP threaded program. Assumes max 64 threads. If OpenMP is
// not available, then operates single threaded.
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif

// Limit to the number of threads so static thread-local data can be
// pre-allocated with the right size.
#define MAX_THREADS 64

// ==== Generator definition ====
// Uses:
//    Salmon, J. K.; Moraes, M. A.; Dror, R. O.; Shaw, D. E. (2011)
//    Parallel random numbers: as easy as 1, 2, 3. In Proceedings of 2011
//    International Conference for High Performance Computing, Networking,
//    Storage and Analysis; SC '11; ACM: New York, NY; p 16:1016:12.
//    doi: 10.1145/2063384.2063405
//    https://www.deshawresearch.com/resources_random123.html  v1.09
// may want to swap it for a different generator, and update the following
#include <Random123/threefry.h>
typedef threefry4x64_ctr_t r123_ctr_t;
typedef threefry4x64_key_t r123_key_t;
typedef threefry4x64_ukey_t r123_ukey_t;
#define r123_init threefry4x64keyinit
#define r123_next threefry4x64
#define R123_SIZE 4    // the 4 in 4x64
typedef uint64_t randint_t;  // the 64 in 4x64
const randint_t R123_MAX = 18446744073709551615UL;     // 2^64 - 1
const double R123_TO_01 = 1.0/18446744073709551616.0;  // 1 / 2^64
const double R123_TO_M11 = 2.0/18446744073709551616.0; // 2 / 2^64
// ==== end generator definition ====

// Per-thread generator state.
typedef struct {
    r123_ctr_t counter; // position in sequence
    r123_key_t key;     // seed
    r123_ctr_t values;  // cached values not yet used
    int have_normal;    // Have a precomputed random normal
    double normal;      // the precomputed random normal
} Random;
Random streams[MAX_THREADS]; // Max of 64 different threads in OpenMP

// Map a raw 64-bit draw onto the open interval (0, 1).
double u_01_open(randint_t v) {
    return (((double)v) + 0.5)*R123_TO_01;
}
// Map a raw 64-bit draw (reinterpreted as signed) onto (-1, 1).
double u_m11_closed(randint_t v) {
    return ((double)((int64_t)v) + 0.5)*R123_TO_M11;
}

// Initialize the calling thread's stream from seed.  Every thread uses the
// same key, so all threads currently produce the SAME sequence (the
// per-thread key line is commented out -- presumably deliberate; TODO
// confirm before relying on thread independence).
void _rand_init(randint_t seed)
{
    int thread_id = omp_get_thread_num();
    Random *rng = streams + thread_id;
    r123_ukey_t user_key;
    // NOTE(review): declared with the key type but assigned to rng->counter;
    // assumes ctr/key types are assignment-compatible here -- TODO confirm.
    r123_key_t counter;
    int k;

    if (thread_id >= MAX_THREADS) {
        printf("Too many threads for random number generator.  Set OMP_NUM_THREADS=%d\n", MAX_THREADS);
        exit(1);
    }

    for (k = 0; k < R123_SIZE; k++) user_key.v[k] = counter.v[k] = 0;
    user_key.v[0] = seed;
    //user_key.v[1] = omp_get_thread_num();
    rng->key = r123_init(user_key);
    rng->counter = counter;
    //printf("%d initializing %p with seed %llu and counter %llu\n", omp_get_thread_num(), rng, rng->key.v[0], rng->counter.v[0]);
    rng->have_normal = 0;
}

// Seed the generator on every OpenMP thread (or just the main thread when
// OpenMP is unavailable).
void rand_init(randint_t seed)
{
#ifdef _OPENMP
#pragma omp parallel
#endif
    _rand_init(seed);
}

// Next raw 64-bit value for the calling thread.  counter.v[0] doubles as
// the block counter (a fresh block of R123_SIZE values is generated when it
// is a multiple of R123_SIZE) and as the index into the cached block.
randint_t rand_next(void)
{
    Random *rng = streams+omp_get_thread_num();
    //printf("retrieving from %p with key %ld and counter %ld\n",rng, rng->key.v[0], rng->counter.v[0]);
    if (rng->counter.v[0]%R123_SIZE == 0) {
        rng->values = r123_next(rng->counter, rng->key);
    }
    return rng->values.v[(rng->counter.v[0]++)%R123_SIZE];
}

// Standard normal deviate for the calling thread.
double randn(void)
{
    Random *rng = &streams[omp_get_thread_num()];
    if (rng->have_normal) {
        // use the cached second value from the previous transform
        rng->have_normal = 0;
        return rng->normal;
    } else {
        // Box-Muller transform converts two ints into two normals
        // Return one now and save the other for later.
        double x, y, r, arg;
        arg = M_PI*u_m11_closed(rand_next());
        x = sin(arg);
        y = cos(arg);
        r = sqrt(-2. * log(u_01_open(rand_next())));
        rng->have_normal = 1;
        rng->normal = y*r;
        return x*r;
    }
}

// Uniform integer in [0, range).  Uses simple modulo, so there is a very
// small bias toward low values when range does not divide 2^64.
randint_t randint(randint_t range)
{
    while (1) {
        randint_t value = rand_next();
        // TODO: correct for very tiny bias against higher numbers.
        // Something like the following?
        //     if (value > R123_MAX-(R123_MAX%range)) continue;
        return value%range;
    }
}

// Uniform double on (0, 1).
double randu(void) {
    return u_01_open(rand_next());
}

/* draw k unique from n objects not equal to q */
// Specialized for k << n.  If n is large and k -> n then argsort on
// a random uniform draw is a better bet.  If you don't want to exclude
// any numbers, set not_matching to total_num.
// Rejection sampling: re-draw until the candidate differs from
// not_matching and from every value already chosen.
void rand_draw(int num_to_draw, int total_num, randint_t not_matching, randint_t p[])
{
    int i, j;
    for (i=0; i < num_to_draw; i++) {
        while (1) {
            int proposed = randint(total_num);
            int unique = (proposed != not_matching);
            for (j=0; j < i && unique; j++) unique = (proposed != p[j]);
            if (unique) { p[i] = proposed; break; }
            // TODO: maybe check that num_to_draw is total_num + 1
        }
    }
}

#if 0
// Ad-hoc smoke test for the RNG, disabled from the normal build.
#include <stdio.h>
#include <string.h>
#include <time.h>
randint_t random_seed()
{
    randint_t seed;
    FILE* urandom = fopen("/dev/urandom", "r");
    fread(&seed, sizeof(seed), 1, urandom);
    fclose(urandom);
    return seed;
}
void main(int argc, char *argv[])
{
    int j, k;
    randint_t seed, draw[10];
    seed = (argc == 1 ? random_seed() : atoi(argv[1]));
    printf("seed: %ld\n", seed);
    rand_init(seed);
    printf("i randint(10):\n");
#pragma omp parallel for
    for (k=0; k < 10; k++) printf("i %d %ld\n", omp_get_thread_num(), randint(10));
    printf("u randu:\n");
#pragma omp parallel for
    for (k=0; k < 10; k++) printf("u %d %g\n", omp_get_thread_num(), randu());
    printf("n randn:\n");
#pragma omp parallel for
    for (k=0; k < 10; k++) printf("n %d %g\n", omp_get_thread_num(), randn());
    printf("d rand_draw(10,52,!5):\n");
#pragma omp parallel for private(draw, j)
    for (k=0; k < 10; k++) {
        char buf[200];
        rand_draw(10, 52, 5, draw);
        sprintf(buf, "d %d ", omp_get_thread_num());
        for (j=0; j < 10; j++) sprintf(buf+strlen(buf), "%ld ", draw[j]);
        printf("%s\n", buf);
    }
}
#endif

// Proposal algorithms for the DE-MCMC stepper below.
#define _SNOOKER 0
#define _DE 1
#define _DIRECT 2

#define EPS 1e-6
#define MAX_CHAINS 20

/* Generates offspring using METROPOLIS HASTINGS monte-carlo markov chain

   The number of chains may be smaller than the population size if the
   population is selected from both the current generation and the
   ancestors.
*/
/* Generate one proposed move for chain qq.
 *
 * The update rule is chosen at random: snooker with probability
 * snooker_rate, differential evolution (DE) with probability
 * de_rate - snooker_rate, otherwise a plain one-pair difference.
 *
 * Inputs:  pop[Nchain*Nvar] current population, CR[NCR][2] crossover
 *          (value, weight) table, max_pairs/eps/noise/scale tuning knobs.
 * Outputs: x_new[Nvar] proposed point, step_alpha[qq] Metropolis
 *          correction factor, CR_used[qq] crossover ratio used (0 for
 *          snooker/direct moves).
 */
void _perform_step(int qq, int Nchain, int Nvar, int NCR,
        double pop[], double CR[][2],
        int max_pairs, double eps,
        double snooker_rate, double de_rate, double noise, double scale,
        double x_new[], double step_alpha[], double CR_used[])
{
    randint_t chains[2*MAX_CHAINS];
    double u = randu();
    /* Partition U(0,1): [0,snooker_rate) -> snooker,
     * [snooker_rate,de_rate) -> DE, [de_rate,1) -> direct. */
    int alg = (u < snooker_rate ? _SNOOKER : u < de_rate ? _DE : _DIRECT);
    double *xin = &pop[qq*Nvar];  /* current position of chain qq */
    int k;

    //for (k=0; k < NCR; k++) printf("CR %d: %g %g\n", k, CR[k][0], CR[k][1]);
    //printf("pop in c: ");
    //for (k=0; k < Nvar; k++) printf("%g ", pop[qq*Nvar+k]);
    //printf("\n");
    switch (alg) {
    case _DE: // Use DE with cross-over ratio
      {
        int var, num_crossover, active;
        double crossover_ratio, CR_cdf, distance, jiggle;

        // Select the number of vector pair differences to use in update
        // using k ~ discrete U[1, max pairs]
        int num_pairs = randint(max_pairs)+1;
        // [PAK: same as k = DEversion[qq, 1] in matlab version]

        // Weight the size of the jump inversely proportional to the
        // number of contributions, both from the parameters being
        // updated and from the population defining the step direction.
        double gamma_scale = 2.38/sqrt(2 * Nvar * num_pairs);
        // [PAK: same as F=Table_JumpRate[len(vars), k] in matlab version]

        // Select 2*k members at random different from the current member
        rand_draw(2*num_pairs, Nchain, qq, chains);

        // Select crossover ratio by inverting the CDF of the CR weights
        u = randu();
        CR_cdf = 0.;
        for (k=0; k < NCR-1; k++) {
            CR_cdf += CR[k][1];
            if (u <= CR_cdf) break;
        }
        crossover_ratio = CR[k][0];
        CR_used[qq] = crossover_ratio;

        // Select the dims to update based on the crossover ratio, making
        // sure at least one dim is selected (the var == Nvar pass forces
        // a random dimension when none was picked).
        num_crossover = 0;
        for (var=0; var < Nvar || num_crossover == 0; var++) {
            if (var == Nvar) {
                active = randint(Nvar);
            } else if (randu() <= crossover_ratio) {
                active = var;
            } else {
                x_new[var] = 0.;
                continue;
            }
            num_crossover++;

            // Find and average step from the selected pairs
            distance = 0.;
            for (k=0; k < num_pairs; k++) {
                distance += pop[chains[2*k]*Nvar + active]
                            - pop[chains[2*k+1]*Nvar + active];
            }

            // Apply that step with F scaling and noise
            jiggle = 1 + eps * (2 * randu() - 1);
            x_new[active] = jiggle*gamma_scale*distance;
        }
        step_alpha[qq] = 1.;  /* symmetric proposal: no correction */
        break;
      }
    case _SNOOKER: // Use snooker update
      {
        double num, denom, gamma_scale;

        // Select current and three others
        rand_draw(3, Nchain, qq, chains);
        double *z  = &pop[chains[0]*Nvar];
        double *R1 = &pop[chains[1]*Nvar];
        double *R2 = &pop[chains[2]*Nvar];

        // Find the step direction and scale it to the length of the
        // projection of R1-R2 onto the step direction.
        // TODO: population sometimes not unique!
        for (k=0; k < Nvar; k++) x_new[k] = xin[k] - z[k];
        // Degenerate direction (x == z): restart from a tiny random one.
        while (1) {
            denom = 0.;
            for (k=0; k < Nvar; k++) denom += x_new[k]*x_new[k];
            if (denom != 0.) break;
            for (k=0; k < Nvar; k++) x_new[k] = EPS*randn();
        }
        num = 0.;
        for (k=0; k < Nvar; k++) num += ((R1[k]-R2[k])*x_new[k]);

        // Step using gamma of 2.38/sqrt(2) + U(-0.5, 0.5)
        gamma_scale = (1.2 + randu())*num/denom;
        for (k=0; k < Nvar; k++) x_new[k] *= gamma_scale;

        // Scale Metropolis probability by (||xi* - z||/||xi - z||)^(d-1).
        // num/denom is the ratio of SQUARED norms, so the exponent is
        // (Nvar-1)/2.  BUGFIX: the exponent was previously computed with
        // integer division, truncating e.g. 0.5 -> 0 for Nvar == 2.
        num = 0.;
        for (k=0; k < Nvar; k++)
            num += (xin[k]+x_new[k]-z[k])*(xin[k]+x_new[k]-z[k]);
        step_alpha[qq] = pow(num/denom, 0.5*(Nvar-1));
        CR_used[qq] = 0.;
        break;
      }
    case _DIRECT: // Use one pair and all dimensions
      {
        // Note that there is no F scaling, dimension selection or noise
        rand_draw(2, Nchain, qq, chains);
        double *R1 = &pop[chains[0]*Nvar];
        double *R2 = &pop[chains[1]*Nvar];
        for (k=0; k < Nvar; k++) x_new[k] = R1[k] - R2[k];
        step_alpha[qq] = 1.;
        CR_used[qq] = 0.;
        break;
      }
    }
    //printf("%d -> ", alg);
    //for (k=0; k < Nvar; k++) printf("%g ", x_new[k]);
    //printf("\n");

    // Update x_old with delta_x and noise
    for (k=0; k < Nvar; k++) x_new[k] *= scale;
    // [PAK] The noise term needs to depend on the fitting range
    // of the parameter rather than using a fixed noise value for all
    // parameters.  The current parameter value is a pretty good proxy
    // in most cases (i.e., relative noise), but it breaks down if the
    // parameter is zero, or if the range is something like 1 +/- eps.

    // absolute noise
    //for (k=0; k < Nvar; k++) x_new[k] += xin[k] + scale*noise*randn();
    // relative noise
    for (k=0; k < Nvar; k++) x_new[k] += xin[k]*(1.+scale*noise*randn());
    //printf("%d -> ", alg);
    //for (k=0; k < Nvar; k++) printf("%g ", x_new[k]);
    //printf("\n");

    // no noise
    //for (k=0; k < Nvar; k++) x_new[k] += xin[k];
}

/* Propose one offspring per chain.  The DE:direct split is fixed at 80:20
 * of the non-snooker probability mass. */
void de_step(int Nchain, int Nvar, int NCR,
        double pop[], double CR[][2],
        int max_pairs, double eps,
        double snooker_rate, double noise, double scale,
        double x_new[], double step_alpha[], double CR_used[])
{
    int qq;
    double de_rate = snooker_rate + 0.8 * (1-snooker_rate);
    //Choose snooker, de or direct according to snooker_rate, and 80:20
    // ratio of de to direct.

    // Chains evolve using information from other chains to create offspring
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (qq = 0; qq < Nchain; qq++) {
        _perform_step(qq, Nchain, Nvar, NCR, pop, CR, max_pairs, eps,
                      snooker_rate, de_rate, noise, scale,
                      &x_new[qq*Nvar], step_alpha, CR_used);
    }
}

/* Reflect out-of-bounds values back into [low, high]; values still out of
 * range after one reflection (overshoot) are re-drawn uniformly.
 * BUGFIX: k must be private per thread (it was shared, a data race). */
void bounds_reflect(int Nchain, int Nvar, double pop[], double low[], double high[])
{
    int k, p, idx;
#ifdef _OPENMP
#pragma omp parallel for private(k, idx)
#endif
    for (p=0; p < Nchain; p++) {
        for (k=0; k < Nvar; k++) {
            idx = p*Nvar+k;
            if (pop[idx] < low[k]) {
                pop[idx] = 2*low[k] - pop[idx];
            } else if (pop[idx] > high[k]) {
                pop[idx] = 2*high[k] - pop[idx];
            }
            if (pop[idx] < low[k] || pop[idx] > high[k]) {
                pop[idx] = low[k] + randu()*(high[k]-low[k]);
            }
        }
    }
}

/* Clamp out-of-bounds values to the nearest bound.
 * BUGFIX: k must be private per thread (it was shared, a data race). */
void bounds_clip(int Nchain, int Nvar, double pop[], double low[], double high[])
{
    int k, p, idx;
#ifdef _OPENMP
#pragma omp parallel for private(k, idx)
#endif
    for (p=0; p < Nchain; p++) {
        for (k=0; k < Nvar; k++) {
            idx = p*Nvar+k;
            if (pop[idx] < low[k]) {
                pop[idx] = low[k];
            } else if (pop[idx] > high[k]) {
                pop[idx] = high[k];
            }
        }
    }
}

/* Wrap out-of-bounds values around to the opposite bound (periodic), or
 * reflect when the opposite bound is infinite; re-draw uniformly if still
 * out of range.
 * BUGFIX: k must be private per thread (it was shared, a data race). */
void bounds_fold(int Nchain, int Nvar, double pop[], double low[], double high[])
{
    int k, p, idx;
#ifdef _OPENMP
#pragma omp parallel for private(k, idx)
#endif
    for (p=0; p < Nchain; p++) {
        for (k=0; k < Nvar; k++) {
            idx = p*Nvar+k;
            if (pop[idx] < low[k]) {
                if (isinf(high[k])) {
                    pop[idx] = 2*low[k] - pop[idx];
                } else {
                    pop[idx] = high[k] - (low[k] - pop[idx]);
                }
            } else if (pop[idx] > high[k]) {
                if (isinf(low[k])) {
                    pop[idx] = 2*high[k] - pop[idx];
                } else {
                    pop[idx] = low[k] - (high[k] - pop[idx]);
                }
            }
            if (pop[idx] < low[k] || pop[idx] > high[k]) {
                pop[idx] = low[k] + randu()*(high[k]-low[k]);
            }
        }
    }
}

/* Replace out-of-bounds values with a uniform draw from [low, high], or a
 * reflection when the opposite bound is infinite.
 * BUGFIX: k must be private per thread (it was shared, a data race). */
void bounds_random(int Nchain, int Nvar, double pop[], double low[], double high[])
{
    int k, p, idx;
#ifdef _OPENMP
#pragma omp parallel for private(k, idx)
#endif
    for (p=0; p < Nchain; p++) {
        for (k=0; k < Nvar; k++) {
            idx = p*Nvar+k;
            if (pop[idx] < low[k]) {
                if (isinf(high[k])) {
                    pop[idx] = 2*low[k] - pop[idx];
                } else {
                    pop[idx] = low[k] + randu()*(high[k]-low[k]);
                }
            } else if (pop[idx] > high[k]) {
                if (isinf(low[k])) {
                    pop[idx] = 2*high[k] - pop[idx];
                } else {
                    pop[idx] = low[k] + randu()*(high[k]-low[k]);
                }
            }
        }
    }
}

/* No-op bounds handler: leave out-of-bounds values untouched. */
void bounds_ignore(int Nchain, int Nvar, double pop[], double low[], double high[])
{
}
bfs_replicated.c
/* Copyright (C) 2010 The Trustees of Indiana University.                  */
/*                                                                         */
/* Use, modification and distribution is subject to the Boost Software     */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt)                                   */
/*                                                                         */
/*  Authors: Jeremiah Willcock                                             */
/*           Andrew Lumsdaine                                              */

/* Replicated-queue BFS: every rank holds the full frontier bitmap
 * (exchanged with MPI_Allgather each level) plus a per-word summary
 * bitmap to skip empty regions.  This variant uses int32_t vertex ids;
 * the original int64_t code is kept alongside in comments. */

#define _GNU_SOURCE
#include "common.h"
#include "oned_csr.h"
#include "onesided.h"
#include <mpi.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <limits.h>
#include <assert.h>

static oned_csr_graph g;                      /* local CSR slice of the graph */
static int g_lg_local_queue_size;             /* log2 of the per-rank queue bits */
static int32_t g_local_queue_summary_size; //static int64_t g_local_queue_summary_size;
static int32_t g_local_queue_size; //static int64_t g_local_queue_size;
static int32_t g_global_queue_summary_size; //static int64_t g_global_queue_summary_size;
static int32_t g_global_queue_size; //static int64_t g_global_queue_size;
static unsigned long* g_in_queue;             /* frontier being scanned (all ranks) */
static unsigned long* g_in_queue_summary;     /* one bit per in-queue word */
static unsigned long* g_out_queue;            /* next frontier (local portion) */
static unsigned long* g_out_queue_summary;    /* one bit per out-queue word */
static unsigned long* g_visited;              /* local visited bitmap */

/* Size and allocate all bitmaps; sizes are rounded so the local queue is a
 * power of two covering max_nlocalverts.
 * NOTE(review): ulong_bits / ulong_bits_squared are not defined in this
 * function or at file scope as shown -- presumably macros from common.h;
 * confirm. */
static void allocate_memory(void) {
  int32_t maxlocalverts = g.max_nlocalverts; // int64_t maxlocalverts = g.max_nlocalverts;
  // int lg_local_queue_size = lg_int64_t((maxlocalverts + ulong_bits_squared - 1) / ulong_bits_squared * ulong_bits_squared);
  int lg_local_queue_size = lg_int32_t((maxlocalverts + ulong_bits_squared - 1) / ulong_bits_squared * ulong_bits_squared);
  g_lg_local_queue_size = lg_local_queue_size;
  // int64_t local_queue_summary_size = (INT64_C(1) << lg_local_queue_size) / ulong_bits_squared;
  int32_t local_queue_summary_size = (INT32_C(1) << lg_local_queue_size) / ulong_bits_squared;
  int32_t local_queue_size = local_queue_summary_size * ulong_bits; // int64_t local_queue_size = local_queue_summary_size * ulong_bits;
  g_local_queue_summary_size = local_queue_summary_size;
  g_local_queue_size = local_queue_size;
  int32_t global_queue_summary_size = MUL_SIZE(local_queue_summary_size); // int64_t global_queue_summary_size = MUL_SIZE(local_queue_summary_size);
  int32_t global_queue_size = MUL_SIZE(local_queue_size); // int64_t global_queue_size = MUL_SIZE(local_queue_size);
  g_global_queue_summary_size = global_queue_summary_size;
  g_global_queue_size = global_queue_size;
  g_in_queue = (unsigned long*)xmalloc(global_queue_size * sizeof(unsigned long));
  g_in_queue_summary = (unsigned long*)xmalloc(global_queue_summary_size * sizeof(unsigned long));
  g_out_queue = (unsigned long*)xmalloc(local_queue_size * sizeof(unsigned long));
  g_out_queue_summary = (unsigned long*)xmalloc(local_queue_summary_size * sizeof(unsigned long));
  g_visited = (unsigned long*)xmalloc(local_queue_size * sizeof(unsigned long));
}

/* Release all bitmaps; safe to call repeatedly (pointers are nulled). */
static void deallocate_memory(void) {
  free(g_in_queue); g_in_queue = NULL;
  free(g_in_queue_summary); g_in_queue_summary = NULL;
  free(g_out_queue); g_out_queue = NULL;
  free(g_out_queue_summary); g_out_queue_summary = NULL;
  free(g_visited); g_visited = NULL;
}

void make_graph_data_structure(const tuple_graph* const tg) {
  convert_graph_to_oned_csr(tg, &g);
  /* Allocate then immediately free: only verifies the memory is available
   * up front; run_bfs re-allocates per search. */
  allocate_memory(); /* Make sure all of the space is available */
  deallocate_memory();
}

void free_graph_data_structure(void) {
  free_oned_csr_graph(&g);
  /* deallocate_memory(); */
}

/* pred[] entries also pack the BFS level (see run_bfs), so the depth map
 * is produced as a side effect of the search. */
int bfs_writes_depth_map(void) {return 1;}

/* This version is the traditional level-synchronized BFS using two queues.
 * A bitmap is used to indicate which vertices have been visited.  Messages
 * are sent and processed asynchronously throughout the code to hopefully
 * overlap communication with computation. */
//void run_bfs(int64_t root, int64_t* pred)
void run_bfs(int32_t root, int32_t* pred) {
  allocate_memory();
  const ptrdiff_t nlocalverts = g.nlocalverts;
  const size_t* const restrict rowstarts = g.rowstarts;
  const int32_t* const restrict column = g.column; // const int64_t* const restrict column = g.column;

  /* Set up the visited bitmap. */
  const int ulong_bits = sizeof(unsigned long) * CHAR_BIT;
  const int ulong_bits_squared = ulong_bits * ulong_bits;
  int32_t local_queue_summary_size = g_local_queue_summary_size; // int64_t local_queue_summary_size = g_local_queue_summary_size;
  int32_t local_queue_size = g_local_queue_size; // int64_t local_queue_size = g_local_queue_size;
  int lg_local_queue_size = g_lg_local_queue_size;
  int32_t global_queue_summary_size = g_global_queue_summary_size; // int64_t global_queue_summary_size = g_global_queue_summary_size;
  int32_t global_queue_size = g_global_queue_size; // int64_t global_queue_size = g_global_queue_size;

  /* Map a global vertex id to its bit position in the replicated queue:
   * owner rank selects the 2^lg_local_queue_size-sized chunk, local id
   * selects the bit within it. */
//#define SWIZZLE_VERTEX(c) (((int64_t)(VERTEX_OWNER(c)) << lg_local_queue_size) | (int64_t)(VERTEX_LOCAL(c)))
#define SWIZZLE_VERTEX(c) (((int32_t)(VERTEX_OWNER(c)) << lg_local_queue_size) | (int32_t)(VERTEX_LOCAL(c)))

#if 0
  // int64_t* restrict column_swizzled = (int64_t*)xmalloc(nlocaledges * sizeof(int64_t));
  int32_t* restrict column_swizzled = (int32_t*)xmalloc(nlocaledges * sizeof(int32_t));
  {
    size_t i;
    for (i = 0; i < nlocaledges; ++i) {
      int32_t c = column[i]; // int64_t c = column[i];
      column_swizzled[i] = SWIZZLE_VERTEX(c);
    }
  }
#endif

  unsigned long* restrict in_queue = g_in_queue;
  memset(in_queue, 0, global_queue_size * sizeof(unsigned long));
  unsigned long* restrict in_queue_summary = g_in_queue_summary;
  memset(in_queue_summary, 0, global_queue_summary_size * sizeof(unsigned long));
  unsigned long* restrict out_queue = g_out_queue;
  unsigned long* restrict out_queue_summary = g_out_queue_summary;
  unsigned long* restrict visited = g_visited;
  memset(visited, 0, local_queue_size * sizeof(unsigned long));

  /* SET_IN: mark a global vertex in the in-queue and its summary bitmap.
   * TEST_IN: test a pre-swizzled id, consulting the summary word first.
   * TEST/SET_VISITED_LOCAL: local visited bitmap; the non-atomic variant is
   * safe because each OpenMP thread owns whole ulong_bits-aligned ranges. */
//#define SET_IN(v) do {int64_t vs = SWIZZLE_VERTEX(v); size_t word_idx = vs / ulong_bits; int bit_idx = vs % ulong_bits; unsigned long mask = (1UL << bit_idx);
#define SET_IN(v) do {int32_t vs = SWIZZLE_VERTEX(v); size_t word_idx = vs / ulong_bits; int bit_idx = vs % ulong_bits; unsigned long mask = (1UL << bit_idx); in_queue_summary[word_idx / ulong_bits] |= (1UL << (word_idx % ulong_bits)); in_queue[word_idx] |= mask;} while (0)
#define TEST_IN(vs) (((in_queue_summary[vs / ulong_bits / ulong_bits] & (1UL << ((vs / ulong_bits) % ulong_bits))) != 0) && ((in_queue[vs / ulong_bits] & (1UL << (vs % ulong_bits))) != 0))
#define TEST_VISITED_LOCAL(v) ((visited[(v) / ulong_bits] & (1UL << ((v) % ulong_bits))) != 0)
// #define SET_VISITED_LOCAL(v) do {size_t word_idx = (v) / ulong_bits; int bit_idx = (v) % ulong_bits; unsigned long mask = (1UL << bit_idx); __sync_fetch_and_or(&visited[word_idx], mask); __sync_fetch_and_or(&out_queue[word_idx], mask);} while (0)
#define SET_VISITED_LOCAL(v) do {size_t word_idx = (v) / ulong_bits; int bit_idx = (v) % ulong_bits; unsigned long mask = (1UL << bit_idx); visited[word_idx] |= mask; out_queue[word_idx] |= mask;} while (0)

  SET_IN(root);
  /* Initialize all predecessors to "unvisited". */
  {ptrdiff_t i; _Pragma("omp parallel for schedule(static)") for (i = 0; i < nlocalverts; ++i) pred[i] = -1;}
  if (VERTEX_OWNER(root) == rank) {
    pred[VERTEX_LOCAL(root)] = root;
    SET_VISITED_LOCAL(VERTEX_LOCAL(root));
  }
  uint16_t cur_level = 0;
  while (1) {
    ++cur_level;
#if 0
    if (rank == 0) fprintf(stderr, "BFS level %" PRIu16 "\n", cur_level);
#endif
    memset(out_queue, 0, (nlocalverts + ulong_bits - 1) / ulong_bits * sizeof(unsigned long));
    // memset(out_queue_summary, 0, (nlocalverts + ulong_bits_squared - 1) / ulong_bits_squared * sizeof(unsigned long));
    ptrdiff_t i, ii;
#if 0
#pragma omp parallel for schedule(static)
    for (i = 0; i < global_queue_summary_size; ++i) {
      unsigned long val = 0UL;
      int j;
      unsigned long mask = 1UL;
      for (j = 0; j < ulong_bits; ++j, mask <<= 1) {
        if (in_queue[i * ulong_bits + j]) val |= mask;
      }
      in_queue_summary[i] = val;
    }
#endif
    unsigned long not_done = 0;

    /* Bottom-up sweep: each unvisited local vertex scans its neighbors for
     * one that is in the current frontier.  Iterating in ulong_bits-sized
     * strides keeps each bitmap word owned by a single thread. */
#pragma omp parallel for schedule(static) reduction(|:not_done)
    for (ii = 0; ii < nlocalverts; ii += ulong_bits) {
      size_t i, i_end = ii + ulong_bits;
      if (i_end > nlocalverts) i_end = nlocalverts;
      for (i = ii; i < i_end; ++i) {
        if (!TEST_VISITED_LOCAL(i)) {
          size_t j, j_end = rowstarts[i + 1];
          for (j = rowstarts[i]; j < j_end; ++j) {
            int32_t v1 = column[j]; // int64_t v1 = column[j];
            int32_t v1_swizzled = SWIZZLE_VERTEX(v1); // int64_t v1_swizzled = SWIZZLE_VERTEX(v1);
            if (TEST_IN(v1_swizzled)) {
              /* Pack predecessor (low 24 bits) and level (high 8 bits).
               * NOTE(review): this caps usable vertex ids at 2^24 and
               * levels at 2^8 -- presumably guaranteed elsewhere; the
               * 64-bit original used 48/16 bits. */
              pred[i] = (v1 & INT32_C(0xFFFFFF)) | ((int32_t)cur_level << 24);
              // pred[i] = (v1 & INT64_C(0xFFFFFFFFFFFF)) | ((int64_t)cur_level << 48);
              not_done |= 1;
              SET_VISITED_LOCAL(i);
              break;
            }
          }
        }
      }
    }

#if 1
    /* Fold the new out-queue into visited and rebuild its summary bitmap. */
#pragma omp parallel for schedule(static)
    for (i = 0; i < local_queue_summary_size; ++i) {
      unsigned long val = 0UL;
      int j;
      unsigned long mask = 1UL;
      for (j = 0; j < ulong_bits; ++j, mask <<= 1) {
        unsigned long full_val = out_queue[i * ulong_bits + j];
        visited[i * ulong_bits + j] |= full_val;
        if (full_val) val |= mask;
      }
      out_queue_summary[i] = val;
      // not_done |= val;
    }
#endif
    /* Global termination test, then exchange the new frontier. */
    MPI_Allreduce(MPI_IN_PLACE, &not_done, 1, MPI_UNSIGNED_LONG, MPI_BOR, MPI_COMM_WORLD);
    if (not_done == 0) break;
    MPI_Allgather(out_queue, local_queue_size, MPI_UNSIGNED_LONG, in_queue, local_queue_size, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
    MPI_Allgather(out_queue_summary, local_queue_summary_size, MPI_UNSIGNED_LONG, in_queue_summary, local_queue_summary_size, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
  }
  deallocate_memory();
}

/* For each queried global vertex, report its owning rank and local index. */
//void get_vertex_distribution_for_pred(size_t count, const int64_t* vertex_p, int* owner_p, size_t* local_p)
void get_vertex_distribution_for_pred(size_t count, const int32_t* vertex_p, int* owner_p, size_t* local_p) {
  const int32_t* restrict vertex = vertex_p; //const int64_t* restrict vertex = vertex_p;
  int* restrict owner = owner_p;
  size_t* restrict local = local_p;
  ptrdiff_t i;
#pragma omp parallel for
  for (i = 0; i < (ptrdiff_t)count; ++i) {
    int32_t v = vertex[i]; // int64_t v = vertex[i];
    owner[i] = VERTEX_OWNER(v);
    local[i] = VERTEX_LOCAL(v);
  }
}

/* Inverse of the distribution above: (rank, local index) -> global id. */
//int64_t vertex_to_global_for_pred(int v_rank, size_t v_local)
int32_t vertex_to_global_for_pred(int v_rank, size_t v_local) {
  return VERTEX_TO_GLOBAL(v_rank, v_local);
}

size_t get_nlocalverts_for_pred(void) {
  return g.nlocalverts;
}
364191_icc_so12.c
/* Auto-generated-style (Devito-like) time-tiled 3D acoustic wave kernel.
 * Kernel() drives a wavefront ("time-skewed") traversal over space-time
 * tiles; bf0() updates one spatial block with a 12th-order Laplacian
 * stencil plus damping and precomputed source injection.
 * NOTE(review): generated code -- kept byte-for-byte; only comments added. */

#define _POSIX_C_SOURCE 200809L
/* Wall-clock section timers accumulated into struct profiler. */
#define START_TIMER(S) \
  struct timeval start_##S, end_##S; \
  gettimeofday(&start_##S, NULL);
#define STOP_TIMER(S, T) \
  gettimeofday(&end_##S, NULL); \
  T->S += (double)(end_##S.tv_sec - start_##S.tv_sec) + (double)(end_##S.tv_usec - start_##S.tv_usec) / 1000000;

#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include <stdio.h>
#include "omp.h"

#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

/* Array descriptor: raw data plus padded/domain/halo size metadata. */
struct dataobj
{
  void *restrict data;
  int *size;
  int *npsize;
  int *dsize;
  int *hsize;
  int *hofs;
  int *oofs;
};

/* Per-section accumulated wall-clock times. */
struct profiler
{
  double section0;
  double section1;
};

void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_id_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw);

/* Top-level driver: iterates space-time tiles and calls bf0 per tile.
 * block_sizes supplies {tile x, tile y, block x, block y}; returns 0. */
int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict sp_source_id_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine, struct profiler *timers)
{
  /* Cast flat buffers to variably-modified array types for natural indexing. */
  int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  int(*restrict sp_source_id)[sp_source_id_vec->size[1]][sp_source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_id_vec->size[1]][sp_source_id_vec->size[2]])sp_source_id_vec->data;
  /* NOTE(review): u is cast here but not referenced in Kernel itself. */
  float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;

  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);

  int xb_size = block_sizes[0];
  int y0_blk0_size = block_sizes[3];
  int x0_blk0_size = block_sizes[2];
  int yb_size = block_sizes[1];

  printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);

  //for (int time = time_m, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3); time <= time_M; time += 1, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3))
  //{
  int sf = 6;                                   /* time-skewing factor */
  int t_blk_size = 2 * sf * (time_M - time_m);  /* skewed time-block extent */

  START_TIMER(section0)
  for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
  {
    /* Spatial tiles are swept in skewed coordinates (shifted by sf*dt). */
    for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1)
    {
      for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1)
      {
        /* t0/t1/t2 rotate through the 3 time buffers of u. */
        for (int time = t_blk, t1 = (time + 2) % (3), t0 = (time) % (3), t2 = (time + 1) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t1 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3))
        {
          int tw = ((time / sf) % (time_M - time_m + 1));  /* unskewed time index for source table */
          bf0(damp_vec, dt, u_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_id_vec, save_src_u_vec, source_id_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, time, tw);
          //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
          //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, nthreads);
          //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
        }
      }
    }
    /* End section0 */
  }
  STOP_TIMER(section0, timers)

  /* section1 is empty in this build but retained for profiling symmetry. */
  for (int time = time_m, t2 = (time + 1) % (3); time <= time_M; time += 1, t2 = (time + 1) % (3))
  {
    START_TIMER(section1)
    /* Begin section1 */
    /* End section1 */
    STOP_TIMER(section1, timers)
  }
  return 0;
}

/* Update one spatial block for one (skewed) time step: 12th-order-in-space
 * wave update with damping, then injection of precomputed source values. */
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_id_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw)
{
  float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
  float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
  float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  int(*restrict sp_source_id)[sp_source_id_vec->size[1]][sp_source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_id_vec->size[1]][sp_source_id_vec->size[2]])sp_source_id_vec->data;

  if (x0_blk0_size == 0 || y0_blk0_size == 0)
  {
    return;
  }
#pragma omp parallel num_threads(nthreads)
  {
    /* Blocks within the tile; loop bounds intersect the skewed domain
     * [x_m+time, x_M+time] with the current tile [xb, xb+xb_size]. */
#pragma omp for collapse(2) schedule(dynamic, 1)
    for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
    {
      for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
      {
        for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++)
        {
          for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++)
          {
            /* Leapfrog update; "x - time" etc. undo the time skew; +12 is
             * the halo offset for the 12th-order stencil, +1 for damp. */
#pragma omp simd aligned(damp, u, vp : 32)
            for (int z = z_m; z <= z_M; z += 1)
            {
              float r8 = 1.0/dt;
              float r7 = 1.0/(dt*dt);
              float r6 = 1.0/(vp[x - time + 12][y - time + 12][z + 12]*vp[x - time + 12][y - time + 12][z + 12]);
              u[t2][x - time + 12][y - time + 12][z + 12] = (r6*(-r7*(-2.0F*u[t0][x - time + 12][y - time + 12][z + 12] + u[t1][x - time + 12][y - time + 12][z + 12])) + r8*(damp[x - time + 1][y - time + 1][z + 1]*u[t0][x - time + 12][y - time + 12][z + 12]) - 2.67222496e-7F*(u[t0][x - time + 6][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 6][z + 12] + u[t0][x - time + 12][y - time + 12][z + 6] + u[t0][x - time + 12][y - time + 12][z + 18] + u[t0][x - time + 12][y - time + 18][z + 12] + u[t0][x - time + 18][y - time + 12][z + 12]) + 4.61760473e-6F*(u[t0][x - time + 7][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 7][z + 12] + u[t0][x - time + 12][y - time + 12][z + 7] + u[t0][x - time + 12][y - time + 12][z + 17] + u[t0][x - time + 12][y - time + 17][z + 12] + u[t0][x - time + 17][y - time + 12][z + 12]) - 3.96825406e-5F*(u[t0][x - time + 8][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 8][z + 12] + u[t0][x - time + 12][y - time + 12][z + 8] + u[t0][x - time + 12][y - time + 12][z + 16] + u[t0][x - time + 16][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 16][z + 12]) + 2.35155796e-4F*(u[t0][x - time + 9][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 9][z + 12] + u[t0][x - time + 12][y - time + 12][z + 9] + u[t0][x - time + 12][y - time + 12][z + 15] + u[t0][x - time + 12][y - time + 15][z + 12] + u[t0][x - time + 15][y - time + 12][z + 12]) - 1.19047622e-3F*(u[t0][x - time + 10][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 10][z + 12] + u[t0][x - time + 12][y - time + 12][z + 10] + u[t0][x - time + 12][y - time + 12][z + 14] + u[t0][x - time + 12][y - time + 14][z + 12] + u[t0][x - time + 14][y - time + 12][z + 12]) + 7.6190478e-3F*(u[t0][x - time + 11][y - time + 12][z + 12] + u[t0][x - time + 12][y - time + 11][z + 12] + u[t0][x - time + 12][y - time + 12][z + 11] + u[t0][x - time + 12][y - time + 12][z + 13] + u[t0][x - time + 12][y - time + 13][z + 12] + u[t0][x - time + 13][y - time + 12][z + 12]) - 3.97703713e-2F*u[t0][x - time + 12][y - time + 12][z + 12])/(r6*r7 + r8*damp[x - time + 1][y - time + 1][z + 1]);
            }
            /* Inject precomputed source contributions at the sparse points
             * recorded for this (x, y) column. */
#pragma omp simd aligned(damp, u, vp : 32)
            for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
            {
              int zind = sp_source_id[x - time][y - time][sp_zi];
              float r0 = save_src_u[tw][source_id[x - time][y - time][zind]];
              u[t2][x - time + 12][y - time + 12][zind + 12] += r0;
            }
          }
        }
      }
    }
  }
}
vc8.c
/* 3D linear index into a field stored x-fastest (relies on nx, ny from the
 * enclosing scope). */
#define A(a, x, y, z) (a[(z) * ny * nx + (y) * nx + x])

/* Apply the 16th-order (radius 8) Laplacian leapfrog update to one cache
 * block.  Block (bx, by, bz) covers
 *   [b*blocksize + 8, min(b*blocksize + 8 + blocksize, limit))
 * in each dimension, where the +8/-8 margins keep the stencil in bounds.
 * fp holds the previous time step on entry and the next on exit:
 *   fp_new = model_padded2_dt2 * lap(f) + 2*f - fp_old.
 */
static void inner_block(const float *restrict const f,
                        float *restrict const fp,
                        const int nx, const int ny, const int nz,
                        const int nxi,
                        const float *restrict const model_padded2_dt2,
                        const float *restrict const fd_coeff,
                        const int bx, const int by, const int bz,
                        const int blocksize_x, const int blocksize_y,
                        const int blocksize_z)
{
    int x;
    int y;
    int z;
    float f_xx;
    const int x_start = bx * blocksize_x + 8;
    const int y_start = by * blocksize_y + 8;
    const int z_start = bz * blocksize_z + 8;
    const int x_end = x_start + blocksize_x <= nxi + 8 ? x_start + blocksize_x : nxi + 8;
    const int y_end = y_start + blocksize_y <= ny - 8 ? y_start + blocksize_y : ny - 8;
    const int z_end = z_start + blocksize_z <= nz - 8 ? z_start + blocksize_z : nz - 8;

    for (z = z_start; z < z_end; z++) {
        for (y = y_start; y < y_end; y++) {
            for (x = x_start; x < x_end; x++) {
                /* 3*c0 center term: c0 is the per-axis center weight and the
                 * Laplacian sums three second derivatives. */
                f_xx = 3 * fd_coeff[0] * A(f, x, y, z)
                    + fd_coeff[1] * (A(f, x + 1, y, z) + A(f, x - 1, y, z)
                                     + A(f, x, y + 1, z) + A(f, x, y - 1, z)
                                     + A(f, x, y, z + 1) + A(f, x, y, z - 1))
                    + fd_coeff[2] * (A(f, x + 2, y, z) + A(f, x - 2, y, z)
                                     + A(f, x, y + 2, z) + A(f, x, y - 2, z)
                                     + A(f, x, y, z + 2) + A(f, x, y, z - 2))
                    + fd_coeff[3] * (A(f, x + 3, y, z) + A(f, x - 3, y, z)
                                     + A(f, x, y + 3, z) + A(f, x, y - 3, z)
                                     + A(f, x, y, z + 3) + A(f, x, y, z - 3))
                    + fd_coeff[4] * (A(f, x + 4, y, z) + A(f, x - 4, y, z)
                                     + A(f, x, y + 4, z) + A(f, x, y - 4, z)
                                     + A(f, x, y, z + 4) + A(f, x, y, z - 4))
                    + fd_coeff[5] * (A(f, x + 5, y, z) + A(f, x - 5, y, z)
                                     + A(f, x, y + 5, z) + A(f, x, y - 5, z)
                                     + A(f, x, y, z + 5) + A(f, x, y, z - 5))
                    + fd_coeff[6] * (A(f, x + 6, y, z) + A(f, x - 6, y, z)
                                     + A(f, x, y + 6, z) + A(f, x, y - 6, z)
                                     + A(f, x, y, z + 6) + A(f, x, y, z - 6))
                    + fd_coeff[7] * (A(f, x + 7, y, z) + A(f, x - 7, y, z)
                                     + A(f, x, y + 7, z) + A(f, x, y - 7, z)
                                     + A(f, x, y, z + 7) + A(f, x, y, z - 7))
                    + fd_coeff[8] * (A(f, x + 8, y, z) + A(f, x - 8, y, z)
                                     + A(f, x, y + 8, z) + A(f, x, y - 8, z)
                                     + A(f, x, y, z + 8) + A(f, x, y, z - 8));
                A(fp, x, y, z) = A(model_padded2_dt2, x, y, z) * f_xx
                    + 2 * A(f, x, y, z) - A(fp, x, y, z);
            }
        }
    }
}

/* Advance the whole field one time step: update all blocks in parallel,
 * then inject the sources for this step (serially; sources may share a
 * grid point, so the injection loop must not be parallelized as-is). */
static void inner(const float *restrict const f,
                  float *restrict const fp,
                  const int nx, const int ny, const int nz,
                  const int nxi,
                  const float *restrict const model_padded2_dt2,
                  const float dt,
                  const float *restrict const sources,
                  const int *restrict const sources_x,
                  const int *restrict const sources_y,
                  const int *restrict const sources_z,
                  const int num_sources, const int source_len,
                  const float *restrict const fd_coeff, const int step,
                  const int blocksize_x, const int blocksize_y,
                  const int blocksize_z,
                  const int nbx, const int nby, const int nbz)
{
    int bx;
    int by;
    int bz;
    int i;
    int sx;
    int sy;
    int sz;

    /* BUGFIX: with default(none), OpenMP 4.0+ compilers (e.g. gcc >= 9) no
     * longer treat const-qualified variables as predetermined shared, so
     * everything referenced in the region must be listed explicitly. */
#pragma omp parallel for default(none) private(by, bx) \
    shared(f, fp, nx, ny, nz, nxi, model_padded2_dt2, fd_coeff, \
           blocksize_x, blocksize_y, blocksize_z, nbx, nby, nbz)
    for (bz = 0; bz < nbz; bz++) {
        for (by = 0; by < nby; by++) {
            for (bx = 0; bx < nbx; bx++) {
                inner_block(f, fp, nx, ny, nz, nxi, model_padded2_dt2,
                            fd_coeff, bx, by, bz,
                            blocksize_x, blocksize_y, blocksize_z);
            }
        }
    }

    for (i = 0; i < num_sources; i++) {
        sx = sources_x[i] + 8;
        sy = sources_y[i] + 8;
        sz = sources_z[i] + 8;
        A(fp, sx, sy, sz) +=
            A(model_padded2_dt2, sx, sy, sz) * sources[i * source_len + step] * dt;
    }
}

/* Run num_steps leapfrog steps of the 3D acoustic wave equation.
 *
 * f, fp: current and previous wavefields (nx*ny*nz, radius-8 halo on all
 *        sides); the roles swap internally each step, so after an odd
 *        number of steps the newest field is in the caller's fp buffer.
 * nxi:   interior extent in x (nx minus the 16-point halo).
 * model_padded2_dt2: precomputed v^2 * dt^2 per cell.
 * sources: num_sources waveforms of length source_len, injected at
 *          (sources_x, sources_y, sources_z) each step.
 */
void step(float *restrict f, float *restrict fp,
          const int nx, const int ny, const int nz, const int nxi,
          const float *restrict const model_padded2_dt2,
          const float dx, const float dt,
          const float *restrict const sources,
          const int *restrict const sources_x,
          const int *restrict const sources_y,
          const int *restrict const sources_z,
          const int num_sources, const int source_len, const int num_steps)
{
    int step;
    float *tmp;
    /* 16th-order central-difference second-derivative weights over dx^2
     * (exact rational coefficients with denominator 302702400). */
    float fd_coeff[9] = {
        -924708642.0f / 302702400 / (dx * dx),
        538137600.0f / 302702400 / (dx * dx),
        -94174080.0f / 302702400 / (dx * dx),
        22830080.0f / 302702400 / (dx * dx),
        -5350800.0f / 302702400 / (dx * dx),
        1053696.0f / 302702400 / (dx * dx),
        -156800.0f / 302702400 / (dx * dx),
        15360.0f / 302702400 / (dx * dx),
        -735.0f / 302702400 / (dx * dx)
    };
    const int blocksize_x = 128;
    const int blocksize_y = 8;
    const int blocksize_z = 8;
    /* Number of blocks per dimension, rounding up for partial blocks. */
    const int nbx = (int)((float)(nxi) / blocksize_x) + (int)(((nxi) % blocksize_x) != 0);
    const int nby = (int)((float)(ny - 16) / blocksize_y) + (int)(((ny - 16) % blocksize_y) != 0);
    const int nbz = (int)((float)(nz - 16) / blocksize_z) + (int)(((nz - 16) % blocksize_z) != 0);

    for (step = 0; step < num_steps; step++) {
        inner(f, fp, nx, ny, nz, nxi, model_padded2_dt2, dt, sources,
              sources_x, sources_y, sources_z, num_sources, source_len,
              fd_coeff, step, blocksize_x, blocksize_y, blocksize_z,
              nbx, nby, nbz);
        /* Ping-pong the time levels. */
        tmp = f;
        f = fp;
        fp = tmp;
    }
}
CSC.h
#ifndef _CSC_H_ #define _CSC_H_ #include <iostream> #include <vector> #include <cstdlib> #include <algorithm> #include <cassert> #include <tuple> #include <random> #include "HeapEntry.h" #include "utility.h" #include <numeric> #include "Triple.h" extern "C" { #include "GTgraph/R-MAT/graph.h" } using namespace std; template <class IT, class NT> // IT, NT li dichiaro runtime (polimorfismo parametrico) class CSC { public: CSC():nnz(0), rows(0), cols(0) {} CSC(IT mynnz, IT m, IT n, int nt):nnz(mynnz),rows(m),cols(n) // costruttore di default { // Constructing empty Csc objects (size = 0) are not allowed. assert(nnz != 0 && cols != 0); colptr = my_malloc<IT>(cols + 1); rowids = my_malloc<IT>(nnz); values = my_malloc<NT>(nnz); } CSC (Triple<IT,NT> * triples, IT mynnz, IT m, IT n); // altro costruttore di default CSC (IT scale, IT r_scale, IT r_edgefactor); // for tall-skiny matrix void make_empty() { if( nnz > 0 ) { my_free<IT>(rowids); my_free<NT>(values); nnz = 0; } if( cols > 0 ) { my_free<IT>(colptr); cols = 0; } rows = 0; } template <typename AddOperation> CSC (vector<tuple<IT,IT,NT> > & tuple, IT m, IT n, AddOperation addop); // costruttore template <typename AddOperation> void MergeDuplicates (AddOperation addop); // 1st method CSC(graph & G); CSC (IT * ri, IT * ci, NT * val, IT mynnz, IT m, IT n); CSC (const CSC<IT,NT> & rhs); // copy constructor CSC<IT,NT> & operator=(const CSC<IT,NT> & rhs); // assignment operator bool operator==(const CSC<IT,NT> & rhs); // ridefinizione == ~CSC() // distruttore { make_empty(); } bool isEmpty() { return ( nnz == 0 ); } void Sorted(); void shuffleIds(); CSC<IT,NT> SpRef (const vector<IT> & ri, const vector<IT> & ci); CSC<IT,NT> SpRef2 (const IT* ri, const IT rilen, const IT* ci, const IT cilen); void intersect (const IT* rowids_in, const NT* values_in, const IT len_in, const IT* ri, const IT len_ri, IT* rowids_out, NT* values_out, IT* len_out); IT rows; IT cols; IT nnz; // number of nonzeros IT totalcols; // for the parallel 
case IT * colptr; IT * rowids; NT * values; }; // copy constructor template <class IT, class NT> CSC<IT,NT>::CSC (const CSC<IT,NT> & rhs): nnz(rhs.nnz), rows(rhs.rows), cols(rhs.cols) { if(nnz > 0) { values = my_malloc<NT>(nnz); rowids = my_malloc<IT>(nnz); copy(rhs.values, rhs.values + nnz, values); copy(rhs.rowids, rhs.rowids + nnz, rowids); } if ( cols > 0) { colptr = my_malloc<IT>(cols + 1); copy(rhs.colptr, rhs.colptr + cols+1, colptr); } } template <class IT, class NT> CSC<IT,NT> & CSC<IT,NT>::operator= (const CSC<IT,NT> & rhs) // ridefinisce operatore = di assegnazione { if(this != &rhs) { if(nnz > 0) // if the existing object is not empty { my_free<IT>(rowids); my_free<NT>(values); } if(cols > 0) { my_free<IT>(colptr); } nnz = rhs.nnz; rows = rhs.rows; cols = rhs.cols; if(rhs.nnz > 0) // if the copied object is not empty { values = my_malloc<NT>(nnz); rowids = my_malloc<IT>(nnz); copy(rhs.values, rhs.values + nnz, values); copy(rhs.rowids, rhs.rowids + nnz, rowids); } if(rhs.cols > 0) { colptr = my_malloc<IT>(cols + 1); copy(rhs.colptr, rhs.colptr + cols+1, colptr); } } return *this; } //! Construct a CSC object from a GTgraph object //! GTgraph might have parallel edges; this constructor sums them up //! 
Assumes a sorted GTgraph (primary key: start) template <class IT, class NT> CSC<IT,NT>::CSC(graph & G):nnz(G.m), rows(G.n), cols(G.n) { // graph is like a triples object // typedef struct { // LONG_T m; // LONG_T n; // // Arrays of size 'm' storing the edge information // // A directed edge 'e' (0 <= e < m) from start[e] to end[e] // // had an integer weight w[e] // LONG_T* start; // LONG_T* end; // WEIGHT_T* w; // } graph; cout << "Graph nnz= " << G.m << " and n=" << G.n << endl; vector< Triple<IT,NT> > simpleG; vector< pair< pair<IT,IT>,NT> > currCol; currCol.push_back(make_pair(make_pair(G.start[0], G.end[0]), G.w[0])); for (IT k = 0 ; k < nnz-1 ; ++k) { if(G.start[k] != G.start[k+1] ) { std::sort(currCol.begin(), currCol.end()); simpleG.push_back(Triple<IT,NT>(currCol[0].first.first, currCol[0].first.second, currCol[0].second)); for(int i=0; i< currCol.size()-1; ++i) { if(currCol[i].first == currCol[i+1].first) { simpleG.back().val += currCol[i+1].second; } else { simpleG.push_back(Triple<IT,NT>(currCol[i+1].first.first, currCol[i+1].first.second, currCol[i+1].second)); } } vector< pair< pair<IT,IT>,NT> >().swap(currCol); } currCol.push_back(make_pair(make_pair(G.start[k+1], G.end[k+1]), G.w[k+1])); } // now do the last row sort(currCol.begin(), currCol.end()); simpleG.push_back(Triple<IT,NT>(currCol[0].first.first, currCol[0].first.second, currCol[0].second)); for(int i=0; i< currCol.size()-1; ++i) { if(currCol[i].first == currCol[i+1].first) { simpleG.back().val += currCol[i+1].second; } else { simpleG.push_back(Triple<IT,NT>(currCol[i+1].first.first, currCol[i+1].first.second, currCol[i+1].second)); } } nnz = simpleG.size(); cout << "[After duplicate merging] Graph nnz= " << nnz << " and n=" << G.n << endl << endl; colptr = my_malloc<IT>(cols + 1); rowids = my_malloc<IT>(nnz); values = my_malloc<NT>(nnz); IT *work = my_malloc<IT>(cols); std::fill(work, work+cols, (IT) 0); // initilized to zero for (IT k = 0 ; k < nnz ; ++k) { IT tmp = simpleG[k].col; work [ 
tmp ]++ ; // col counts (i.e, w holds the "col difference array") } if(nnz > 0) { colptr[cols] = CumulativeSum (work, cols) ; // cumulative sum of w copy(work, work+cols, colptr); IT last; for (IT k = 0 ; k < nnz ; ++k) { rowids[ last = work[ simpleG[k].col ]++ ] = simpleG[k].row ; values[last] = simpleG[k].val ; } } my_free<IT>(work); } // Construct a Csc object from an array of "triple"s template <class IT, class NT> CSC<IT,NT>::CSC(Triple<IT,NT> * triples, IT mynnz, IT m, IT n):nnz(mynnz),rows(m),cols(n) { colptr = my_malloc<IT>(cols + 1); rowids = my_malloc<IT>(nnz); values = my_malloc<NT>(nnz); vector< pair<IT,NT> > tosort (nnz); IT *work = my_malloc<IT>(cols); std::fill(work, work+cols, (IT) 0); for (IT k = 0 ; k < nnz ; ++k) { IT tmp = triples[k].col; work [ tmp ]++ ; // column counts (i.e, w holds the "col difference array") } if(nnz > 0) { colptr[cols] = CumulativeSum (work, cols) ; // cumulative sum of w copy(work, work+cols, colptr); IT last; for (IT k = 0 ; k < nnz ; ++k) { tosort[ work[triples[k].col]++] = make_pair( triples[k].row, triples[k].val); } #pragma omp parallel for for(IT i=0; i< cols; ++i) { sort(tosort.begin() + colptr[i], tosort.begin() + colptr[i+1]); typename vector<pair<IT,NT> >::iterator itr; // iterator is a dependent name IT ind; for(itr = tosort.begin() + colptr[i], ind = colptr[i]; itr != tosort.begin() + colptr[i+1]; ++itr, ++ind) { rowids[ind] = itr->first; values[ind] = itr->second; } } } my_free<IT>(work); } template <class IT, class NT> template <typename AddOperation> void CSC<IT,NT>::MergeDuplicates (AddOperation addop) { vector<IT> diff(cols,0); std::adjacent_difference (colptr+1, colptr+cols+1, diff.begin()); vector< vector<IT> > v_rowids; vector< vector<NT> > v_values; if(nnz > 0) { #pragma omp parallel for for(int i=0; i< cols; ++i) { for(size_t j= colptr[i]; j < colptr[i+1]; ++j) { v_rowids[i].push_back(rowids[j]); v_values[i].push_back(values[j]); while(j < colptr[i+1]-1 && rowids[j] == rowids[j+1]) { 
v_values[i].back() = addop(v_values[i].back(), values[j+1]); j++; // increment j diff[i]--; } } } } colptr[cols] = CumulativeSum (diff.data(), cols) ; // cumulative sum of diff copy(diff.begin(), diff.end(), colptr); // update the column pointers my_free<IT>(rowids); my_free<NT>(values); cout << "Old number of nonzeros before merging: " << nnz << endl; nnz = colptr[cols]; cout << "New number of nonzeros after merging: " << nnz << endl; rowids = my_malloc<IT>(nnz); values = my_malloc<NT>(nnz); #pragma omp parallel for for(int i=0; i< cols; ++i) { copy(v_rowids[i].begin(), v_rowids[i].end(), rowids+colptr[i]); copy(v_values[i].begin(), v_values[i].end(), values+colptr[i]); } } //! this version handles duplicates in the input template <class IT, class NT> template <typename AddOperation> // n = kmerdict.size(), m = read_id, nnz = tuple.size() // CSC<size_t, size_t> *spmat = new CSC<size_t, size_t>(occurrences, read_id, kmerdict.size(), plus<size_t>()); CSC<IT,NT>::CSC (vector< tuple<IT,IT,NT> > & tuple, IT m, IT n, AddOperation addop): rows(m), cols(n) { NT nnz = tuple.size(); // there might be duplicates colptr = my_malloc<IT>(cols + 1); rowids = my_malloc<IT>(nnz); values = my_malloc<IT>(nnz); vector< pair<IT,NT> > tosort (nnz); IT *work = my_malloc<IT>(cols); std::fill(work, work+cols, (IT) 0); // riempi di 0 tutto for (IT k = 0 ; k < nnz ; ++k) { IT tmp = get<1>(tuple[k]); // temp = read_id work [ tmp ]++ ; // column counts (i.e, w holds the "col difference array") } if(nnz > 0) { colptr[cols] = CumulativeSum (work, cols) ; // cumulative sum of work, puntatore all'ultima posizione contiene copy(work, work+cols, colptr); IT last; for (IT k = 0 ; k < nnz ; ++k) { tosort[work[get<1>(tuple[k])]++] = make_pair( get<0>(tuple[k]), get<2>(tuple[k])); } #pragma omp parallel for for(int i=0; i< cols; ++i) { sort(tosort.begin() + colptr[i], tosort.begin() + colptr[i+1]); typename vector<pair<IT,NT> >::iterator itr; // iterator is a dependent name IT ind; for(itr = 
tosort.begin() + colptr[i], ind = colptr[i]; itr != tosort.begin() + colptr[i+1]; ++itr, ++ind) { rowids[ind] = itr->first; values[ind] = itr->second; } } } for(IT j = 0; j<nnz; ++j){ std::cout << " read_id : " << rowids[j] << " kmer_id : " << get<1>(tuple[j]) << " pos_in_read : " << values[j] << endl; // TO DO: as value I want a pair<kmer_id, vector<posix_in_read>> } my_free<IT>(work); } // Construct a Csc object from parallel arrays template <class IT, class NT> CSC<IT,NT>::CSC(IT * ri, IT * ci, NT * val, IT mynnz, IT m, IT n):nnz(mynnz),rows(m),cols(n) { assert(nnz != 0 && rows != 0); colptr = my_malloc<IT>(cols + 1); rowids = my_malloc<IT>(nnz); values = my_malloc<NT>(nnz); vector< pair<IT,NT> > tosort (nnz); IT *work = my_malloc<IT>(cols); std::fill(work, work+cols, (IT) 0); for (IT k = 0; k < nnz; ++k) { IT tmp = ci[k]; work[ tmp ]++; // column counts (i.e, w holds the "col difference array") } if(nnz > 0) { colptr[cols] = CumulativeSum (work, cols) ; // cumulative sum of w copy(work, work+cols, colptr); IT last; for (IT k = 0 ; k < nnz ; ++k) { tosort[ work[ci[k]]++] = make_pair( ri[k], val[k]); } #pragma omp parallel for for(int i=0; i< cols; ++i) { sort(tosort.begin() + colptr[i], tosort.begin() + colptr[i+1]); typename vector<pair<IT,NT> >::iterator itr; // iterator is a dependent name IT ind; for(itr = tosort.begin() + colptr[i], ind = colptr[i]; itr != tosort.begin() + colptr[i+1]; ++itr, ++ind) { rowids[ind] = itr->first; values[ind] = itr->second; } } } my_free<IT>(work); } // check if sorted within columns template <class IT, class NT> void CSC<IT,NT>::Sorted() { bool sorted = true; for(IT i=0; i< cols; ++i) { sorted &= my_is_sorted (rowids + colptr[i], rowids + colptr[i+1], std::less<IT>()); } } template <class IT, class NT> void CSC<IT,NT>::shuffleIds() { mt19937_64 mt(0); for (IT i = 0; i < cols; ++i) { IT offset = colptr[i]; IT width = colptr[i + 1] - colptr[i]; uniform_int_distribution<IT> rand_scale(0, width - 1); for (IT j = colptr[i]; j < 
colptr[i + 1]; ++j) { IT target = rand_scale(mt); IT tmpId = rowids[offset + target]; NT tmpVal = values[offset + target]; rowids[offset + target] = rowids[j]; values[offset + target] = values[j]; rowids[j] = tmpId; values[j] = tmpVal; } } } template <class IT, class NT> bool CSC<IT,NT>::operator==(const CSC<IT,NT> & rhs) { if(nnz != rhs.nnz || rows != rhs.rows || cols != rhs.cols) return false; bool same = std::equal(colptr, colptr+cols+1, rhs.colptr); same = same && std::equal(rowids, rowids+nnz, rhs.rowids); bool samebefore = same; ErrorTolerantEqual<NT> epsilonequal(EPSILON); same = same && std::equal(values, values+nnz, rhs.values, epsilonequal ); if(samebefore && (!same)) { #ifdef DEBUG vector<NT> error(nnz); transform(values, values+nnz, rhs.values, error.begin(), absdiff<NT>()); vector< pair<NT, NT> > error_original_pair(nnz); for(IT i=0; i < nnz; ++i) error_original_pair[i] = make_pair(error[i], values[i]); if(error_original_pair.size() > 10) // otherwise would crush for small data { partial_sort(error_original_pair.begin(), error_original_pair.begin()+10, error_original_pair.end(), greater< pair<NT,NT> >()); cout << "Highest 10 different entries are: " << endl; for(IT i=0; i < 10; ++i) cout << "Diff: " << error_original_pair[i].first << " on " << error_original_pair[i].second << endl; } else { sort(error_original_pair.begin(), error_original_pair.end(), greater< pair<NT,NT> >()); cout << "Highest different entries are: " << endl; for(typename vector< pair<NT, NT> >::iterator it=error_original_pair.begin(); it != error_original_pair.end(); ++it) cout << "Diff: " << it->first << " on " << it->second << endl; } #endif } return same; } template <class IT, class NT> void CSC<IT,NT>::intersect (const IT* rowids_in, const NT* values_in, const IT len_in, const IT* ri, const IT len_ri, IT* rowids_out, NT* values_out, IT* len_out) { IT maxlen = len_in>len_ri ? len_in : len_ri; double r = len_in>len_ri ? 
(double)len_in/len_ri : (double)len_ri/len_in; //if(log2(maxlen) < r) // linear scan is asymptotically better { IT idx=0; for(int j=0, k=0; j<len_in && k < len_ri;) { if(ri[k] < rowids_in[j]) k++; else if(ri[k] > rowids_in[j]) j++; else //(ri[k]==rowids[j]) { values_out[idx] = values_in[j]; rowids_out[idx++] = rowids_in[j]; k++; j++; // repeated rows are not allowed } } *len_out = idx; } //else // use finger search { } } template <class IT, class NT> CSC<IT,NT> CSC<IT,NT>::SpRef2 (const IT* ri, const IT rilen, const IT* ci, const IT cilen) { if( cilen>0 && ci[cilen-1] > cols) { cerr << "Col indices out of bounds" << endl; abort(); } if( rilen>0 && ri[rilen-1] > rows) { cerr << "Row indices out of bounds" << endl; abort(); } // count nnz(A[,:J]) IT nnz_ci = 0; for(int i=0; i<cilen; i++) { nnz_ci = nnz_ci + colptr[ci[i]+1] - colptr[ci[i]]; } // IT* rowids_out = new IT[nnz_ci]; // NT* values_out = new NT[nnz_ci]; // IT* len_out = new IT[cilen]; IT *rowids_out = my_malloc<IT>(nnz_ci); IT *values_out = my_malloc<NT>(nnz_ci); IT *len_out = my_malloc<IT>(cilen); IT idx=0; for(int i=0; i<cilen; i++) { IT cidx1 = colptr[ci[i]]; IT cidx2 = colptr[ci[i]+1]; intersect (&rowids[cidx1], &values[cidx1], cidx2 - cidx1,ri, rilen, &rowids_out[cidx1], &values_out[cidx1], &len_out[i]); } CSC C; C.rows = rilen; C.cols = cilen; // C.colptr = new IT[C.cols+1]; C.colptr = my_malloc<IT>(C.cols + 1); C.colptr[0] = 0; for(int i=0; i < C.cols; ++i) { C.colptr[i+1] = C.colptr[i] + len_out[i]; } C.nnz = C.colptr[C.cols]; // C.rowids = new IT[C.nnz]; // C.values = new NT[C.nnz]; C.rowids = my_malloc<IT>(C.nnz); C.values = my_malloc<NT>(C.nnz); for(int i=0; i< C.cols; ++i) // combine step { IT cidx1 = colptr[ci[i]]; IT cidx2 = cidx1 + len_out[i]; copy(&rowids_out[cidx1], &rowids_out[cidx2], C.rowids + C.colptr[i]); copy(&values_out[cidx1], &values_out[cidx2], C.values + C.colptr[i]); } return C; } // write genereal purpose set-intersect // binary search is faster is one of the vectors is very 
large // we assume that ri and ci are sorted in ascending order // also assume that matrix sorted within column // output is another CSC // note that ri and ci might have repeated entries // behaviour is exactly similar to the matlab implementation template <class IT, class NT> CSC<IT,NT> CSC<IT,NT>::SpRef (const vector<IT> & ri, const vector<IT> & ci) { if( (!ci.empty()) && (ci.back() > cols)) { cerr << "Col indices out of bounds" << endl; abort(); } if( (!ri.empty()) && (ri.back() > rows)) { cerr << "Row indices out of bounds" << endl; abort(); } // first, count nnz in the result matrix IT refnnz = 0; for(int i=0; i<ci.size(); i++) { IT j = colptr[ci[i]], k=0; IT endIdx = colptr[ci[i]+1]; while(j<endIdx && k < ri.size()) { //cout << j << "=" << rowids[j] << " :: " << k << "=" << ri[k] << " \n"; if(ri[k]<rowids[j]) k++; else if(ri[k]>rowids[j]) j++; else //(ri[k]==rowids[j]) { refnnz++; k++; //j++; // wait for the next iteration of the inner loop to alow reapted rows } } } // Next, allocate memory and save the result matrix // This two-step implementation is better for multithreading CSC refmat(refnnz, ri.size(), ci.size(), 0); refmat.colptr[0] = 0; IT idx=0; for(int i=0; i<ci.size(); i++) { IT j = colptr[ci[i]], k=0; IT endIdx = colptr[ci[i]+1]; while(j<endIdx && k < ri.size()) { if(ri[k]<rowids[j]) k++; else if(ri[k]>rowids[j]) j++; else //(ri[k]==rowids[j]) { refmat.values[idx] = values[j]; refmat.rowids[idx++] = rowids[j]; k++; //j++; // wait for the next iteration of the inner loop to alow reapted rows } } refmat.colptr[i+1] = idx; } return refmat; } #endif
PoW.c
/* Copyright 2016-2018 The Pop Core Foundation */
#include "PoW.h"
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
// #include <omp.h>
#include "my_time.h"
#include "common.h"
#include "my_rand48_r.h"
#include "oneWayFunction.h"

/*
 * Step 1: Initialize working memory.
 *
 * Hashes the input, then fills Maddr (WORK_MEMORY_SIZE bytes) in groups of K
 * 32-byte rows: each group starts from a fresh hash of the rotated state and
 * the remaining K-1 rows are derived from seeded rand48 streams XOR-folded
 * back into the running state.
 *
 * FIX: the inner loop redeclared shift_num (shadowing the per-group variable)
 * and declared an unused result[] buffer; both removed — assignments now
 * target the outer shift_num, which is written before every use, so behavior
 * is unchanged.
 */
void initWorkMemory(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, const uint32_t K)
{
    uint32_t i, j, k;
#ifdef _MSC_VER
    __declspec(align(16)) uint8_t a[OUTPUT_LEN], b[OUTPUT_LEN], a_rrs[OUTPUT_LEN], b_rrs[OUTPUT_LEN];
#else
    __attribute__((aligned(16))) uint8_t a[OUTPUT_LEN], b[OUTPUT_LEN], a_rrs[OUTPUT_LEN], b_rrs[OUTPUT_LEN];
#endif
    uint64_t *a_u64 = (uint64_t *)a, *b_u64 = (uint64_t *)b;

    funcInfor[0].func(input, inputLen, a);

    uint64_t randSeed[4] = {0, 0, 0, 0};
    const uint32_t iterNum = WORK_MEMORY_SIZE >> 5;  /* 32 bytes per row */
    for (i = 0; i < iterNum; i += K) {
        uint8_t t = 0, shift_num = 0;
        /* t selects one of the 16 one-way functions from the state */
        reduce_bit(a, 32, (uint8_t *)&t, 8);
        t = (t & 0x0f) ^ (t >> 4);

        // reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8);
        shift_num = reduce32_8bits(i);
        // rrs(a, OUTPUT_LEN, a_rrs, shift_num);
        rrs_32Byte_256(a, a_rrs, shift_num);

        funcInfor[t].func(a_rrs, 32, a);

        /* derive four 48-bit seeds from the fresh state */
        reduce_bit(a, 8, (uint8_t *)&randSeed[0], 48);
        reduce_bit(a + 8, 8, (uint8_t *)&randSeed[1], 48);
        reduce_bit(a + 16, 8, (uint8_t *)&randSeed[2], 48);
        reduce_bit(a + 24, 8, (uint8_t *)&randSeed[3], 48);
#ifndef SSE_VERSION
        struct my_rand48_data randBuffer[4];
        my_seed48_r(randSeed[0], &randBuffer[0]);
        my_seed48_r(randSeed[1], &randBuffer[1]);
        my_seed48_r(randSeed[2], &randBuffer[2]);
        my_seed48_r(randSeed[3], &randBuffer[3]);
#else
        struct vrand48_data randBuffer[2];
        vseed48(randSeed , &randBuffer[0]);
        vseed48(randSeed + 2, &randBuffer[1]);
#endif

        memcpy(Maddr + (i << 5), a, 32*sizeof(uint8_t));

        for (k = 1; k < K; ++k) {
#ifndef SSE_VERSION
            my_rand64_r(&randBuffer[0], &b_u64[0]);
            my_rand64_r(&randBuffer[1], &b_u64[1]);
            my_rand64_r(&randBuffer[2], &b_u64[2]);
            my_rand64_r(&randBuffer[3], &b_u64[3]);

            // reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8);
            shift_num = reduce32_8bits(i + k);
            // rrs(b, OUTPUT_LEN, result, shift_num);
            rrs_32Byte_256(b, b_rrs, shift_num);

            /* fold the derived row back into the running state */
            uint64_t *b_rrs_u64 = (uint64_t *)b_rrs;
            a_u64[0] ^= b_rrs_u64[0];
            a_u64[1] ^= b_rrs_u64[1];
            a_u64[2] ^= b_rrs_u64[2];
            a_u64[3] ^= b_rrs_u64[3];
#else
            vrand64(b, randBuffer);

            shift_num = reduce32_8bits(i + k);
            // rrs(b, OUTPUT_LEN, result, shift_num);
            rrs_32Byte_256(b, b_rrs, shift_num);

            __m128i val = _mm_load_si128((__m128i *)a);
            __m128i vah = _mm_load_si128((__m128i *)(a + 16));
            __m128i vresultl = _mm_load_si128((__m128i *)b_rrs);
            __m128i vresulth = _mm_load_si128((__m128i *)(b_rrs + 16));
            vresultl = _mm_xor_si128(val, vresultl);
            vresulth = _mm_xor_si128(vah, vresulth);
            _mm_store_si128((__m128i *)a, vresultl);
            _mm_store_si128((__m128i *)(a + 16), vresulth);
#endif
            memcpy(Maddr + ((i + k) << 5), b_rrs, OUTPUT_LEN*sizeof(uint8_t));
        }
    }
}

/*
 * Step 2: Modify the working memory contents.
 *
 * Performs C rounds of pseudo-random, data-dependent swaps over Maddr
 * (addresses drawn from a rand48 stream seeded by the current state), and
 * XOR-accumulates each round's digest into result (OUTPUT_LEN bytes).
 */
void modifyWorkMemory(uint8_t *Maddr, const uint32_t L, const uint32_t C, uint8_t *result)
{
    uint32_t i, j;
#ifdef _MSC_VER
    __declspec(align(16)) uint8_t a[OUTPUT_LEN], b[64], a_rrs[OUTPUT_LEN];
#else
    __attribute__((aligned(16))) uint8_t a[OUTPUT_LEN], b[64], a_rrs[OUTPUT_LEN];
#endif
    uint64_t *a_u64 = (uint64_t *)a, *b_u64 = (uint64_t *)b;

    funcInfor[0].func(Maddr + WORK_MEMORY_SIZE - 32, 32, a);
    memcpy(result, a, OUTPUT_LEN*sizeof(uint8_t));

    uint64_t r = 0;
    reduce_bit(a, 32, (uint8_t *)&r, 64);

    const uint32_t iterNum = L << 6;  /* 64 swaps per L */
    for (i = 0; i < C; ++i) {
        uint64_t randSeed = 0;
        reduce_bit(a, 32, (uint8_t *)&randSeed, 48);
        struct my_rand48_data randBuffer;
        my_seed48_r(randSeed, &randBuffer);

        uint8_t t1, t2, s;
        uint64_t randNum = 0, base = 0;
        for (j = 0; j < iterNum; ++j) {
            my_rand48_r(&randBuffer, &randNum);
            base = randNum + r;

            uint64_t offset = 0;
            // reduce_bit((uint8_t *)&r, 8, (uint8_t *)&offset, 8);
            offset = reduce64_8bits(r);
            offset = (offset << 8) + 1;

            /* two data-dependent addresses, swapped and masked with s */
            uint64_t addr1 = (base + WORK_MEMORY_SIZE - offset) % WORK_MEMORY_SIZE;
            uint64_t addr2 = (base + offset) % WORK_MEMORY_SIZE;
            t1 = Maddr[addr1];
            t2 = Maddr[addr2];
            s = a[j & 0x1f];
            Maddr[addr1] = t2 ^ s;
            Maddr[addr2] = t1 ^ s;
            b[j & 0x3f] = t1 ^ t2;
            r = r + s + t1 + t2;  /* running data-dependent accumulator */
        }

        uint8_t t = 0;
        // reduce_bit((uint8_t *)&r, 8, (uint8_t *)&t, 8);
        // t = (t & 0x0f) ^ (t >> 4);
        t = reduce64_4bits(r);

        reduce_bit(b, 64, a, 256);

        uint8_t shift_num = 0;
        uint64_t ir = r + i;
        // reduce_bit((uint8_t *)&ir, 8, (uint8_t *)&shift_num, 8);
        shift_num = reduce64_8bits(ir);
        // rrs(a, OUTPUT_LEN, a_rrs, shift_num);
        rrs_32Byte_256(a, a_rrs, shift_num);

        funcInfor[t].func(a_rrs, 32, a);

        uint64_t *result_u64 = (uint64_t *)result;
        result_u64[0] ^= a_u64[0];
        result_u64[1] ^= a_u64[1];
        result_u64[2] ^= a_u64[2];
        result_u64[3] ^= a_u64[3];
    }
}

/*
 * Step 3: Calculate the final result.
 *
 * Walks the working memory XOR-folding rows into the state; the number of
 * rows folded per hash round (d) is itself derived from the state (D bits).
 * Terminates when the walk reaches the end of the working memory.
 */
void calculateFinalResult(uint8_t *Maddr, uint8_t *c, const uint32_t D, uint8_t *output)
{
    uint32_t i = 0, j = 0, k = 0;
#ifdef _MSC_VER
    __declspec(align(16)) uint8_t result[OUTPUT_LEN], result_rrs[OUTPUT_LEN];
#else
    __attribute__((aligned(16))) uint8_t result[OUTPUT_LEN], result_rrs[OUTPUT_LEN];
#endif
    uint64_t *Maddr_u64 = (uint64_t *)Maddr;
    uint64_t *result_u64 = (uint64_t *)result;
    const uint32_t num = (WORK_MEMORY_SIZE >> 5) - 1;
    uint32_t it = 0;

    memcpy(result, c, OUTPUT_LEN * sizeof(uint8_t));
    while(1) {
        uint8_t t = 0, shift_num = 0;
        uint32_t d = 0;
        reduce_bit(result, 32, (uint8_t *)&t, 8);
        t = (t & 0x0f) ^ (t >> 4);
        reduce_bit(result, 32, (uint8_t *)&d, D);
        ++d;
        for (j = 0; j < d; ++j) {
            uint32_t index = i << 2;  /* row i as four 64-bit words */
            result_u64[0] ^= Maddr_u64[index + 0];
            result_u64[1] ^= Maddr_u64[index + 1];
            result_u64[2] ^= Maddr_u64[index + 2];
            result_u64[3] ^= Maddr_u64[index + 3];
            ++i;
            if (i == num) {
                /* final row reached: one last hash and we are done */
                it = i + t;
                // reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8);
                shift_num = reduce32_8bits(it);
                // rrs(result, OUTPUT_LEN, result_rrs, shift_num);
                rrs_32Byte_256(result, result_rrs, shift_num);
                funcInfor[0].func(result_rrs, 32, result);
                memcpy(output, result, OUTPUT_LEN * sizeof(uint8_t));
                return;
            }
        }
        it = t + i;
        // reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8);
        shift_num = reduce32_8bits(it);
        // rrs(result, OUTPUT_LEN, result_rrs, shift_num);
        rrs_32Byte_256(result, result_rrs, shift_num);
        funcInfor[t].func(result_rrs, 32, result);
    }
}

/*
 * Correctness & Performance test for Proof of work
 */
/*
void testPowFunction(uint8_t *mess, uint32_t messLen, const int64_t iterNum)
{
    int64_t j;
    uint32_t inputLen = messLen;
    uint8_t input[INPUT_LEN], output[OUTPUT_LEN];
    memset(input, 0, INPUT_LEN*sizeof(uint8_t));
    memcpy(input, mess, messLen*sizeof(char));

    // Init all one-way function
    initOneWayFunction();

    uint8_t *Maddr = (uint8_t *)malloc(64 * WORK_MEMORY_SIZE*sizeof(uint8_t));
    assert(NULL != Maddr);
    memset(Maddr, 0, 64 * WORK_MEMORY_SIZE*sizeof(uint8_t));

    printf("****************************** Correctness test (PoW function) ******************************\n");
    printf("Test message: %s\n", mess);
    powFunction(input, inputLen, Maddr, output);
    view_data_u8("PoW", output, OUTPUT_LEN);
    printf("*********************************************************************************************\n");

    printf("*************************************************** Performance test (PoW function) ***************************************************\n");
    uint8_t *result = (uint8_t *)malloc(iterNum * OUTPUT_LEN * sizeof(uint8_t));
    assert(NULL != result);
    memset(result, 0, iterNum * OUTPUT_LEN * sizeof(uint8_t));

    uint32_t threadNumArr[] = {1, 4, 8, 12, 16, 20, 24, 32, 48, 64};
    uint32_t threadNumTypes = sizeof(threadNumArr) / sizeof(uint32_t);
    printf("   %-18s", "Algorithm");
    for (uint32_t ix = 0; ix < threadNumTypes; ++ix)
        printf("%12d", threadNumArr[ix]);
    printf("\n");

    printf("00 %-18s\t", "PoW");
    for (uint32_t ix = 0; ix < threadNumTypes; ++ix) {
        omp_set_num_threads(threadNumArr[ix]);
        double startTime = get_wall_time();
        if (threadNumArr[ix] == 1) {
            for (j = 0; j < iterNum; ++j) {
                powFunction(input, inputLen, Maddr, result + j * OUTPUT_LEN);
            }
        } else {
            #pragma omp parallel for firstprivate(input), private(j) shared(result)
            for (j = 0; j < iterNum; ++j) {
                powFunction(input, inputLen, Maddr + omp_get_thread_num() * WORK_MEMORY_SIZE, result + j * OUTPUT_LEN);
            }
        }
        double endTime = get_wall_time();
        double costTime = endTime - startTime;
        printf("%5.0f bps   ", iterNum / costTime);
        fflush(stdout);

        // Check result
        for (j = 0; j < iterNum; j += 1) {
            if (memcmp(output, result + j * OUTPUT_LEN, OUTPUT_LEN)) {
                printf("Thread num: %d, j: %ld\n", threadNumArr[ix], j);
                view_data_u8("output", output, OUTPUT_LEN);
                view_data_u8("result", result + j * OUTPUT_LEN, OUTPUT_LEN);
                abort();
            }
        }
    }
    printf("\n");
    printf("***************************************************************************************************************************************\n");

    if (NULL != result) {
        free(result);
        result = NULL;
    }
    if (NULL != Maddr) {
        free(Maddr);
        Maddr = NULL;
    }
}
*/

#define OUTPUT_BUFFER_SIZE (32 * 1024UL * 1024UL)
#define MAX_TEST_INPUT_LEN 200
#define MAX_OUT_FILE_NAME_LEN 25

const char testInputCase[][MAX_TEST_INPUT_LEN] = {
    "",
    "HelloWorld",
    "0123456789"
};

/*
 * NIST-style statistical test driver: for each test input, iterates the PoW
 * function (output of round i feeds round i+1), fills a 32 MiB buffer with
 * the chained outputs, and writes it to "<outFileName>-<ix>.txt".
 *
 * FIX: the feedback memcpy copied OUTPUT_LEN * sizeof(uint32_t) bytes — four
 * times the intended OUTPUT_LEN bytes — reading past the end of outputBuffer
 * in the last iterations (heap over-read).  Only OUTPUT_LEN bytes are ever
 * consumed (powFunction is called with length OUTPUT_LEN), so copying
 * OUTPUT_LEN bytes preserves the produced stream.
 */
void powNistTest(const char *outFileName)
{
    const uint64_t iterNum = 1024UL * 1024UL;
    // const uint64_t iterNum = 1024UL;
    uint8_t *outputBuffer = (uint8_t *)malloc(OUTPUT_BUFFER_SIZE * sizeof(uint8_t));
    assert(NULL != outputBuffer);
    memset(outputBuffer, 0, OUTPUT_BUFFER_SIZE * sizeof(uint8_t));

    uint8_t *Maddr = (uint8_t *)malloc(WORK_MEMORY_SIZE*sizeof(uint8_t));
    assert(NULL != Maddr);
    memset(Maddr, 0, WORK_MEMORY_SIZE*sizeof(uint8_t));

    initOneWayFunction();

    uint32_t testInputCaseNum = sizeof(testInputCase) / sizeof(const char [MAX_TEST_INPUT_LEN]);
    for (uint32_t testCaseIx = 0; testCaseIx < testInputCaseNum; ++testCaseIx) {
        char curOutFileName[MAX_OUT_FILE_NAME_LEN] = "";
        sprintf(curOutFileName, "%s-%u.txt", outFileName, testCaseIx);
        FILE *fp = NULL;
        if (NULL != (fp = fopen(curOutFileName, "wb"))) {
            const uint32_t testInputCaseLen = strlen((char *)testInputCase[testCaseIx]);
            uint8_t input[MAX_TEST_INPUT_LEN];
            memset(input, 0, MAX_TEST_INPUT_LEN*sizeof(uint8_t));
            memcpy(input, testInputCase[testCaseIx], testInputCaseLen*sizeof(uint8_t));

            double startTime = get_wall_time();
            powFunction(input, testInputCaseLen, Maddr, outputBuffer);
            for (uint64_t i = 1, j = 0; i < iterNum; ++i) {
                /* chain: previous OUTPUT_LEN-byte digest becomes next input */
                memcpy(input, outputBuffer + j, OUTPUT_LEN * sizeof(uint8_t));
                j += OUTPUT_LEN;
                powFunction(input, OUTPUT_LEN, Maddr, outputBuffer + j);
                /*
                if (j == OUTPUT_BUFFER_SIZE) {
                    fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp);
                    j = 0;
                }
                */
            }
            double endTime = get_wall_time();
            double costTime = endTime - startTime;
            fprintf(stdout, "TestCaseIx: %d, Input: %s, IterNum: %lu, Time: %4.2f, Performance: %5.2f bps\n",
                    testCaseIx, testInputCase[testCaseIx], iterNum, costTime,
                    ((double)(iterNum * OUTPUT_LEN)) / costTime);
            fflush(stdout);

            fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp);
            fclose(fp);
        } else {
            fprintf(stderr, "Error: Open %s failed!\n", curOutFileName);
            abort();
        }
    }

    if (NULL != outputBuffer) {
        free(outputBuffer);
        outputBuffer = NULL;
    }
    if (NULL != Maddr) {
        free(Maddr);
        Maddr = NULL;
    }
}
GB_binop__islt_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__islt_int8) // A.*B function (eWiseMult): GB (_AemultB_08__islt_int8) // A.*B function (eWiseMult): GB (_AemultB_02__islt_int8) // A.*B function (eWiseMult): GB (_AemultB_04__islt_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_int8) // A*D function (colscale): GB (_AxD__islt_int8) // D*A function (rowscale): GB (_DxB__islt_int8) // C+=B function (dense accum): GB (_Cdense_accumB__islt_int8) // C+=b function (dense accum): GB (_Cdense_accumb__islt_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_int8) // C=scalar+B GB (_bind1st__islt_int8) // C=scalar+B' GB (_bind1st_tran__islt_int8) // C=A+scalar GB (_bind2nd__islt_int8) // C=A'+scalar GB (_bind2nd_tran__islt_int8) // C type: int8_t // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 
0 // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLT || GxB_NO_INT8 || GxB_NO_ISLT_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// NOTE(review): kernel bodies below are supplied by the #include'd template
// files; each function only sets up typed locals for its template.  All are
// compiled out (returning GrB_NO_VALUE) when GB_DISABLE is true.

// This variant is not generated for ISLT (ISLT is not one of the accumulable
// dense-ewise3 ops), hence the #if 0 opened above and the ((none)) name.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__islt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__islt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__islt_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__islt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__islt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__islt_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read by the template when is_eWiseUnion is true
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__islt_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__islt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__islt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__islt_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__islt_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (GBB is true if present)
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__islt_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                  \
{                                          \
    int8_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (x < aij) ;                  \
}

GrB_Info GB (_bind1st_tran__islt_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                  \
{                                          \
    int8_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (aij < y) ;                  \
}

GrB_Info GB (_bind2nd_tran__islt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
exemplo_do_while.c
#include "exemplos.h"

// Exemplo Section
//
// Demonstrates a do-while loop nested inside an OpenMP parallel region.
// On every pass of the outer do-while, each thread privately counts
// max_loops/4 iterations; the reduction(+) then folds the per-thread
// counts into the shared `nloops`, so the final value is the total number
// of inner iterations performed by all threads across all outer passes.
int main(int argc, char **argv)
{
    // BUG FIX: `nloops` is the target of reduction(+:) and is printed after
    // the loop, so it must start at a defined value — it was previously
    // read uninitialized (undefined behavior).  Unused locals `nthreads`
    // and `thread_id` were removed.
    int nloops = 0;
    int max_loops = 1000;
    int j;

    printf("I am the main thread.\n");

    j = 0;
    do {
        // `private(j)` dropped: j is never referenced inside the region,
        // so the private copy only obscured the data-sharing intent.
        #pragma omp parallel reduction(+: nloops)
        {
            nloops = 0 * (max_loops / 4);
            do // Start horizontal loop
            {
                nloops++;
            } while (nloops < 1 * (max_loops / 4)); // End horizontal loop
        }
        j++;
    } while (j < max_loops);

    printf("The total number of iterations is %d\n", nloops);
    printf("Here I am, back to the main thread.\n");
    return 0;
}
GB_binop__times_fc32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): auto-generated instantiation of the TIMES operator for
// single-precision complex (GxB_FC32_t); the product itself is computed by
// GB_FC32_mul.  The macros configure the kernel templates #include'd below.
// Any change belongs in Generator/*, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):      GB (_AaddB__times_fc32)
// A.*B function (eWiseMult):    GB (_AemultB_01__times_fc32)
// A.*B function (eWiseMult):    GB (_AemultB_02__times_fc32)
// A.*B function (eWiseMult):    GB (_AemultB_03__times_fc32)
// A.*B function (eWiseMult):    GB (_AemultB_bitmap__times_fc32)
// A*D function (colscale):      GB (_AxD__times_fc32)
// D*A function (rowscale):      GB (_DxB__times_fc32)
// C+=B function (dense accum):  GB (_Cdense_accumB__times_fc32)
// C+=b function (dense accum):  GB (_Cdense_accumb__times_fc32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_fc32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_fc32)
// C=scalar+B    GB (_bind1st__times_fc32)
// C=scalar+B'   GB (_bind1st_tran__times_fc32)
// C=A+scalar    GB (_bind2nd__times_fc32)
// C=A'+scalar   GB (_bind2nd_tran__times_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_FC32_mul (aij, bij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_BTYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    GxB_FC32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    GxB_FC32_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_FC32_mul (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TIMES || GxB_NO_FC32 || GxB_NO_TIMES_FC32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NOTE(review): kernel bodies below are supplied by the #include'd template
// files; each function only sets up typed locals for its template.  All are
// compiled out (returning GrB_NO_VALUE) when GB_DISABLE is true.

void GB (_Cdense_ewise3_accum__times_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__times_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__times_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__times_fc32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__times_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__times_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__times_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__times_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__times_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__times_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__times_fc32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__times_fc32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (GBB is true if present)
        if (!GBB (Bb, p)) continue ;
        GxB_FC32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_FC32_mul (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__times_fc32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        GxB_FC32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_FC32_mul (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC32_mul (x, aij) ;            \
}

GrB_Info GB (_bind1st_tran__times_fc32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC32_mul (aij, y) ;            \
}

GrB_Info GB (_bind2nd_tran__times_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__abs_uint16_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// NOTE(review): auto-generated instantiation of the ABS operator for
// uint16_t output from int32_t input.  GB_CAST_OP first casts aij to
// uint16_t (GB_CASTING), then applies the op; ABS of an unsigned value is
// the identity, which is why GB_OP is simply `z = x`.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__abs_uint16_int32
// op(A') function: GB_tran__abs_uint16_int32

// C type:   uint16_t
// A type:   int32_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint16_t z = (uint16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)               \
{                                       \
    /* aij = Ax [pA] */                 \
    GB_GETA (aij, Ax, pA) ;             \
    /* Cx [pC] = op (cast (aij)) */     \
    GB_CASTING (x, aij) ;               \
    GB_OP (GB_CX (pC), x) ;             \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT16 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_uint16_int32
(
    uint16_t *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_uint16_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // only the numeric phase is run here; the template does the transpose
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
DataDependency.c
#include <math.h> #include <omp.h> void a(int *x, int *y, int n) { double factor = 1; for (int i = 0; i < n; i++) { x[i] = factor * y[i]; factor = factor / 2; } } void a_sol(int *x, int *y, int n) { double factor = 1; #pragma omp parallel for schedule(guided) for (int i = 0; i < n; i++) { x[i] = (factor / pow(2, i)) * y[i]; } } void b(int *x, int *y, int *z, int n) { for (int i = 1; i < n; i++) { x[i] = (x[i] + y[i - 1]) / 2; y[i] = y[i] + z[i] * 3; } } void b_sol(int *x, int *y, int *z, int n) { #pragma omp parallel { #pragma omp for for (int i = 1; i < n; i++) { y[i] = y[i] + z[i] * 3; } #pragma omp for for (int i = 1; i < n; i++) { x[i] = (x[i] + y[i - 1]) / 2; } }; } void c(int *x, int *y, int n, int twice) { x[0] = x[0] + 5 * y[0]; for (int i = 1; i < n; i++) { x[i] = x[i] + 5 * y[i]; if (twice) { x[i - 1] = 2 * x[i - 1]; } } } void c_sol(int *x, int *y, int n, int twice) { x[0] = x[0] + 5 * y[0]; #pragma omp parallel { #pragma omp for for (int i = 1; i < n; i++) { x[i] = x[i] + 5 * y[i]; } if (twice) { #pragma omp for for (int i = 1; i < n; i++) { x[i - 1] = 2 * x[i - 1]; } } }
scan.c
/**
 * scan.c
 * Authors: Yizhao Gao <yizhaotsccsj@gmail.com>
 * Date: {08/01/2017}
 *
 * Spatial scan statistic over circular windows: for every location and
 * every window radius, accumulate observed cases and expected intensity,
 * compute the log-likelihood ratio (LLR), and extract the top clusters.
 *
 * Review fixes:
 *  - <stdbool.h> added: `bool` was used without it (a C compile error).
 *  - loglikelihood: ll[i] was left UNINITIALIZED when cas == inten, so
 *    later `> -9990` tests read garbage; it is now set to 0 (observed ==
 *    expected means an LLR of zero).
 *  - findTopNCluster: guarded the first cluster against aCenter == -1
 *    (no qualifying window), which previously caused an out-of-bounds
 *    read at ll[-wCount - 1].
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <omp.h>

/* For each location i and window index k, count the cases (casInW) and sum
 * the intensities (intenInW) of all locations within radius (k+1)*wSize.
 * Outputs are locCount x wCount row-major arrays, zeroed first. */
void getWindowCandI(double * x, double * y, int * nCass, double * intensity, int locCount, double wSize, int wCount, int * casInW, double * intenInW) {
    double distance;
    int minWindow;

    for (int i = 0; i < locCount * wCount; i++) {
        casInW[i] = 0;
        intenInW[i] = 0.0;
    }

#pragma omp parallel for private(distance, minWindow)
    for (int i = 0; i < locCount; i++) {
        for (int j = 0; j < locCount; j++) {
            distance = sqrt((x[i] - x[j]) * (x[i] - x[j]) + (y[i] - y[j]) * (y[i] - y[j]));
            /* smallest window index whose radius reaches location j */
            minWindow = (int)(ceil(distance / wSize));
            if (minWindow > 0)
                minWindow--;
            for (int k = minWindow; k < wCount; k++) {
                casInW[i * wCount + k] += nCass[j];
                intenInW[i * wCount + k] += intensity[j];
            }
        }
    }
    return;
}

/* Same as getWindowCandI but accumulates case counts only. */
void getWindowCOnly(double * x, double * y, int * nCass, int locCount, double wSize, int wCount, int * casInW) {
    double distance;
    int minWindow;

    for (int i = 0; i < locCount * wCount; i++) {
        casInW[i] = 0;
    }

#pragma omp parallel for private(distance, minWindow)
    for (int i = 0; i < locCount; i++) {
        for (int j = 0; j < locCount; j++) {
            distance = sqrt((x[i] - x[j]) * (x[i] - x[j]) + (y[i] - y[j]) * (y[i] - y[j]));
            minWindow = (int)(ceil(distance / wSize));
            if (minWindow > 0)
                minWindow--;
            for (int k = minWindow; k < wCount; k++) {
                casInW[i * wCount + k] += nCass[j];
            }
        }
    }
    return;
}

/* Compute the Poisson log-likelihood ratio for every window.
 * highLow selects the cluster types scored: 1 = high only, -1 = low only,
 * anything else = both.  Windows that cannot be scored (cas == -1 sentinel
 * or near-zero intensity) get -9999, which downstream code treats as
 * "not a candidate". */
void loglikelihood(double * ll, int * casInW, double * intenInW, int totalWindow, int casCount, int highLow) {
    double cas, inten;
    double llTemp;

    bool highCluster = true;
    bool lowCluster = true;

    if (highLow == 1)
        lowCluster = false;
    else if (highLow == -1)
        highCluster = false;

#pragma omp parallel for private(cas, inten, llTemp)
    for (int i = 0; i < totalWindow; i++) {
        cas = casInW[i];            /* exact: casInW holds integers */
        inten = intenInW[i];
        if (cas == -1 || inten < 0.000001) {
            ll[i] = -9999;
        }
        else if (cas > inten) {
            // High cluster of cases
            if (highCluster) {
                llTemp = cas * log(cas / inten);
                if (cas < casCount)
                    llTemp += (casCount - cas) * log((casCount - cas) / (casCount - inten));
                ll[i] = llTemp;
            }
            else
                ll[i] = -9999;
        }
        else if (cas < inten) {
            // Low cluster of cases
            if (lowCluster) {
                llTemp = (casCount - cas) * log((casCount - cas) / (casCount - inten));
                if (cas > 0)
                    llTemp += cas * log(cas / inten);
                ll[i] = llTemp;
            }
            else
                ll[i] = -9999;
        }
        else {
            // BUG FIX: observed equals expected — LLR is exactly 0.
            // Previously ll[i] was left uninitialized here.
            ll[i] = 0;
        }
    }
    return;
}

/* Greedy extraction of up to nClusters non-overlapping clusters, best LLR
 * first.  After each pick, every window intersecting the chosen circle is
 * invalidated (set to -9999).  NOTE: ll is modified in place. */
void findTopNCluster(double * x, double * y, int locCount, double * ll, double wSize, int wCount, int * center, int * radius, double * cLL, int nClusters) {
    if (nClusters < 1)
        return;

    int aCenter = -1;
    int aRadius = -1;

    for (int i = 0; i < locCount; i++) {
        for (int j = 0; j < wCount; j++) {
            if (ll[i * wCount + j] > -9990) {
                if (aCenter < 0) {
                    aCenter = i;
                    aRadius = j;
                }
                else if (ll[i * wCount + j] > ll[aCenter * wCount + aRadius]) {
                    aCenter = i;
                    aRadius = j;
                }
            }
        }
    }

    center[0] = aCenter;
    radius[0] = aRadius;
    // BUG FIX: if no window qualified, aCenter is -1 and the old code read
    // ll[-wCount - 1] out of bounds.  Bail out instead.
    if (aCenter == -1)
        return;
    cLL[0] = ll[aCenter * wCount + aRadius];

    double lastX, lastY, lastRad;
    lastX = x[aCenter];
    lastY = y[aCenter];
    lastRad = (aRadius + 1) * wSize;

    double distance;
    int maxWindow;

    for (int c = 1; c < nClusters; c++) {
        // Remove clusters intersecting the one just reported
        for (int i = 0; i < locCount; i++) {
            distance = sqrt((x[i] - lastX) * (x[i] - lastX) + (y[i] - lastY) * (y[i] - lastY)) - lastRad;
            maxWindow = ceil(distance / wSize) - 1;
            if (maxWindow < 0)
                maxWindow = 0;
            for (int j = maxWindow; j < wCount; j++)
                ll[i * wCount + j] = -9999;
        }

        // Find secondary clusters among the remaining windows
        aCenter = -1;
        aRadius = -1;
        for (int i = 0; i < locCount; i++) {
            for (int j = 0; j < wCount; j++) {
                if (ll[i * wCount + j] > -9990) {
                    if (aCenter < 0) {
                        aCenter = i;
                        aRadius = j;
                    }
                    else if (ll[i * wCount + j] > ll[aCenter * wCount + aRadius]) {
                        aCenter = i;
                        aRadius = j;
                    }
                }
            }
        }

        center[c] = aCenter;
        radius[c] = aRadius;
        if (aCenter != -1)
            cLL[c] = ll[aCenter * wCount + aRadius];
        else
            break;

        lastX = x[aCenter];
        lastY = y[aCenter];
        lastRad = (aRadius + 1) * wSize;
    }
    return;
}
if-1.c
/* { dg-do compile } */

/* GCC OpenMP front-end parse test: exercises the `if` clause with and
   without a directive-name modifier (e.g. `if (parallel : a)`) on every
   construct that accepts one, including combined constructs with several
   modified `if` clauses.  The exact pragma spellings ARE the test — this
   file must only compile cleanly; nothing is executed.  Do not "clean up"
   the pragmas.  */

void foo (int a, int b, int *p, int *q)
{
  int i;
  /* parallel: unmodified and parallel-modified if clauses */
  #pragma omp parallel if (a)
  ;
  #pragma omp parallel if (parallel:a)
  ;
  /* combined parallel for simd: each constituent may get its own if */
  #pragma omp parallel for simd if (a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp parallel for simd if (parallel : a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp parallel for simd if (simd : a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp parallel for simd if (simd : a) if (parallel:b)
  for (i = 0; i < 16; i++)
    ;
  /* task and taskloop variants */
  #pragma omp task if (a)
  ;
  #pragma omp task if (task: a)
  ;
  #pragma omp taskloop if (a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp taskloop if (taskloop : a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp taskloop simd if (a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp taskloop simd if (taskloop : a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp taskloop simd if (simd : a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp taskloop simd if (taskloop:b) if (simd : a)
  for (i = 0; i < 16; i++)
    ;
  /* target constructs, including multi-word directive-name modifiers */
  #pragma omp target if (a)
  ;
  #pragma omp target if (target: a)
  ;
  #pragma omp target simd if (a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp target simd if (simd : a) if (target: b)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp target teams distribute parallel for simd if (a)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp target teams distribute parallel for simd if (parallel : a) if (target: b)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp target teams distribute parallel for simd if (simd : a) if (target: b)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp target data if (a) map (p[0:2])
  ;
  #pragma omp target data if (target data: a) map (p[0:2])
  ;
  /* stand-alone directives: no associated statement follows */
  #pragma omp target enter data if (a) map (to: p[0:2])
  #pragma omp target enter data if (target enter data: a) map (to: p[0:2])
  #pragma omp target exit data if (a) map (from: p[0:2])
  #pragma omp target exit data if (target exit data: a) map (from: p[0:2])
  #pragma omp target update if (a) to (q[0:3])
  #pragma omp target update if (target update:a) to (q[0:3])
  /* cancel: must appear inside the matching construct kind */
  #pragma omp parallel
  {
    #pragma omp cancel parallel if (a)
  }
  #pragma omp parallel
  {
    #pragma omp cancel parallel if (cancel:a)
  }
  #pragma omp for
  for (i = 0; i < 16; i++)
    {
      #pragma omp cancel for if (a)
    }
  #pragma omp for
  for (i = 0; i < 16; i++)
    {
      #pragma omp cancel for if (cancel: a)
    }
  #pragma omp sections
  {
    #pragma omp section
    {
      #pragma omp cancel sections if (a)
    }
  }
  #pragma omp sections
  {
    #pragma omp section
    {
      #pragma omp cancel sections if (cancel: a)
    }
  }
  #pragma omp taskgroup
  {
    #pragma omp task
    {
      #pragma omp cancel taskgroup if (a)
    }
    #pragma omp task
    {
      #pragma omp cancel taskgroup if (cancel: a)
    }
  }
}
regression.c
#include "parameters.h" // Includes all defined parameters #include "axi.h" // Axisymmetric coordinates #include "navier-stokes/centered.h" // To solve the Navier-Stokes #include "two-phase.h" // Implements two-phase flow // #include <omp.h> // For openMP parallel #include <gsl/gsl_fit.h> double * forces_array; double * times_array; double gradient = 0.5; double intercept = 4.9; int main () { forces_array = malloc(INTERP_NO * sizeof(double)); times_array = malloc(INTERP_NO * sizeof(double)); DT = 1e-2; run(); free(forces_array); free(times_array); } event force(i++) { if (i < INTERP_NO) { forces_array[i] = gradient * t + intercept; times_array[i] = t; } else { double c0, c1, cov00, cov01, cov11, sumsq; gsl_fit_linear ( times_array, 1, forces_array, 1, INTERP_NO, &c0, &c1, \ &cov00, &cov01, &cov11, &sumsq); fprintf(stderr, "t = %g, c0 = %g, c1 = %g\n", t, c0, c1); double current_force = gradient * t + intercept; double interp_force = c0 + c1 * t; fprintf(stderr, "f = %g, f_interp = %g", current_force, interp_force); fprintf(stderr, "\n"); // #pragma omp critical for (int j = 0; j < INTERP_NO - 1; j++) { forces_array[j] = forces_array[j + 1]; times_array[j] = times_array[j + 1]; } forces_array[INTERP_NO - 1] = current_force; times_array[INTERP_NO - 1] = t; } } event end(t = 1) { fprintf(stderr, "Ended\n"); }
functions.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include "functions.h" //compute a*b mod p safely unsigned int modprod(unsigned int a, unsigned int b, unsigned int p) { unsigned int za = a; unsigned int ab = 0; while (b > 0) { if (b%2 == 1) ab = (ab + za) % p; za = (2 * za) % p; b /= 2; } return ab; } //compute a^b mod p safely unsigned int modExp(unsigned int a, unsigned int b, unsigned int p) { unsigned int z = a; unsigned int aExpb = 1; while (b > 0) { if (b%2 == 1) aExpb = modprod(aExpb, z, p); z = modprod(z, z, p); b /= 2; } return aExpb; } //returns either 0 or 1 randomly unsigned int randomBit() { return rand()%2; } //returns a random integer which is between 2^{n-1} and 2^{n} unsigned int randXbitInt(unsigned int n) { unsigned int r = 1; for (unsigned int i=0; i<n-1; i++) { r = r*2 + randomBit(); } return r; } //tests for primality and return 1 if N is probably prime and 0 if N is composite unsigned int isProbablyPrime(unsigned int N) { if (N%2==0) return 0; //not interested in even numbers (including 2) unsigned int NsmallPrimes = 168; unsigned int smallPrimeList[168] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997}; //before using a probablistic primality check, check directly 
using the small primes list for (unsigned int n=1;n<NsmallPrimes;n++) { if (N==smallPrimeList[n]) return 1; //true if (N%smallPrimeList[n]==0) return 0; //false } //if we're testing a large number switch to Miller-Rabin primality test unsigned int r = 0; unsigned int d = N-1; while (d%2 == 0) { d /= 2; r += 1; } for (unsigned int n=0;n<NsmallPrimes;n++) { unsigned int k = smallPrimeList[n]; unsigned int x = modExp(k,d,N); if ((x==1) || (x==N-1)) continue; for (unsigned int i=1;i<r-1;i++) { x = modprod(x,x,N); if (x == 1) return 0; //false if (x == N-1) break; } // see whether we left the loop becasue x==N-1 if (x == N-1) continue; return 0; //false } return 1; //true } //Finds a generator of Z_p using the assumption that p=2*q+1 unsigned int findGenerator(unsigned int p) { unsigned int g; unsigned int q = (p-1)/2; do { //make a random number 1<= g < p g = randXbitInt(32)%p; //could also have passed n to findGenerator } while (g==0 || (modExp(g,q,p)==1) || (modExp(g,2,p)==1)); return g; } void setupElGamal(unsigned int n, unsigned int *p, unsigned int *g, unsigned int *h, unsigned int *x) { /* Use isProbablyPrime and randomXbitInt to find a new random n-bit prime number which satisfies p=2*q+1 where q is also prime */ unsigned int q; do { *p = randXbitInt(n); q = (*p-1)/2; } while (!isProbablyPrime(*p) || !isProbablyPrime(q)); /* Use the fact that p=2*q+1 to quickly find a generator */ *g = findGenerator(*p); //pick a secret key, x *x = randXbitInt(n)%(*p); //compute h *h = modExp(*g,*x,*p); printf("ElGamal Setup successful.\n"); printf("p = %u. 
\n", *p); printf("g = %u is a generator of Z_%u \n", *g, *p); printf("Secret key: x = %u \n", *x); printf("h = g^x = %u\n", *h); printf("\n"); } void ElGamalEncrypt(unsigned int *m, unsigned int *a, unsigned int Nints, unsigned int p, unsigned int g, unsigned int h) { /* Q2.1 Parallelize this function with OpenMP */ #pragma omp parallel for for (unsigned int i=0; i<Nints;i++) { //pick y in Z_p randomly unsigned int y; do { y = randXbitInt(32)%p; } while (y==0); //dont allow y=0 //compute a = g^y a[i] = modExp(g,y,p); //compute s = h^y unsigned int s = modExp(h,y,p); //encrypt m by multiplying with s m[i] = modprod(m[i],s,p); } } void ElGamalDecrypt(unsigned int *m, unsigned int *a, unsigned int Nints, unsigned int p, unsigned int x) { /* Q2.1 Parallelize this function with OpenMP */ #pragma parallel for for (unsigned int i=0; i<Nints;i++) { //compute s = a^x unsigned int s = modExp(a[i],x,p); //compute s^{-1} = s^{p-2} unsigned int invS = modExp(s,p-2,p); //decrypt message by multplying by invS m[i] = modprod(m[i],invS,p); } } //Pad the end of string so its length is divisible by Nchars // Assume there is enough allocated storage for the padded string void padString(unsigned char* string, unsigned int charsPerInt) { unsigned int n = strlen(string); unsigned int r = n%charsPerInt; unsigned int pad = charsPerInt - r; int j = 0; while (j < pad) { string[n+j] = ' '; j++; } string[n+pad] = '\0'; } void convertStringToZ(unsigned char *string, unsigned int Nchars, unsigned int *Z, unsigned int Nints) { unsigned int charsPerInt = Nchars/Nints; #pragma parallel for for (int i = 0; i < Nchars; i+= charsPerInt) { if (charsPerInt == 1) { Z[i] = (unsigned int) string[i]; } if (charsPerInt == 2) { Z[i] = (unsigned int) string[i] + 255 + (256 * (unsigned int) string[i+1]); } if (charsPerInt == 3) { Z[i] = (unsigned int) string[i] + 255 + 256 * (unsigned int) string[i+1] + 256 * 256 * (unsigned int) string[i+2]; } } } void convertZToString(unsigned int *Z, unsigned int Nints, 
unsigned char *string, unsigned int Nchars) { unsigned int charsPerInt = Nchars/Nints; #pragma parallel for for (int i = 0; i < Nints; i+= charsPerInt) { if (charsPerInt == 1) { string[i] = (char) Z[i]; } if (charsPerInt == 2) { string[i+1] = (char) ((Z[i]-255/256) % 256); string[i] = (char) (Z[i] - 256 * string[i+1]) % 256 + 1; } if (charsPerInt == 3) { string[i+1] = (char) ((Z[i] - 255)/256) % 256; string[i] = (char) (Z[i] - 256 * string[i+1]) % 256 + 1; string[i+2] = (char) ((Z[i] - ((string[i+1]*256) + (string[i] + 255)))/(256*256)); } } /* Q1.4 Complete this function */ /* Q2.2 Parallelize this function with OpenMP */ }
spmv_N_thread_dynamic.c
/* ********************************************* * 314 Principles of Programming Languages * * Fall 2016 * ********************************************* * * Read a real (non-complex) sparse matrix from a Matrix Market (v. 2.0) file * and a vector from a txt file, perform matrix multiplication and store the * result to output.txt. This is the parallel and static version of sparse matrix vector * multiplication. * * * * NOTES: * * 1) Matrix Market files are always 1-based, i.e. the index of the first * element of a matrix is (1,1), not (0,0) as in C. ADJUST THESE * OFFSETS ACCORDINGLY offsets accordingly when reading and writing * to files. * * 2) ANSI C requires one to use the "l" format modifier when reading * double precision floating point numbers in scanf() and * its variants. For example, use "%lf", "%lg", or "%le" * when reading doubles, otherwise errors will occur. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "mmio.h" #include <omp.h> #include <sys/time.h> #include "utils.h" //sorting according to the index void quicksort(double* a, double* vindex, int* rindex, int* cindex, int n) { int i, j, m; double p, t, s; if (n < 2) return; p = vindex[n / 2]; for (i = 0, j = n - 1;; i++, j--) { while (vindex[i]<p) i++; while (p<vindex[j]) j--; if (i >= j) break; t = a[i]; a[i] = a[j]; a[j] = t; s = vindex[i]; vindex[i] = vindex[j]; vindex[j] = s; m = rindex[i]; rindex[i] = rindex[j]; rindex[j] = m; m = cindex[i]; cindex[i] = cindex[j]; cindex[j] = m; } quicksort(a, vindex, rindex, cindex, i); quicksort(a + i, vindex + i, rindex + i, cindex + i, n - i); } int main(int argc, char *argv[]) { int ret_code; MM_typecode matcode; FILE *f; int M, N, nz; //M is row number, N is column number and nz is the number of entry int tmp, i, j, vecdim, *rIndex, *cIndex, *rsIndex, *reIndex; double *val, *res, *vec, *vIndex; if (argc < 4) { fprintf(stderr, "Usage: %s [martix-market-filename] [input-vector-filename] [thread-num]\n", argv[0]); exit(1); } 
printf("\nOpening input matrix file: %s\n", argv[1]); if ((f = fopen(argv[1], "r")) == NULL) { printf("Fail to open the input matrix file!\n"); exit(1); } if (mm_read_banner(f, &matcode) != 0) { printf("Could not process Matrix Market banner.\n"); exit(1); } /* This is how one can screen matrix types if their application */ /* only supports a subset of the Matrix Market data types. */ if (mm_is_complex(matcode) && mm_is_matrix(matcode) && mm_is_sparse(matcode)) { printf("Sorry, this application does not support "); printf("Market Market type: [%s]\n", mm_typecode_to_str(matcode)); exit(1); } /* find out size of sparse matrix .... */ if ((ret_code = mm_read_mtx_crd_size(f, &M, &N, &nz)) != 0) exit(1); /* reseve memory for matrices */ rIndex = (int *)malloc(nz * sizeof(int)); cIndex = (int *)malloc(nz * sizeof(int)); val = (double *)malloc(nz * sizeof(double)); /* NOTE: when reading in doubles, ANSI C requires the use of the "l" */ /* specifier as in "%lg", "%lf", "%le", otherwise errors will occur */ /* (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 
136 lines 13-15) */ for (i = 0; i<nz; i++) { fscanf(f, "%d %d %lg\n", &rIndex[i], &cIndex[i], &val[i]); rIndex[i]--; /* adjust from 1-based to 0-based */ cIndex[i]--; } if (f != stdin) fclose(f); printf("Opening input vector file: %s\n", argv[2]); //open and load the vector input if ((f = fopen(argv[2], "r")) == NULL) { printf("Fail to open the input vector file!\n"); exit(1); } fscanf(f, "%d\n", &vecdim); if (vecdim != M) { printf("dimension mismatch!\n"); exit(1); } vec = (double*)malloc(vecdim * sizeof(double)); for (i = 0; i<vecdim; i++) { fscanf(f, "%lg\n", &vec[i]); } if (f != stdin) fclose(f); //the original calculation result double* res_seq = (double*)malloc(M*sizeof(double)); memset(res_seq, 0, M*sizeof(double)); getmul(val, vec, rIndex, cIndex, nz, res_seq); vIndex = (double*)malloc(nz*sizeof(double)); memset(vIndex, 0, nz*sizeof(double)); for (i = 0; i < nz; i++) { vIndex[i] = (double)rIndex[i] * N + cIndex[i]; if (vIndex[i] < 0) { printf("Error!\n"); exit(1); } } quicksort(val, vIndex, rIndex, cIndex, nz); //We use rsIndex/reIndex to keep the start/end position of each row. The intial values are //-1 or -2 for all entries. rsIndex[i] indicates the start poistion of the i-th row. Hence //the position index of the i-th row is from rsIndex[i] to reIndex[i] rsIndex = (int*)malloc(M*sizeof(int)); //start/end position of each row memset(rsIndex, -1, M*sizeof(int)); reIndex = (int*)malloc(M*sizeof(int)); memset(reIndex, -2, M*sizeof(int)); for (i = 0; i<nz; i++) { int tmp = (int)(vIndex[i] / N); if (rsIndex[tmp] == -1) { rsIndex[tmp] = i; reIndex[tmp] = i; } else reIndex[tmp] = i; } int thread_num = atoi(argv[3]); omp_set_num_threads(thread_num); printf("\n Start computation ... 
\n"); struct timeval start, end; gettimeofday(&start, NULL); /************************/ /* now calculate the multiplication */ /************************/ res = (double*)malloc(M*sizeof(double)); memset(res, 0, M*sizeof(double)); // Your OpenMP pragma should be inserted for one or both loops below. // You need to determine which loop is safe to be parallelized. // You will also need to use correct parallelization parameters. Please use a // dynamic schedule for this parallelization stratey. #pragma omp parallel num_threads(thread_num) { #pragma omp for private(j, i, tmp) schedule(dynamic) for (i=0; i<M; i++) { for (j = rsIndex[i]; j <= reIndex[i]; j++) { tmp = cIndex[j]; res[i] += val[j] * vec[tmp]; } } } gettimeofday(&end, NULL); printf(" End of computation ... \n\n"); long elapsed_time = ((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)); if (!checkerror(res, res_seq, M)) { printf("Calculation Error!\n"); exit(1); } else { printf(" Test Result Passed ... \n"); } printf("Dynamic Parallelization Total time: %ld micro-seconds\n\n", elapsed_time); if (!checkerror(res, res_seq, M)) { printf("Calculation Error!\n"); exit(1); } // save the result if ((f = fopen("output.txt", "w")) == NULL) { printf("Fail to open the output file!\n"); exit(1); } for (i = 0; i<vecdim; i++) { fprintf(f, "%lg\n", res[i]); } fclose(f); free(res_seq); free(vIndex); free(res); free(vec); free(rIndex); free(cIndex); free(val); free(rsIndex); free(reIndex); return 0; }
tsbjac2.c
/* tsbjac2: benchmark/verification driver for the batched symmetric 2x2
   Jacobi kernel sbjac2_.  Reads per-batch input streams from files
   "<name>.f"/".g"/".h" (matrix entries a11/a22/a21) and reference streams
   "<name>.k"/".l", times the kernel with the TSC, and prints CSV rows of
   timing and max-norm accuracy measures.
   NOTE(review): VSL, VSLlg, VA, fint, wide, W_ZERO, atoz, imax, fmaxw,
   worr, wrer, wlam, xtoa and the rdtsc_*/tsc_* helpers all come from the
   project headers below; their semantics here are inferred from usage
   only.  */
#include "sbjac2.h"
#include "wnrme.h"
#include "rnd.h"
#include "timer.h"

int main(int argc, char *argv[])
{
  /* usage: tsbjac2 filename 2^{batch_size} #batches */
  if (4 != argc) {
    (void)fprintf(stderr, "%s filename 2^{batch_size} #batches\n", *argv);
    return EXIT_FAILURE;
  }
  /* n = entries per batch = 2^argv[2]; must divide evenly among vector
     lanes (VSL) and, under OpenMP, among the threads. */
  const size_t n = ((size_t)1u << atoz(argv[2u]));
  if (n % VSL) {
    (void)fprintf(stderr, "batch_size has to be a multiple of %u.\n", VSL);
    return EXIT_FAILURE;
  }
  int th = 0;
#ifdef _OPENMP
  th = omp_get_max_threads();
  if (n % th) {
    (void)fprintf(stderr, "batch_size has to be a multiple of %d.\n", th);
    return EXIT_FAILURE;
  }
#endif /* _OPENMP */
  const size_t b = atoz(argv[3u]); /* number of batches */
  if (!b)
    return EXIT_SUCCESS;
  /* Build "<name>.x" in fn; the suffix letter at fn[nl1] is switched below
     to select each data stream.  (nl+3 bytes: name + '.' + letter + NUL;
     strcpy returns its destination, so fn[nl] = '.' appends the dot.) */
  const size_t nl = strlen(argv[1u]), nl1 = (nl + 1u);
  char *const fn = calloc((nl + 3u), sizeof(char));
  assert(fn);
  strcpy(fn, argv[1u])[nl] = '.';
  int fm = O_RDONLY;
#ifdef _LARGEFILE64_SOURCE
  fm |= O_LARGEFILE;
#endif /* _LARGEFILE64_SOURCE */
  fn[nl1] = 'k';
  const int fk = open(fn, fm);
  if (-1 >= fk) {
    (void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
    return EXIT_FAILURE;
  }
  fn[nl1] = 'l';
  const int fl = open(fn, fm);
  if (-1 >= fl) {
    (void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
    return EXIT_FAILURE;
  }
  fn[nl1] = 'f';
  const int ff = open(fn, fm);
  if (-1 >= ff) {
    (void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
    return EXIT_FAILURE;
  }
  fn[nl1] = 'g';
  const int fg = open(fn, fm);
  if (-1 >= fg) {
    (void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
    return EXIT_FAILURE;
  }
  fn[nl1] = 'h';
  const int fh = open(fn, fm);
  if (-1 >= fh) {
    (void)fprintf(stderr, "Cannot open %s for reading!\n", fn);
    return EXIT_FAILURE;
  }
  /* VA-aligned per-batch working arrays of n floats each:
     inputs a11/a22/a21, outputs c/at (presumably cosine/tangent-like
     rotation parameters -- inferred from usage) and l1/l2 (eigenvalues). */
  const size_t nt = n * sizeof(float);
  float
    *const a11 = (float*)aligned_alloc(VA, nt),
    *const a22 = (float*)aligned_alloc(VA, nt),
    *const a21 = (float*)aligned_alloc(VA, nt),
    *const c = (float*)aligned_alloc(VA, nt),
    *const at = (float*)aligned_alloc(VA, nt),
    *const l1 = (float*)aligned_alloc(VA, nt),
    *const l2 = (float*)aligned_alloc(VA, nt);
  assert(a11);
  assert(a22);
  assert(a21);
  assert(c);
  assert(at);
  assert(l1);
  assert(l2);
  /* one auxiliary word per VSL-wide vector */
  unsigned *const p = (unsigned*)malloc((n >> VSLlg) * sizeof(unsigned));
  assert(p);
  unsigned rd[2u] = { 0u, 0u };
  uint64_t hz = tsc_get_freq_hz_(rd), be[2u] = { UINT64_C(0), UINT64_C(0) };
  (void)fprintf(stderr, "TSC frequency: %llu+(%u/%u) Hz.\n", (unsigned long long)hz, rd[0u], rd[1u]);
  (void)fflush(stderr);
  /* CSV header: batch, time, orthogonality and relative-error columns */
  (void)fprintf(stdout, "\"B\",\"Ts\",\"ORT\",\"REN\",\"RLN\",\"RLX\",\"RLM\"\n");
  (void)fflush(stdout);
  /* pick a fixed-width batch-number format for aligned output */
  const char *bf = (const char*)NULL;
  if (b <= 10u)
    bf = "%1zu";
  else if (b <= 100u)
    bf = "%2zu";
  else if (b <= 1000u)
    bf = "%3zu";
  else // b > 1000
    bf = "%zu";
  const size_t n_t = n / imax(th, 1); /* entries per thread */
  const size_t cnt = n_t * sizeof(float); /* bytes per thread-chunk */
  /* scratch buffer for xtoa number formatting */
  char a[31u] = { '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0' };
  for (size_t j = 0u; j < b; ++j) {
    (void)fprintf(stdout, bf, j);
    (void)fflush(stdout);
    const size_t jn = j * n; /* offset (in floats) of batch j in the files */
    /* read this batch's inputs; each thread preads its own contiguous
       chunk at (jn + tnt) floats into a11/a22/a21 */
#ifdef _OPENMP
#pragma omp parallel default(none) shared(ff,fg,fh,a11,a22,a21,n,n_t,cnt,jn)
#endif /* _OPENMP */
    {
      const int mt =
#ifdef _OPENMP
        omp_get_thread_num()
#else /* !_OPENMP */
        0
#endif /* ?_OPENMP */
        ;
      const size_t tnt = mt * n_t;
      const off_t off = (jn + tnt) * sizeof(float);
      if ((ssize_t)cnt != pread(ff, (a11 + tnt), cnt, off))
        exit(EXIT_FAILURE);
      if ((ssize_t)cnt != pread(fg, (a22 + tnt), cnt, off))
        exit(EXIT_FAILURE);
      if ((ssize_t)cnt != pread(fh, (a21 + tnt), cnt, off))
        exit(EXIT_FAILURE);
    }
    (void)fprintf(stdout, ",");
    (void)fflush(stdout);
    /* time the kernel with serializing TSC reads; the negated n is the
       kernel's batch-length convention (see sbjac2.h) */
    be[0u] = rdtsc_beg(rd);
    const fint _n = -(fint)n;
    (void)sbjac2_(&_n, a11, a22, a21, c, at, l1, l2, p);
    be[1u] = rdtsc_end(rd);
    (void)fprintf(stdout, "%15.9Lf,", tsc_lap(hz, be[0u], be[1u]));
    (void)fflush(stdout);
    /* accuracy pass 1: max orthogonality error (worr) and max relative
       decomposition error (wrer) over the batch */
    wide o = W_ZERO, r = W_ZERO;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,a11,a22,a21,c,at,l1,l2) reduction(max:o,r)
#endif /* _OPENMP */
    for (size_t i = 0u; i < n; ++i) {
      const wide CS = (wide)(c[i]);
      const wide SN = (wide)(at[i]);
      wide AE = W_ZERO, AN = W_ZERO;
      o = fmaxw(o, worr(CS, SN));
      r = fmaxw(r, wrer(a11[i], a22[i], a21[i], CS, SN, l1[i], l2[i], &AE, &AN));
    }
    (void)fprintf(stdout, "%s,", xtoa(a, (long double)o));
    (void)fprintf(stdout, "%s", xtoa(a, (long double)r));
    (void)fflush(stdout);
    /* reload reference c/at values for this batch from the .k/.l files
       (overwriting the kernel's outputs) */
#ifdef _OPENMP
#pragma omp parallel default(none) shared(fk,fl,c,at,n,n_t,cnt,jn)
#endif /* _OPENMP */
    {
      const int mt =
#ifdef _OPENMP
        omp_get_thread_num()
#else /* !_OPENMP */
        0
#endif /* ?_OPENMP */
        ;
      const size_t tnt = mt * n_t;
      const off_t off = (jn + tnt) * sizeof(float);
      if ((ssize_t)cnt != pread(fk, (c + tnt), cnt, off))
        exit(EXIT_FAILURE);
      if ((ssize_t)cnt != pread(fl, (at + tnt), cnt, off))
        exit(EXIT_FAILURE);
    }
    (void)fprintf(stdout, ",");
    (void)fflush(stdout);
    /* accuracy pass 2: compare computed eigenvalues l1/l2 against the
       reference data via wlam; track max relative error and the wlam
       auxiliary outputs AE/AN */
    wide x = W_ZERO, m = W_ZERO;
    r = W_ZERO;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,l1,l2,c,at) reduction(max:r,x,m)
#endif /* _OPENMP */
    for (size_t i = 0u; i < n; ++i) {
      wide AE = W_ZERO, AN = W_ZERO;
      const wide RE = wlam(l1[i], l2[i], c[i], at[i], &AE, &AN);
      r = fmaxw(r, RE);
      x = fmaxw(x, AE);
      m = fmaxw(m, AN);
    }
    (void)fprintf(stdout, "%s,", xtoa(a, (long double)r));
    (void)fprintf(stdout, "%s,", xtoa(a, (long double)x));
    (void)fprintf(stdout, "%s\n", xtoa(a, (long double)m));
    (void)fflush(stdout);
  }
  /* release all resources in reverse order of acquisition */
  (void)close(fh);
  (void)close(fg);
  (void)close(ff);
  (void)close(fl);
  (void)close(fk);
  free(p);
  free(l2);
  free(l1);
  free(at);
  free(c);
  free(a21);
  free(a22);
  free(a11);
  free(fn);
  return EXIT_SUCCESS;
}
scrypt_fmt.c
/*
 * This file is part of John the Ripper password cracker,
 * Copyright (c) 2013 by Solar Designer
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * There's ABSOLUTELY NO WARRANTY, express or implied.
 */

/* JtR format plugin for scrypt ($7$ crypt hashes), which also canonicalizes
   Cisco type-9 ($9$) and ScryptKDF.pm hashes into the $7$ form in prepare().
   NOTE: the source is truncated in this view -- it continues past the H3
   macro at the bottom. */

#include <stdio.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#include "escrypt/crypto_scrypt.h"

#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "base64_convert.h"
#include "memdbg.h"

#define FORMAT_LABEL "scrypt"
#define FORMAT_NAME ""
#define FMT_TAG7 "$7$"
#define FMT_TAG7_LEN (sizeof(FMT_TAG7)-1)
#define FMT_CISCO9 "$9$"
#define FMT_CISCO9_LEN (sizeof(FMT_CISCO9)-1)
#define FMT_SCRYPTKDF "$ScryptKDF.pm$"
#define FMT_SCRYPTKDF_LEN (sizeof(FMT_SCRYPTKDF)-1)
/* pick the Salsa20/8 core variant name to report, by compile-time ISA */
#ifdef __XOP__
#define ALGORITHM_NAME "Salsa20/8 128/128 XOP"
#elif defined(__AVX__)
#define ALGORITHM_NAME "Salsa20/8 128/128 AVX"
#elif defined(__SSE2__)
#define ALGORITHM_NAME "Salsa20/8 128/128 SSE2"
#else
#define ALGORITHM_NAME "Salsa20/8 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT " (16384, 8, 1)"
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 256
#define BINARY_ALIGN 1
#define SALT_SIZE BINARY_SIZE
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1

/* Known hash/plaintext pairs used by JtR's self-test. */
static struct fmt_tests tests[] = {
	{"$7$C6..../....SodiumChloride$kBGj9fHznVYFQMEn/qDCfrDevf9YDtcDdKvEqHJLV8D", "pleaseletmein"},
	{"$7$C6..../....\x01\x09\x0a\x0d\x20\x7f\x80\xff$b7cKqzsQk7txdc9As1WZBHjUPNWQWJW8A.UUUTA5eD1", "\x01\x09\x0a\x0d\x20\x7f\x80\xff"},
	{"$7$2/..../....$rNxJWVHNv/mCNcgE/f6/L4zO6Fos5c2uTzhyzoisI62", ""},
	{"$7$86....E....NaCl$xffjQo7Bm/.SKRS4B2EuynbOLjAmXU5AbDbRXhoBl64", "password"},
	// cisco type 9 hashes. . They are $7$C/..../.... type  (N=16384, r=1, p=1) different base-64 (same as WPA).  salt used RAW
	{"$9$nhEmQVczB7dqsO$X.HsgL6x1il0RxkOSSvyQYwucySCt7qFm4v7pqCxkKM", "cisco"},
	{"$9$cvWdfQlRRDKq/U$VFTPha5VHTCbSgSUAo.nPoh50ZiXOw1zmljEjXkaq1g", "123456"},
	{"$9$X9fA8mypebLFVj$Klp6X9hxNhkns0kwUIinvLRSIgWOvCwDhVTZqjsycyU", "JtR"},
	// 3rd type ScryptKDF.pm format  (we saw this in CMIYC 2013)
	// Generate in perl with scrypt_hash($_[1],$salt,1<<$N,$r,$p,$bytes)
	// to put into proper format, we mime->raw the salt and mime->cryptBS the hash hash, and fixup $N,$r,$p
	// For this hash we replace the default ':' chars in the hash with '*' so they will end up as 1
	// field, and change the SCRYPT into $ScryptKDF.pm$.  So this hash
	// SCRYPT:16384:8:1:VHRuaXZOZ05INWJs:JjrOzA8pdPhLvLh8sY64fLLaAjFUwYCXMmS16NXcn0A=
	// gets change into (by ScryptKDF2john)
	// $ScryptKDF.pm$16384*8*1*VHRuaXZOZ05INWJs*JjrOzA8pdPhLvLh8sY64fLLaAjFUwYCXMmS16NXcn0A=
	// and then in prepare, this becomes (which is canonical for this format)
	// $7$C6..../....TtnivNgNH5bl$acXnAzE8oVzGwW9Tlu6iw7fq021J/1sZmEKhcLBrT02
	{"$ScryptKDF.pm$16384*8*1*bjZkemVmZ3lWVi42*cmBflTPsqGIbg9ZIJRTQdbic8OCUH+904TFmNPBkuEA=","test123"},
	{"$ScryptKDF.pm$16384*8*1*VlVYUzBhQmlNbk5J*bJhm6VUS2UQRwMRqLTvSsljDeq193Ge4aqQDtb94bKg=","hello"},
	{"$ScryptKDF.pm$16384*8*1*VHRuaXZOZ05INWJs*JjrOzA8pdPhLvLh8sY64fLLaAjFUwYCXMmS16NXcn0BhlHpZJ3J2jcozCDM7t+sfjkgQ894R+f+ldVWM5atlkA==","password"},
	{NULL}
};

// from crypt_scrypt-common.c  (removed static from that file on these 3 functions)
extern const uint8_t * decode64_uint32(uint32_t * dst, uint32_t dstbits, const uint8_t * src);
extern uint8_t * encode64_uint32(uint8_t * dst, size_t dstlen, uint32_t src, uint32_t srcbits);
extern int decode64_one(uint32_t * dst, uint8_t src);

static int max_threads;
static escrypt_local_t *local; /* one escrypt work area per thread */

static char saved_salt[SALT_SIZE];
/* per-candidate key/output slots, max_keys_per_crypt entries */
static struct {
	char key[PLAINTEXT_LENGTH + 1];
	char out[BINARY_SIZE];
} *buffer;

/* Allocate the per-thread escrypt work areas and the key/out buffers;
   under OpenMP, scale keys-per-crypt by the thread count. */
static void init(struct fmt_main *self)
{
	int i;

#ifdef _OPENMP
	max_threads = omp_get_max_threads();
	self->params.min_keys_per_crypt *= max_threads;
	self->params.max_keys_per_crypt *= max_threads;
#else
	max_threads = 1;
#endif

	local = mem_alloc(sizeof(*local) * max_threads);
	for (i = 0; i < max_threads; i++)
		escrypt_init_local(&local[i]);

	buffer = mem_alloc(sizeof(*buffer) * self->params.max_keys_per_crypt);
}

/* Map scrypt N (a power of two) to the single crypt(3) base-64 digit
   encoding log2(N), as used right after the "$7$" tag. */
static char N_to_c(int N) {
	int b=0;
	while (N>>=1) ++b;
	return itoa64[b];
}

/* Canonicalize Cisco $9$ and ScryptKDF.pm hashes into the $7$ crypt form;
   anything else is returned unchanged.  Returns a pointer to a static
   buffer when a conversion happened. */
static char *prepare(char *fields[10], struct fmt_main *self)
{
	static char Buf[256];
	char tmp[512], tmp2[512], tmp4[256], tmp5[6], tmp6[6], *cp, *cp2;
	int N, r, p;

	if (!strncmp(fields[1], FMT_CISCO9, FMT_CISCO9_LEN)) {
		// cisco type 9 hashes.  scrypt params: N=16384, r=1, p=1 hash in crypt format.  Change it to CryptBS.
		// salt is 14 byte RAW, we can use it as is.
		//from: {"$9$nhEmQVczB7dqsO$X.HsgL6x1il0RxkOSSvyQYwucySCt7qFm4v7pqCxkKM", "cisco"},
		//to:   {"$7$C/..../....nhEmQVczB7dqsO$AG.yl8LDCkiErlh4ttizmxYCXSiXYrNY6vKmLDKj/P4", "cisco"},
		if (strlen(fields[1]) != 4+14+43)
			return fields[1];
		N=1<<14; r=1; p=1;
		encode64_uint32((uint8_t*)tmp5, sizeof(tmp5), r, 30);
		tmp5[5]=0;
		encode64_uint32((uint8_t*)tmp6, sizeof(tmp6), p, 30);
		tmp6[5]=0;
		sprintf (Buf, "%s%c%s%s%14.14s$%s", FMT_TAG7, N_to_c(N), tmp5, tmp6, &(fields[1][3]),
			base64_convert_cp(&(fields[1][3+14+1]), e_b64_crypt, 43, tmp, e_b64_cryptBS, sizeof(tmp), flg_Base64_NO_FLAGS, 0));
	} else if (!strncmp(fields[1], FMT_SCRYPTKDF, FMT_SCRYPTKDF_LEN)) {
		// ScryptKDF.pm (perl) format scrypt, generated by:  scrypt_hash($_[1],$salt,$N,$r,$p,$bytes);  Since N, r, p
		// AND bytes are variable, we have to handle computing all of them.  NOTE, we may have to make changes to
		// the crypto_scrypt-common.c to handle the variable number of bytes.
		// to put into proper format, we mime->raw the salt and mime->cryptBS the hash hash, and fixup $N,$r,$p
		//from: {"$ScryptKDF.pm$*16384*8*1*VHRuaXZOZ05INWJs*JjrOzA8pdPhLvLh8sY64fLLaAjFUwYCXMmS16NXcn0A=","password"},
		//to:   {"$7$C6..../....TtnivNgNH5bl$acXnAzE8oVzGwW9Tlu6iw7fq021J/1sZmEKhcLBrT02","password"},
		int N, r, p;
		if (strlen(fields[1]) > sizeof(tmp)+FMT_SCRYPTKDF_LEN)
			return fields[1];
		/* parse "N*r*p*salt_b64*hash_b64", validating each token */
		strcpy(tmp, &fields[1][FMT_SCRYPTKDF_LEN]);
		cp = strtokm(tmp, "*");
		if (!cp || !isdec(cp))
			return fields[1];
		N = atoi(cp);
		cp = strtokm(NULL, "*");
		if (!cp || !isdec(cp))
			return fields[1];
		r = atoi(cp);
		cp = strtokm(NULL, "*");
		if (!cp || !isdec(cp))
			return fields[1];
		p = atoi(cp);
		cp = strtokm(NULL, "*");
		if (!cp)
			return fields[1];
		cp2 = strtokm(NULL, "*");
		if (!cp2)
			return fields[1];
		if (base64_valid_length(cp, e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT, 0) != strlen(cp))
			return fields[1];
		if (base64_valid_length(cp2, e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT, 0) != strlen(cp2))
			return fields[1];
		/* r and p become 5-char crypt base-64 fields */
		encode64_uint32((uint8_t*)tmp5, sizeof(tmp5), r, 30);
		tmp5[5]=0;
		encode64_uint32((uint8_t*)tmp6, sizeof(tmp6), p, 30);
		tmp6[5]=0;
		memset(tmp4, 0, sizeof(tmp4));
		base64_convert_cp(cp, e_b64_mime, strlen(cp), tmp4, e_b64_raw, sizeof(tmp4), flg_Base64_NO_FLAGS, 0);
		memset(tmp2, 0, sizeof(tmp2));
		base64_convert_cp(cp2, e_b64_mime, strlen(cp2), tmp2, e_b64_cryptBS, sizeof(tmp2),flg_Base64_NO_FLAGS, 0);
		/* strip trailing '.' padding from the converted hash and salt */
		cp = &tmp2[strlen(tmp2)-1];
		while (cp > tmp2 && *cp == '.')
			*cp-- = 0;
		/* NOTE(review): this uses strlen(tmp), not strlen(tmp4) -- looks
		   like a typo that starts the scan at the wrong offset; confirm
		   against upstream before changing. */
		cp = &tmp4[strlen(tmp)-1];
		while (cp > tmp4 && *cp == '.')
			*cp-- = 0;
		sprintf (Buf, "%s%c%s%s%s$%s", FMT_TAG7, N_to_c(N), tmp5, tmp6, tmp4, tmp2);
	} else
		return fields[1];
	return Buf;
}

/* Release the per-thread escrypt work areas and buffers (inverse of init). */
static void done(void)
{
	int i;
	for (i = 0; i < max_threads; i++)
		escrypt_free_local(&local[i]);
	MEM_FREE(local);
	MEM_FREE(buffer);
}

/* Validate a canonical "$7$" hash: tag, 11 base-64 chars for N/r/p, a '$'
   before the hash part, nonzero N/r/p, and at least 43 cryptBS base-64
   hash characters (32 raw bytes). */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;
	int length;
	unsigned tmp;

	if (strncmp(ciphertext, FMT_TAG7, FMT_TAG7_LEN))
		return 0;
	/* chars 3..13 (1 for N, 5 for r, 5 for p) must be valid base-64 */
	for (p = ciphertext + FMT_TAG7_LEN; p < ciphertext + (FMT_TAG7_LEN + 1 + 5 + 5); p++)
		if (atoi64[ARCH_INDEX(*p)] == 0x7F)
			return 0;
	p = strrchr(ciphertext, '$');
	if (!p)
		return 0;
	if (p - ciphertext > BINARY_SIZE - (1 + 43))
		return 0;
	++p;
	length = base64_valid_length(p, e_b64_cryptBS, flg_Base64_NO_FLAGS, 0);
	decode64_one(&tmp, ciphertext[3]); /* N must decode nonzero */
	if (!tmp)
		return 0;
	decode64_uint32(&tmp, 30, (const uint8_t *)&ciphertext[4]); /* r */
	if (!tmp)
		return 0;
	decode64_uint32(&tmp, 30, (const uint8_t *)&ciphertext[4+5]); /* p */
	if (!tmp)
		return 0;
	// we want the hash to use 32 bytes OR more.  43 base64 bytes is 32 raw bytes
	return p[length]==0 && length >= 43;
}

/* The "binary" is simply the NUL-padded ciphertext string itself. */
static void *get_binary(char *ciphertext)
{
	static char out[BINARY_SIZE];
	strncpy(out, ciphertext, sizeof(out)); /* NUL padding is required */
	return out;
}

/* The salt is the ciphertext up to (and excluding) the '$' that starts the
   hash part; everything from that '$' on is zeroed out. */
static void *get_salt(char *ciphertext)
{
	static char out[SALT_SIZE];
	char *cp;

	/* NUL padding is required */
	memset(out, 0, sizeof(out));
	if (strlen(ciphertext) > SALT_SIZE-1)
		memcpy(out, ciphertext, SALT_SIZE-1);
	else
		strcpy(out, ciphertext);
	/* out[8] skips "$7$" + N + first chars of r, so the '$' found here is
	   the separator before the hash, not one inside the tag */
	cp = strchr(&out[8], '$');
	while (cp && *cp) {
		*cp++ = 0;
	}
	return out;
}

/* Hash-table helper: mixes the base-64 value of byte i of the hash with the
   raw byte before it. */
#define H(s, i) \
	((int)(unsigned char)(atoi64[ARCH_INDEX((s)[(i)])] ^ (s)[(i) - 1]))

/*
 * original Hx() macros simple looked at length-2 (last byte, and last byte -2)
 * now we look at bytes 40 and 38 from the hash, so that longer hashes can
 * be compared to shorter ones.  The last byte may be different, so we
 * do NOT use that one.  This new method works for any number of bytes in
 * the scrypt 32 or more.
#define H0(s) \
	int i = strlen(s) - 2; \
	return i > 0 ? H((s), i) & 0xF : 0
*/

/* H0..Hn index from 40 bytes past the final '$' so hashes of differing
   lengths bucket consistently (see comment above). */
#define H0(s) \
	char *cp = strrchr(s,'$')+40; \
	int i = cp-s; \
	return i > 0 ? H((s), i) & 0xF : 0
#define H1(s) \
	char *cp = strrchr(s,'$')+40; \
	int i = cp-s; \
	return i > 2 ? (H((s), i) ^ (H((s), i - 2) << 4)) & 0xFF : 0
#define H2(s) \
	char *cp = strrchr(s,'$')+40; \
	int i = cp-s; \
	return i > 2 ? (H((s), i) ^ (H((s), i - 2) << 6)) & 0xFFF : 0
#define H3(s) \
	char *cp = strrchr(s,'$')+40; \
	int i = cp-s; \
	return i > 4 ?
(H((s), i) ^ (H((s), i - 2) << 5) ^ \ (H((s), i - 4) << 10)) & 0xFFFF : 0 #define H4(s) \ char *cp = strrchr(s,'$')+40; \ int i = cp-s; \ return i > 6 ? (H((s), i) ^ (H((s), i - 2) << 5) ^ \ (H((s), i - 4) << 10) ^ (H((s), i - 6) << 15)) & 0xFFFFF : 0 static int binary_hash_0(void *binary) { H0((char *)binary); } static int binary_hash_1(void *binary) { H1((char *)binary); } static int binary_hash_2(void *binary) { H2((char *)binary); } static int binary_hash_3(void *binary) { H3((char *)binary); } static int binary_hash_4(void *binary) { H4((char *)binary); } static int get_hash_0(int index) { H0(buffer[index].out); } static int get_hash_1(int index) { H1(buffer[index].out); } static int get_hash_2(int index) { H2(buffer[index].out); } static int get_hash_3(int index) { H3(buffer[index].out); } static int get_hash_4(int index) { H4(buffer[index].out); } static int salt_hash(void *salt) { int i, h; i = strlen((char *)salt) - 1; if (i > 1) i--; h = (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i])]; h ^= ((unsigned char *)salt)[i - 1]; h <<= 6; h ^= (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i - 1])]; h ^= ((unsigned char *)salt)[i]; return h & (SALT_HASH_SIZE - 1); } static void set_salt(void *salt) { strcpy(saved_salt, salt); } static void set_key(char *key, int index) { strnzcpy(buffer[index].key, key, PLAINTEXT_LENGTH + 1); } static char *get_key(int index) { return buffer[index].key; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index; int failed = 0; #ifdef _OPENMP #pragma omp parallel for default(none) private(index) shared(count, failed, local, saved_salt, buffer) #endif for (index = 0; index < count; index++) { uint8_t *hash; hash = escrypt_r(&(local[index]), (const uint8_t *)(buffer[index].key), strlen(buffer[index].key), (const uint8_t *)saved_salt, (uint8_t *)&(buffer[index].out), sizeof(buffer[index].out)); if (!hash) { failed = 1; buffer[index].out[0] = 0; } } if (failed) { fprintf(stderr, "scrypt memory 
allocation failed\n"); error(); } return count; } static int cmp_all(void *binary, int count) { int index; // binary was created as 32 bytes. It will always be // <= length of buffer.out. So we use the binary as // our hash indication lentth (and avoid looking at last byte) int len = strlen(buffer[0].out)-2; for (index = 0; index < count; index++) if (!strncmp((char *)binary, buffer[index].out, len)) return 1; return 0; } static int cmp_one(void *binary, int index) { int len = strlen(buffer[index].out)-2; return !strncmp((char *)binary, buffer[index].out,len); } static int cmp_exact(char *source, int index) { return 1; } static unsigned int tunable_cost_N(void *salt) { const uint8_t * setting; const uint8_t * src; uint64_t N; setting = salt; if (setting[0] != '$' || setting[1] != '7' || setting[2] != '$') return 0; src = setting + 3; { uint32_t N_log2; if (decode64_one(&N_log2, *src)) return 0; src++; N = (uint64_t)1 << N_log2; } return (unsigned int) N; } static unsigned int tunable_cost_r(void *salt) { const uint8_t * setting; const uint8_t * src; uint32_t r; setting = salt; if (setting[0] != '$' || setting[1] != '7' || setting[2] != '$') return 0; src = setting + 3; { uint32_t N_log2; if (decode64_one(&N_log2, *src)) return 0; src++; } src = decode64_uint32(&r, 30, src); if (!src) return 0; return (unsigned int) r; } static unsigned int tunable_cost_p(void *salt) { const uint8_t * setting; const uint8_t * src; uint32_t r, p; setting = salt; if (setting[0] != '$' || setting[1] != '7' || setting[2] != '$') return 0; src = setting + 3; { uint32_t N_log2; if (decode64_one(&N_log2, *src)) return 0; src++; } src = decode64_uint32(&r, 30, src); if (!src) return 0; src = decode64_uint32(&p, 30, src); if (!src) return 0; return (unsigned int) p; } struct fmt_main fmt_scrypt = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, 
FMT_CASE | FMT_8_BIT | FMT_OMP, { "N", "r", "p" }, { FMT_TAG7, FMT_CISCO9, FMT_SCRYPTKDF }, tests }, { init, done, fmt_default_reset, prepare, valid, fmt_default_split, get_binary, get_salt, { tunable_cost_N, tunable_cost_r, tunable_cost_p }, fmt_default_source, { binary_hash_0, binary_hash_1, binary_hash_2, binary_hash_3, binary_hash_4, NULL, NULL }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, NULL, NULL }, cmp_all, cmp_one, cmp_exact } };
nvector_openmpdev.c
/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner and Shelby Lockhart @ LLNL * ----------------------------------------------------------------- * Acknowledgements: This NVECTOR module is based on the NVECTOR * Serial module by Scott D. Cohen, Alan C. * Hindmarsh, Radu Serban, and Aaron Collier * @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2019, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for an OpenMP DEV implementation * of the NVECTOR module. * -----------------------------------------------------------------*/ #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <nvector/nvector_openmpdev.h> #include <sundials/sundials_math.h> #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define ONEPT5 RCONST(1.5) /* Private functions for special cases of vector operations */ static void VCopy_OpenMPDEV(N_Vector x, N_Vector z); /* z=x */ static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */ static void VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */ static void VNeg_OpenMPDEV(N_Vector x, N_Vector z); /* z=-x */ static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */ static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */ static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */ static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */ static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */ static void 
VScaleBy_OpenMPDEV(realtype a, N_Vector x); /* x <- ax */ /* Private functions for special cases of vector array operations */ static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */ static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */ static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */ static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */ static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */ static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */ static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */ /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------- * Returns vector type ID. Used to identify vector implementation * from abstract N_Vector interface. 
*/ N_Vector_ID N_VGetVectorID_OpenMPDEV(N_Vector v) { return SUNDIALS_NVEC_OPENMPDEV; } /* ---------------------------------------------------------------------------- * Function to create a new empty vector */ N_Vector N_VNewEmpty_OpenMPDEV(sunindextype length) { N_Vector v; N_Vector_Ops ops; N_VectorContent_OpenMPDEV content; /* Create vector */ v = NULL; v = (N_Vector) malloc(sizeof *v); if (v == NULL) return(NULL); /* Create vector operation structure */ ops = NULL; ops = (N_Vector_Ops) malloc(sizeof(struct _generic_N_Vector_Ops)); if (ops == NULL) { free(v); return(NULL); } ops->nvgetvectorid = N_VGetVectorID_OpenMPDEV; ops->nvclone = N_VClone_OpenMPDEV; ops->nvcloneempty = N_VCloneEmpty_OpenMPDEV; ops->nvdestroy = N_VDestroy_OpenMPDEV; ops->nvspace = N_VSpace_OpenMPDEV; ops->nvgetarraypointer = NULL; ops->nvsetarraypointer = NULL; /* standard vector operations */ ops->nvlinearsum = N_VLinearSum_OpenMPDEV; ops->nvconst = N_VConst_OpenMPDEV; ops->nvprod = N_VProd_OpenMPDEV; ops->nvdiv = N_VDiv_OpenMPDEV; ops->nvscale = N_VScale_OpenMPDEV; ops->nvabs = N_VAbs_OpenMPDEV; ops->nvinv = N_VInv_OpenMPDEV; ops->nvaddconst = N_VAddConst_OpenMPDEV; ops->nvdotprod = N_VDotProd_OpenMPDEV; ops->nvmaxnorm = N_VMaxNorm_OpenMPDEV; ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMPDEV; ops->nvwrmsnorm = N_VWrmsNorm_OpenMPDEV; ops->nvmin = N_VMin_OpenMPDEV; ops->nvwl2norm = N_VWL2Norm_OpenMPDEV; ops->nvl1norm = N_VL1Norm_OpenMPDEV; ops->nvcompare = N_VCompare_OpenMPDEV; ops->nvinvtest = N_VInvTest_OpenMPDEV; ops->nvconstrmask = N_VConstrMask_OpenMPDEV; ops->nvminquotient = N_VMinQuotient_OpenMPDEV; /* fused vector operations (optional, NULL means disabled by default) */ ops->nvlinearcombination = NULL; ops->nvscaleaddmulti = NULL; ops->nvdotprodmulti = NULL; /* vector array operations (optional, NULL means disabled by default) */ ops->nvlinearsumvectorarray = NULL; ops->nvscalevectorarray = NULL; ops->nvconstvectorarray = NULL; ops->nvwrmsnormvectorarray = NULL; 
ops->nvwrmsnormmaskvectorarray = NULL; ops->nvscaleaddmultivectorarray = NULL; ops->nvlinearcombinationvectorarray = NULL; /* Create content */ content = NULL; content = (N_VectorContent_OpenMPDEV) malloc(sizeof(struct _N_VectorContent_OpenMPDEV)); if (content == NULL) { free(ops); free(v); return(NULL); } content->length = length; content->own_data = SUNFALSE; content->host_data = NULL; content->dev_data = NULL; /* Attach content and ops */ v->content = content; v->ops = ops; return(v); } /* ---------------------------------------------------------------------------- * Function to create a new vector */ N_Vector N_VNew_OpenMPDEV(sunindextype length) { N_Vector v; realtype *data; realtype *dev_data; int dev; v = NULL; v = N_VNewEmpty_OpenMPDEV(length); if (v == NULL) return(NULL); /* Create data */ if (length > 0) { /* Allocate memory on host */ data = NULL; data = (realtype *) malloc(length * sizeof(realtype)); /* Allocate memory on device */ dev = omp_get_default_device(); dev_data = omp_target_alloc(length * sizeof(realtype), dev); if(data == NULL) { N_VDestroy_OpenMPDEV(v); return(NULL); } /* Attach data */ NV_OWN_DATA_OMPDEV(v) = SUNTRUE; NV_DATA_HOST_OMPDEV(v) = data; NV_DATA_DEV_OMPDEV(v) = dev_data; } return(v); } /* ---------------------------------------------------------------------------- * Function to create a vector with user data component */ N_Vector N_VMake_OpenMPDEV(sunindextype length, realtype *h_vdata, realtype *d_vdata) { N_Vector v; int dev, host; if (h_vdata == NULL || d_vdata == NULL) return(NULL); v = NULL; v = N_VNewEmpty_OpenMPDEV(length); if (v == NULL) return(NULL); if (length > 0) { /* Get device and host identifiers */ dev = omp_get_default_device(); host = omp_get_initial_device(); /* Attach data */ NV_OWN_DATA_OMPDEV(v) = SUNFALSE; NV_DATA_HOST_OMPDEV(v) = h_vdata; NV_DATA_DEV_OMPDEV(v) = d_vdata; } return(v); } /* ---------------------------------------------------------------------------- * Function to create an array of new 
vectors. */ N_Vector *N_VCloneVectorArray_OpenMPDEV(int count, N_Vector w) { N_Vector *vs; int j; if (count <= 0) return(NULL); vs = NULL; vs = (N_Vector *) malloc(count * sizeof(N_Vector)); if(vs == NULL) return(NULL); for (j = 0; j < count; j++) { vs[j] = NULL; vs[j] = N_VClone_OpenMPDEV(w); if (vs[j] == NULL) { N_VDestroyVectorArray_OpenMPDEV(vs, j-1); return(NULL); } } return(vs); } /* ---------------------------------------------------------------------------- * Function to create an array of new vectors with NULL data array. */ N_Vector *N_VCloneVectorArrayEmpty_OpenMPDEV(int count, N_Vector w) { N_Vector *vs; int j; if (count <= 0) return(NULL); vs = NULL; vs = (N_Vector *) malloc(count * sizeof(N_Vector)); if(vs == NULL) return(NULL); for (j = 0; j < count; j++) { vs[j] = NULL; vs[j] = N_VCloneEmpty_OpenMPDEV(w); if (vs[j] == NULL) { N_VDestroyVectorArray_OpenMPDEV(vs, j-1); return(NULL); } } return(vs); } /* ---------------------------------------------------------------------------- * Function to free an array created with N_VCloneVectorArray_OpenMPDEV */ void N_VDestroyVectorArray_OpenMPDEV(N_Vector *vs, int count) { int j; for (j = 0; j < count; j++) N_VDestroy_OpenMPDEV(vs[j]); free(vs); vs = NULL; return; } /* ---------------------------------------------------------------------------- * Function to return number of vector elements */ sunindextype N_VGetLength_OpenMPDEV(N_Vector v) { return NV_LENGTH_OMPDEV(v); } /* ---------------------------------------------------------------------------- * Function to return a pointer to the data array on the host. */ realtype *N_VGetHostArrayPointer_OpenMPDEV(N_Vector v) { return((realtype *) NV_DATA_HOST_OMPDEV(v)); } /* ---------------------------------------------------------------------------- * Function to return a pointer to the data array on the device. 
*/ realtype *N_VGetDeviceArrayPointer_OpenMPDEV(N_Vector v) { return((realtype *) NV_DATA_DEV_OMPDEV(v)); } /* ---------------------------------------------------------------------------- * Function to print a vector to stdout */ void N_VPrint_OpenMPDEV(N_Vector x) { N_VPrintFile_OpenMPDEV(x, stdout); } /* ---------------------------------------------------------------------------- * Function to print a vector to outfile */ void N_VPrintFile_OpenMPDEV(N_Vector x, FILE *outfile) { sunindextype i, N; realtype *xd; xd = NULL; N = NV_LENGTH_OMPDEV(x); xd = NV_DATA_HOST_OMPDEV(x); for (i = 0; i < N; i++) { #if defined(SUNDIALS_EXTENDED_PRECISION) fprintf(outfile, "%11.8Lg\n", xd[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) fprintf(outfile, "%11.8g\n", xd[i]); #else fprintf(outfile, "%11.8g\n", xd[i]); #endif } fprintf(outfile, "\n"); return; } /* ---------------------------------------------------------------------------- * Function to copy host array into device array */ void N_VCopyToDevice_OpenMPDEV(N_Vector x) { int dev, host; sunindextype length; realtype *host_ptr; realtype *dev_ptr; /* Get array information */ length = NV_LENGTH_OMPDEV(x); host_ptr = NV_DATA_HOST_OMPDEV(x); dev_ptr = NV_DATA_DEV_OMPDEV(x); /* Get device and host identifiers */ dev = omp_get_default_device(); host = omp_get_initial_device(); /* Copy array from host to device */ omp_target_memcpy(dev_ptr, host_ptr, sizeof(realtype) * length, 0, 0, dev, host); return; } /* ---------------------------------------------------------------------------- * Function to copy device array into host array */ void N_VCopyFromDevice_OpenMPDEV(N_Vector x) { int dev, host; sunindextype length; realtype *host_ptr; realtype *dev_ptr; /* Get array information */ length = NV_LENGTH_OMPDEV(x); host_ptr = NV_DATA_HOST_OMPDEV(x); dev_ptr = NV_DATA_DEV_OMPDEV(x); /* Get device and host identifiers */ dev = omp_get_default_device(); host = omp_get_initial_device(); /* Copy array from device to host */ 
omp_target_memcpy(host_ptr, dev_ptr, sizeof(realtype) * length, 0, 0, host, dev); return; } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Create new vector from existing vector without attaching data */ N_Vector N_VCloneEmpty_OpenMPDEV(N_Vector w) { N_Vector v; N_Vector_Ops ops; N_VectorContent_OpenMPDEV content; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = (N_Vector) malloc(sizeof *v); if (v == NULL) return(NULL); /* Create vector operation structure */ ops = NULL; ops = (N_Vector_Ops) malloc(sizeof(struct _generic_N_Vector_Ops)); if (ops == NULL) { free(v); return(NULL); } ops->nvgetvectorid = w->ops->nvgetvectorid; ops->nvclone = w->ops->nvclone; ops->nvcloneempty = w->ops->nvcloneempty; ops->nvdestroy = w->ops->nvdestroy; ops->nvspace = w->ops->nvspace; ops->nvgetarraypointer = w->ops->nvgetarraypointer; ops->nvsetarraypointer = w->ops->nvsetarraypointer; /* standard vector operations */ ops->nvlinearsum = w->ops->nvlinearsum; ops->nvconst = w->ops->nvconst; ops->nvprod = w->ops->nvprod; ops->nvdiv = w->ops->nvdiv; ops->nvscale = w->ops->nvscale; ops->nvabs = w->ops->nvabs; ops->nvinv = w->ops->nvinv; ops->nvaddconst = w->ops->nvaddconst; ops->nvdotprod = w->ops->nvdotprod; ops->nvmaxnorm = w->ops->nvmaxnorm; ops->nvwrmsnormmask = w->ops->nvwrmsnormmask; ops->nvwrmsnorm = w->ops->nvwrmsnorm; ops->nvmin = w->ops->nvmin; ops->nvwl2norm = w->ops->nvwl2norm; ops->nvl1norm = w->ops->nvl1norm; ops->nvcompare = w->ops->nvcompare; ops->nvinvtest = w->ops->nvinvtest; ops->nvconstrmask = w->ops->nvconstrmask; ops->nvminquotient = w->ops->nvminquotient; /* fused vector operations */ ops->nvlinearcombination = w->ops->nvlinearcombination; ops->nvscaleaddmulti = w->ops->nvscaleaddmulti; ops->nvdotprodmulti = w->ops->nvdotprodmulti; /* vector 
array operations */ ops->nvlinearsumvectorarray = w->ops->nvlinearsumvectorarray; ops->nvscalevectorarray = w->ops->nvscalevectorarray; ops->nvconstvectorarray = w->ops->nvconstvectorarray; ops->nvwrmsnormvectorarray = w->ops->nvwrmsnormvectorarray; ops->nvwrmsnormmaskvectorarray = w->ops->nvwrmsnormmaskvectorarray; ops->nvscaleaddmultivectorarray = w->ops->nvscaleaddmultivectorarray; ops->nvlinearcombinationvectorarray = w->ops->nvlinearcombinationvectorarray; /* Create content */ content = NULL; content = (N_VectorContent_OpenMPDEV) malloc(sizeof(struct _N_VectorContent_OpenMPDEV)); if (content == NULL) { free(ops); free(v); return(NULL); } content->length = NV_LENGTH_OMPDEV(w); content->own_data = SUNFALSE; content->host_data = NULL; content->dev_data = NULL; /* Attach content and ops */ v->content = content; v->ops = ops; return(v); } /* ---------------------------------------------------------------------------- * Create new vector from existing vector and attach data */ N_Vector N_VClone_OpenMPDEV(N_Vector w) { N_Vector v; realtype *data; realtype *dev_data; sunindextype length; int dev; v = NULL; v = N_VCloneEmpty_OpenMPDEV(w); if (v == NULL) return(NULL); length = NV_LENGTH_OMPDEV(w); /* Create data */ if (length > 0) { /* Allocate memory on host */ data = NULL; data = (realtype *) malloc(length * sizeof(realtype)); /* Allocate memory on device */ dev = omp_get_default_device(); dev_data = omp_target_alloc(length * sizeof(realtype), dev); if(data == NULL) { N_VDestroy_OpenMPDEV(v); return(NULL); } /* Attach data */ NV_OWN_DATA_OMPDEV(v) = SUNTRUE; NV_DATA_HOST_OMPDEV(v)= data; NV_DATA_DEV_OMPDEV(v) = dev_data; } return(v); } /* ---------------------------------------------------------------------------- * Destroy vector and free vector memory */ void N_VDestroy_OpenMPDEV(N_Vector v) { int dev; if (NV_OWN_DATA_OMPDEV(v) == SUNTRUE) { /* Free host memory */ free(NV_DATA_HOST_OMPDEV(v)); NV_DATA_HOST_OMPDEV(v) = NULL; /* Free device memory */ dev = 
omp_get_default_device(); omp_target_free(NV_DATA_DEV_OMPDEV(v), dev); NV_DATA_DEV_OMPDEV(v) = NULL; } free(v->content); v->content = NULL; free(v->ops); v->ops = NULL; free(v); v = NULL; return; } /* ---------------------------------------------------------------------------- * Get storage requirement for N_Vector */ void N_VSpace_OpenMPDEV(N_Vector v, sunindextype *lrw, sunindextype *liw) { *lrw = NV_LENGTH_OMPDEV(v); *liw = 1; return; } /* ---------------------------------------------------------------------------- * Compute linear combination z[i] = a*x[i]+b*y[i] */ void N_VLinearSum_OpenMPDEV(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z) { sunindextype i, N; realtype c, *xd_dev, *yd_dev, *zd_dev; N_Vector v1, v2; booleantype test; int dev; xd_dev = yd_dev = zd_dev = NULL; if ((b == ONE) && (z == y)) { /* BLAS usage: axpy y <- ax+y */ Vaxpy_OpenMPDEV(a,x,y); return; } if ((a == ONE) && (z == x)) { /* BLAS usage: axpy x <- by+x */ Vaxpy_OpenMPDEV(b,y,x); return; } /* Case: a == b == 1.0 */ if ((a == ONE) && (b == ONE)) { VSum_OpenMPDEV(x, y, z); return; } /* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */ if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) { v1 = test ? y : x; v2 = test ? x : y; VDiff_OpenMPDEV(v2, v1, z); return; } /* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */ /* if a or b is 0.0, then user should have called N_VScale */ if ((test = (a == ONE)) || (b == ONE)) { c = test ? b : a; v1 = test ? y : x; v2 = test ? x : y; VLin1_OpenMPDEV(c, v1, v2, z); return; } /* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */ if ((test = (a == -ONE)) || (b == -ONE)) { c = test ? b : a; v1 = test ? y : x; v2 = test ? 
x : y; VLin2_OpenMPDEV(c, v1, v2, z); return; } /* Case: a == b */ /* catches case both a and b are 0.0 - user should have called N_VConst */ if (a == b) { VScaleSum_OpenMPDEV(a, x, y, z); return; } /* Case: a == -b */ if (a == -b) { VScaleDiff_OpenMPDEV(a, x, y, z); return; } /* Do all cases not handled above: (1) a == other, b == 0.0 - user should have called N_VScale (2) a == 0.0, b == other - user should have called N_VScale (3) a,b == other, a !=b, a != -b */ N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); yd_dev = NV_DATA_DEV_OMPDEV(y); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N,a,b) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = (a*xd_dev[i])+(b*yd_dev[i]); return; } /* ---------------------------------------------------------------------------- * Assigns constant value to all vector elements, z[i] = c */ void N_VConst_OpenMPDEV(realtype c, N_Vector z) { sunindextype i, N; realtype *zd_dev; int dev; zd_dev = NULL; N = NV_LENGTH_OMPDEV(z); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N,c) is_device_ptr(zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = c; return; } /* ---------------------------------------------------------------------------- * Compute componentwise product z[i] = x[i]*y[i] */ void N_VProd_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd_dev, *yd_dev, *zd_dev; int dev; xd_dev = yd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); yd_dev = NV_DATA_DEV_OMPDEV(y); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev) #pragma 
omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = xd_dev[i]*yd_dev[i]; return; } /* ---------------------------------------------------------------------------- * Compute componentwise division z[i] = x[i]/y[i] */ void N_VDiv_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd_dev, *yd_dev, *zd_dev; int dev; xd_dev = yd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); yd_dev = NV_DATA_DEV_OMPDEV(y); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = xd_dev[i]/yd_dev[i]; return; } /* ---------------------------------------------------------------------------- * Compute scaler multiplication z[i] = c*x[i] */ void N_VScale_OpenMPDEV(realtype c, N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd_dev, *zd_dev; int dev; xd_dev = zd_dev = NULL; if (z == x) { /* BLAS usage: scale x <- cx */ VScaleBy_OpenMPDEV(c, x); return; } if (c == ONE) { VCopy_OpenMPDEV(x, z); } else if (c == -ONE) { VNeg_OpenMPDEV(x, z); } else { N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N,c) is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = c*xd_dev[i]; } return; } /* ---------------------------------------------------------------------------- * Compute absolute value of vector components z[i] = SUNRabs(x[i]) */ void N_VAbs_OpenMPDEV(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd_dev, *zd_dev; int dev; xd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device 
identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N) is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = SUNRabs(xd_dev[i]); return; } /* ---------------------------------------------------------------------------- * Compute componentwise inverse z[i] = 1 / x[i] */ void N_VInv_OpenMPDEV(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd_dev, *zd_dev; int dev; xd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N) is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = ONE/xd_dev[i]; return; } /* ---------------------------------------------------------------------------- * Compute componentwise addition of a scaler to a vector z[i] = x[i] + b */ void N_VAddConst_OpenMPDEV(N_Vector x, realtype b, N_Vector z) { sunindextype i, N; realtype *xd_dev, *zd_dev; int dev; xd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N,b) is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = xd_dev[i]+b; return; } /* ---------------------------------------------------------------------------- * Computes the dot product of two vectors, a = sum(x[i]*y[i]) */ realtype N_VDotProd_OpenMPDEV(N_Vector x, N_Vector y) { sunindextype i, N; realtype sum, *xd_dev, *yd_dev; int dev; xd_dev = yd_dev = NULL; sum = ZERO; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); yd_dev = NV_DATA_DEV_OMPDEV(y); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N) 
map(tofrom:sum) is_device_ptr(xd_dev, yd_dev) device(dev) #pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1) for (i = 0; i < N; i++) { sum += xd_dev[i]*yd_dev[i]; } return(sum); } /* ---------------------------------------------------------------------------- * Computes max norm of a vector */ realtype N_VMaxNorm_OpenMPDEV(N_Vector x) { sunindextype i, N; realtype max, *xd_dev; int dev; max = ZERO; xd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N) map(tofrom:max) is_device_ptr(xd_dev) device(dev) #pragma omp teams distribute parallel for reduction(max:max) schedule(static, 1) for (i = 0; i < N; i++) { max = SUNMAX(SUNRabs(xd_dev[i]), max); } return(max); } /* ---------------------------------------------------------------------------- * Computes weighted root mean square norm of a vector */ realtype N_VWrmsNorm_OpenMPDEV(N_Vector x, N_Vector w) { sunindextype i, N; realtype sum, *xd_dev, *wd_dev; int dev; sum = ZERO; xd_dev = wd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); wd_dev = NV_DATA_DEV_OMPDEV(w); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev) #pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1) for (i = 0; i < N; i++) { sum += SUNSQR(xd_dev[i]*wd_dev[i]); } return(SUNRsqrt(sum/N)); } /* ---------------------------------------------------------------------------- * Computes weighted root mean square norm of a masked vector */ realtype N_VWrmsNormMask_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id) { sunindextype i, N; realtype sum, *xd_dev, *wd_dev, *idd_dev; int dev; sum = ZERO; xd_dev = wd_dev = idd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); wd_dev = NV_DATA_DEV_OMPDEV(w); idd_dev = NV_DATA_DEV_OMPDEV(id); /* get default device 
identifier */
  dev = omp_get_default_device();

  /* only components with id[i] > 0 contribute to the masked norm */
#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev, wd_dev, idd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++)
  {
    if (idd_dev[i] > ZERO)
    {
      sum += SUNSQR(xd_dev[i]*wd_dev[i]);
    }
  }

  return(SUNRsqrt(sum / N));
}


/* ----------------------------------------------------------------------------
 * Finds the minimum component of a vector
 */

realtype N_VMin_OpenMPDEV(N_Vector x)
{
  sunindextype i, N;
  realtype min, *xd_dev;
  int dev;

  xd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* a single team is used so that min can be seeded from xd_dev[0] on the
     device before the distributed reduction over the remaining entries */
#pragma omp target map(to:N) map(from:min) is_device_ptr(xd_dev) device(dev)
#pragma omp teams num_teams(1)
  {
    min = xd_dev[0];

#pragma omp distribute parallel for reduction(min:min) schedule(static, 1)
    for (i = 1; i < N; i++)
    {
      min = SUNMIN(xd_dev[i], min);
    }
  }

  return(min);
}


/* ----------------------------------------------------------------------------
 * Computes weighted L2 norm of a vector
 */

realtype N_VWL2Norm_OpenMPDEV(N_Vector x, N_Vector w)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *wd_dev;
  int dev;

  sum = ZERO;
  xd_dev = wd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  wd_dev = NV_DATA_DEV_OMPDEV(w);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++)
  {
    sum += SUNSQR(xd_dev[i]*wd_dev[i]);
  }

  return(SUNRsqrt(sum));
}


/* ----------------------------------------------------------------------------
 * Computes L1 norm of a vector
 */

realtype N_VL1Norm_OpenMPDEV(N_Vector x)
{
  sunindextype i, N;
  realtype sum, *xd_dev;
  int dev;

  sum = ZERO;
  xd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev =
omp_get_default_device();

#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i<N; i++)
    sum += SUNRabs(xd_dev[i]);

  return(sum);
}


/* ----------------------------------------------------------------------------
 * Compare vector component values to a scalar
 */

void N_VCompare_OpenMPDEV(realtype c, N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  xd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* z[i] = 1 if |x[i]| >= c, 0 otherwise */
#pragma omp target map(to:N,c) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = (SUNRabs(xd_dev[i]) >= c) ? ONE : ZERO;

  return;
}


/* ----------------------------------------------------------------------------
 * Compute componentwise inverse z[i] = ONE/x[i] and checks if x[i] == ZERO
 */

booleantype N_VInvTest_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev, val;
  int dev;

  xd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* val becomes ONE (via a max reduction) if any x[i] == ZERO; those
     entries of z are left unmodified */
  val = ZERO;

#pragma omp target map(to:N) map(tofrom:val) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(max:val) schedule(static, 1)
  for (i = 0; i < N; i++)
  {
    if (xd_dev[i] == ZERO)
      val = ONE;
    else
      zd_dev[i] = ONE/xd_dev[i];
  }

  if (val > ZERO)
    return (SUNFALSE);
  else
    return (SUNTRUE);
}


/* ----------------------------------------------------------------------------
 * Compute constraint mask of a vector
 */

booleantype N_VConstrMask_OpenMPDEV(N_Vector c, N_Vector x, N_Vector m)
{
  sunindextype i, N;
  realtype temp;
  realtype *cd_dev, *xd_dev, *md_dev;
  int dev;

  cd_dev = xd_dev = md_dev = NULL;

  N
         = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  cd_dev = NV_DATA_DEV_OMPDEV(c);
  md_dev = NV_DATA_DEV_OMPDEV(m);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* temp drops to ZERO (via a min reduction) if any constraint is
     violated; m is set to 1 at the violating entries */
  temp = ONE;

#pragma omp target map(to:N) map(tofrom:temp) is_device_ptr(xd_dev, cd_dev, md_dev) device(dev)
#pragma omp teams distribute parallel for reduction(min:temp) schedule(static, 1)
  for (i = 0; i < N; i++)
  {
    md_dev[i] = ZERO;

    /* c[i] == 0, no constraint on this component */
    if (cd_dev[i] == ZERO)
      continue;

    /* |c[i]| == 2: require x[i]*c[i] > 0 (strict sign constraint) */
    if (cd_dev[i] > ONEPT5 || cd_dev[i] < -ONEPT5)
    {
      if ( xd_dev[i]*cd_dev[i] <= ZERO)
      {
        temp = ZERO;
        md_dev[i] = ONE;
      }
      continue;
    }

    /* |c[i]| == 1: require x[i]*c[i] >= 0 (non-strict sign constraint) */
    if ( cd_dev[i] > HALF || cd_dev[i] < -HALF)
    {
      if (xd_dev[i]*cd_dev[i] < ZERO )
      {
        temp = ZERO;
        md_dev[i] = ONE;
      }
    }
  }

  if (temp == ONE)
    return (SUNTRUE);
  else
    return(SUNFALSE);
}


/* ----------------------------------------------------------------------------
 * Compute minimum componentwise quotient
 */

realtype N_VMinQuotient_OpenMPDEV(N_Vector num, N_Vector denom)
{
  sunindextype i, N;
  realtype *nd_dev, *dd_dev, min;
  int dev;

  nd_dev = dd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(num);
  nd_dev = NV_DATA_DEV_OMPDEV(num);
  dd_dev = NV_DATA_DEV_OMPDEV(denom);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* entries with zero denominator are skipped; min stays BIG_REAL if all
     denominators are zero */
  min = BIG_REAL;

#pragma omp target map(to:N) map(tofrom:min) is_device_ptr(nd_dev, dd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(min:min) schedule(static, 1)
  for (i = 0; i < N; i++)
    if (dd_dev[i] != ZERO)
      min = SUNMIN(nd_dev[i]/dd_dev[i], min);

  return(min);
}


/*
 * -----------------------------------------------------------------
 * fused vector operations
 * -----------------------------------------------------------------
 */

int N_VLinearCombination_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector z)
{
  int          i, dev;
  realtype     to_add; /* temporary variable to hold sum being added in atomic operation */
  sunindextype j, N;
  realtype*    zd_dev=NULL;
  realtype*    xd_dev=NULL;
  realtype**   xd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should
have called N_VScale */ if (nvec == 1) { N_VScale_OpenMPDEV(c[0], X[0], z); return(0); } /* should have called N_VLinearSum */ if (nvec == 2) { N_VLinearSum_OpenMPDEV(c[0], X[0], c[1], X[1], z); return(0); } /* get vector length and data array */ N = NV_LENGTH_OMPDEV(z); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store X dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); /* * X[0] += c[i]*X[i], i = 1,...,nvec-1 */ if ((X[0] == z) && (c[0] == ONE)) { #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,zd_dev) device(dev) #pragma omp teams distribute { for (i=1; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) { to_add = c[i] * xd_dev[j]; #pragma omp atomic zd_dev[j] += to_add; } } } free(xd_dev_ptrs); return(0); } /* * X[0] = c[0] * X[0] + sum{ c[i] * X[i] }, i = 1,...,nvec-1 */ if (X[0] == z) { #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,zd_dev) { #pragma omp teams distribute parallel for schedule(static,1) for (j=0; j<N; j++) zd_dev[j] *= c[0]; } #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,zd_dev) #pragma omp teams distribute { for (i=1; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) { to_add = c[i] * xd_dev[j]; #pragma omp atomic zd_dev[j] += to_add; } } } free(xd_dev_ptrs); return(0); } /* * z = sum{ c[i] * X[i] }, i = 0,...,nvec-1 */ xd_dev = NV_DATA_DEV_OMPDEV(X[0]); #pragma omp target map(to:N,c[:nvec]) \ is_device_ptr(xd_dev, zd_dev) device(dev) { #pragma omp teams distribute parallel for schedule(static, 1) for (j=0; j<N; j++) { zd_dev[j] = c[0] * xd_dev[j]; } } #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev, 
zd_dev) device(dev) #pragma omp teams distribute { for (i=1; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) { to_add = c[i] * xd_dev[j]; #pragma omp atomic zd_dev[j] += to_add; } } } free(xd_dev_ptrs); return(0); } int N_VScaleAddMulti_OpenMPDEV(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z) { int i, dev; sunindextype j, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VLinearSum */ if (nvec == 1) { N_VLinearSum_OpenMPDEV(a[0], x, ONE, Y[0], Z[0]); return(0); } /* get vector length and data array */ N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); /* * Y[i][j] += a[i] * x[j] */ if (Y == Z) { #pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev, yd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { yd_dev = yd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) yd_dev[j] += a[i] * xd_dev[j]; } } free(yd_dev_ptrs); return(0); } /* Allocate and store dev pointers to copy to device */ zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); /* * Z[i][j] = Y[i][j] + a[i] * x[j] */ #pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { yd_dev = yd_dev_ptrs[i]; zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = a[i] * xd_dev[j] + yd_dev[j]; } } 
free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}


int N_VDotProdMulti_OpenMPDEV(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods)
{
  int          i, dev;
  sunindextype j, N;
  realtype     sum;
  realtype*    xd_dev=NULL;
  realtype*    yd_dev=NULL;
  realtype**   yd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VDotProd */
  if (nvec == 1) {
    dotprods[0] = N_VDotProd_OpenMPDEV(x, Y[0]);
    return(0);
  }

  /* get vector length and data array */
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* initialize dot products */
  for (i=0; i<nvec; i++) {
    dotprods[i] = ZERO;
  }

  /* Allocate and store dev pointers to copy to device */
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);

  /* compute multiple dot products: the outer distribute loop assigns each
     dot product i to a team; sum accumulates one product per team via the
     inner thread-level reduction */
#pragma omp target map(to:N,nvec,yd_dev_ptrs[:nvec]) map(tofrom:dotprods[:nvec]) \
  is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    yd_dev = yd_dev_ptrs[i];
    sum = ZERO;
#pragma omp parallel for reduction(+:sum) schedule(static, 1)
    for (j=0; j<N; j++)
      sum += xd_dev[j] * yd_dev[j];
    dotprods[i] += sum;
  }

  free(yd_dev_ptrs);
  return(0);
}


/*
 * -----------------------------------------------------------------
 * vector array operations
 * -----------------------------------------------------------------
 */

int N_VLinearSumVectorArray_OpenMPDEV(int nvec,
                                      realtype a, N_Vector* X,
                                      realtype b, N_Vector* Y,
                                      N_Vector* Z)
{
  int          i, dev;
  sunindextype j, N;
  N_Vector*    V1;
  N_Vector*    V2;
  booleantype  test;
  realtype     c;
  realtype*    xd_dev=NULL;
  realtype*    yd_dev=NULL;
  realtype*    zd_dev=NULL;
  realtype**   xd_dev_ptrs=NULL;
  realtype**   yd_dev_ptrs=NULL;
  realtype**   zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMPDEV(a, X[0], b, Y[0], Z[0]);
    return(0);
  }

  /* BLAS usage: axpy y <- ax+y
*/
  if ((b == ONE) && (Z == Y))
    return(VaxpyVectorArray_OpenMPDEV(nvec, a, X, Y));

  /* BLAS usage: axpy x <- by+x */
  if ((a == ONE) && (Z == X))
    return(VaxpyVectorArray_OpenMPDEV(nvec, b, Y, X));

  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE))
    return(VSumVectorArray_OpenMPDEV(nvec, X, Y, Z));

  /* Cases:                    */
  /*   (1) a == 1.0, b = -1.0, */
  /*   (2) a == -1.0, b == 1.0 */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VDiffVectorArray_OpenMPDEV(nvec, V2, V1, Z));
  }

  /* Cases:                                                  */
  /*   (1) a == 1.0, b == other or 0.0,                      */
  /*   (2) a == other or 0.0, b == 1.0                       */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c  = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin1VectorArray_OpenMPDEV(nvec, c, V1, V2, Z));
  }

  /* Cases:                     */
  /*   (1) a == -1.0, b != 1.0, */
  /*   (2) a != 1.0, b == -1.0  */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c  = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ?
X : Y;
    return(VLin2VectorArray_OpenMPDEV(nvec, c, V1, V2, Z));
  }

  /* Case: a == b                                                         */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b)
    return(VScaleSumVectorArray_OpenMPDEV(nvec, a, X, Y, Z));

  /* Case: a == -b */
  if (a == -b)
    return(VScaleDiffVectorArray_OpenMPDEV(nvec, a, X, Y, Z));

  /* Do all cases not handled above:                               */
  /*   (1) a == other, b == 0.0 - user should have called N_VScale */
  /*   (2) a == 0.0, b == other - user should have called N_VScale */
  /*   (3) a,b == other, a !=b, a != -b                            */

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* compute linear sum for each vector pair in vector arrays */
#pragma omp target map(to:N,nvec,a,b,xd_dev_ptrs[:nvec], yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = a * xd_dev[j] + b * yd_dev[j];
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}


int N_VScaleVectorArray_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector* Z)
{
  int          i, dev;
  sunindextype j, N;
  realtype*    xd_dev=NULL;
  realtype*    zd_dev=NULL;
  realtype**   xd_dev_ptrs=NULL;
  realtype**   zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMPDEV(c[0], X[0], Z[0]);
    return(0);
  }

  /* get
vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++) {
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  }

  /*
   * X[i] *= c[i]  (in-place case)
   */
  if (X == Z) {
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++)
          xd_dev[j] *= c[i];
      }
    }
    free(xd_dev_ptrs);
    return(0);
  }

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /*
   * Z[i] = c[i] * X[i]
   */
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = c[i] * xd_dev[j];
    }
  }

  free(xd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}


int N_VConstVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* Z)
{
  int          i, dev;
  sunindextype j, N;
  realtype*    zd_dev=NULL;
  realtype**   zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VConst */
  if (nvec == 1) {
    N_VConst_OpenMPDEV(c, Z[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get device */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* set each vector in the vector array to a constant */
#pragma omp target map(to:N,nvec,zd_dev_ptrs[:nvec]) \
  is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0;
i<nvec; i++) { zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = c; } } free(zd_dev_ptrs); return(0); } int N_VWrmsNormVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W, realtype* nrm) { int i, dev; sunindextype j, N; realtype sum; realtype* wd_dev=NULL; realtype* xd_dev=NULL; realtype** wd_dev_ptrs=NULL; realtype** xd_dev_ptrs=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VWrmsNorm */ if (nvec == 1) { nrm[0] = N_VWrmsNorm_OpenMPDEV(X[0], W[0]); return(0); } /* get vector length */ N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* initialize norms */ for (i=0; i<nvec; i++) nrm[i] = ZERO; /* Allocate and store dev pointers to copy to device */ wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); /* compute the WRMS norm for each vector in the vector array */ #pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \ is_device_ptr(xd_dev, wd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; wd_dev = wd_dev_ptrs[i]; sum = ZERO; #pragma omp parallel for reduction(+:sum) schedule(static, 1) { for (j=0; j<N; j++) sum += SUNSQR(xd_dev[j] * wd_dev[j]); } nrm[i] = SUNRsqrt(sum/N); } } free(wd_dev_ptrs); free(xd_dev_ptrs); return(0); } int N_VWrmsNormMaskVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm) { int i, dev; sunindextype j, N; realtype sum; realtype* wd_dev=NULL; realtype* xd_dev=NULL; realtype* idd_dev=NULL; realtype** wd_dev_ptrs=NULL; realtype** xd_dev_ptrs=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VWrmsNorm */ if (nvec == 1) { nrm[0] = 
N_VWrmsNormMask_OpenMPDEV(X[0], W[0], id); return(0); } /* get vector length and mask data array */ N = NV_LENGTH_OMPDEV(X[0]); idd_dev = NV_DATA_DEV_OMPDEV(id); /* get default device identifier */ dev = omp_get_default_device(); /* initialize norms */ for (i=0; i<nvec; i++) nrm[i] = ZERO; /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]); /* compute the WRMS norm for each vector in the vector array */ #pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \ is_device_ptr(idd_dev,xd_dev,wd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; wd_dev = wd_dev_ptrs[i]; sum = ZERO; #pragma omp parallel for reduction(+:sum) schedule(static, 1) { for (j=0; j<N; j++) { if (idd_dev[j] > ZERO) sum += SUNSQR(xd_dev[j] * wd_dev[j]); } } nrm[i] = SUNRsqrt(sum/N); } } free(xd_dev_ptrs); free(wd_dev_ptrs); return(0); } int N_VScaleAddMultiVectorArray_OpenMPDEV(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z) { int i, j, dev; sunindextype k, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; int retval; N_Vector* YY; N_Vector* ZZ; /* invalid number of vectors */ if (nvec < 1) return(-1); if (nsum < 1) return(-1); /* --------------------------- * Special cases for nvec == 1 * --------------------------- */ if (nvec == 1) { /* should have called N_VLinearSum */ if (nsum == 1) { N_VLinearSum_OpenMPDEV(a[0], X[0], ONE, Y[0][0], Z[0][0]); return(0); } /* should have called N_VScaleAddMulti */ YY = (N_Vector *) malloc(nsum * sizeof(N_Vector)); ZZ = (N_Vector *) malloc(nsum * sizeof(N_Vector)); for (j=0; j<nsum; j++) { 
YY[j] = Y[j][0];
      ZZ[j] = Z[j][0];
    }

    retval = N_VScaleAddMulti_OpenMPDEV(nsum, a, X[0], YY, ZZ);

    free(YY);
    free(ZZ);
    return(retval);
  }

  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */

  /* should have called N_VLinearSumVectorArray */
  if (nsum == 1) {
    retval = N_VLinearSumVectorArray_OpenMPDEV(nvec, a[0], X, ONE, Y[0], Z[0]);
    return(retval);
  }

  /* ----------------------------
   * Compute multiple linear sums
   * ---------------------------- */

  /* get vector length */
  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device;
     Y pointers are flattened row-major as yd_dev_ptrs[i*nsum+j] = Y[j][i] */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++) {
    for (j=0; j<nsum; j++)
      yd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Y[j][i]);
  }

  /*
   * Y[i][j] += a[i] * x[j]  (in-place case)
   */
  if (Y == Z) {
#pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum]) \
  is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
        for (j=0; j<nsum; j++) {
          yd_dev = yd_dev_ptrs[i*nsum+j];
#pragma omp parallel for schedule(static, 1)
          for (k=0; k<N; k++)
            yd_dev[k] += a[j] * xd_dev[k];
        }
      }
    }
    free(xd_dev_ptrs);
    free(yd_dev_ptrs);
    return(0);
  }

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (i=0; i<nvec; i++) {
    for (j=0; j<nsum; j++)
      zd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Z[j][i]);
  }

  /*
   * Z[i][j] = Y[i][j] + a[i] * x[j]
   */
#pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec*nsum]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      for (j=0; j<nsum; j++) {
        yd_dev =
yd_dev_ptrs[i*nsum+j];
        zd_dev = zd_dev_ptrs[i*nsum+j];
#pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] = a[j] * xd_dev[k] + yd_dev[k];
      }
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}


int N_VLinearCombinationVectorArray_OpenMPDEV(int nvec, int nsum, realtype* c,
                                              N_Vector** X, N_Vector* Z)
{
  int          i; /* vector arrays index in summation [0,nsum) */
  int          j; /* vector index in vector array     [0,nvec) */
  sunindextype k; /* element index in vector          [0,N)    */
  sunindextype N;
  realtype*    zd_dev=NULL;
  realtype*    xd_dev=NULL;
  realtype**   zd_dev_ptrs=NULL;
  realtype**   xd_dev_ptrs=NULL;
  int          dev;

  realtype*    ctmp;
  N_Vector*    Y;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  if (nsum < 1) return(-1);

  /* ---------------------------
   * Special cases for nvec == 1
   * --------------------------- */

  if (nvec == 1) {

    /* should have called N_VScale */
    if (nsum == 1) {
      N_VScale_OpenMPDEV(c[0], X[0][0], Z[0]);
      return(0);
    }

    /* should have called N_VLinearSum */
    if (nsum == 2) {
      N_VLinearSum_OpenMPDEV(c[0], X[0][0], c[1], X[1][0], Z[0]);
      return(0);
    }

    /* should have called N_VLinearCombination */
    Y = (N_Vector *) malloc(nsum * sizeof(N_Vector));

    for (i=0; i<nsum; i++) {
      Y[i] = X[i][0];
    }

    N_VLinearCombination_OpenMPDEV(nsum, c, Y, Z[0]);

    free(Y);
    return(0);
  }

  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */

  /* should have called N_VScaleVectorArray */
  if (nsum == 1) {

    ctmp = (realtype*) malloc(nvec * sizeof(realtype));

    for (j=0; j<nvec; j++) {
      ctmp[j] = c[0];
    }

    N_VScaleVectorArray_OpenMPDEV(nvec, ctmp, X[0], Z);

    free(ctmp);
    return(0);
  }

  /* should have called N_VLinearSumVectorArray */
  if (nsum == 2) {
    N_VLinearSumVectorArray_OpenMPDEV(nvec, c[0], X[0], c[1], X[1], Z);
    return(0);
  }

  /* --------------------------
   * Compute linear combination
   * -------------------------- */

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and
store dev pointers to copy to device;
     X pointers are flattened as xd_dev_ptrs[j*nsum+i] = X[i][j] */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  xd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (j=0; j<nvec; j++)
    zd_dev_ptrs[j] = NV_DATA_DEV_OMPDEV(Z[j]);
  for (j=0; j<nvec; j++) {
    for (i=0; i<nsum; i++)
      xd_dev_ptrs[j * nsum + i] = NV_DATA_DEV_OMPDEV(X[i][j]);
  }

  /*
   * X[0][j] += c[i]*X[i][j], i = 1,...,nvec-1
   */
  if ((X[0] == Z) && (c[0] == ONE)) {
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute
    {
      for (j=0; j<nvec; j++) {
        zd_dev = zd_dev_ptrs[j];
        for (i=1; i<nsum; i++) {
          xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
          for (k=0; k<N; k++)
            zd_dev[k] += c[i] * xd_dev[k];
        }
      }
    }
    free(xd_dev_ptrs);
    free(zd_dev_ptrs);
    return(0);
  }

  /*
   * X[0][j] = c[0] * X[0][j] + sum{ c[i] * X[i][j] }, i = 1,...,nvec-1
   */
  if (X[0] == Z) {
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
  is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
    {
      for (j=0; j<nvec; j++) {
        zd_dev = zd_dev_ptrs[j];

        /* scale the output vector by c[0] first */
#pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] *= c[0];

        /* accumulate the remaining scaled vectors */
        for (i=1; i<nsum; i++) {
          xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
          for (k=0; k<N; k++)
            zd_dev[k] += c[i] * xd_dev[k];
        }
      }
    }
    free(xd_dev_ptrs);
    free(zd_dev_ptrs);
    return(0);
  }

  /*
   * Z[j] = sum{ c[i] * X[i][j] }, i = 0,...,nvec-1
   */
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
  is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (j=0; j<nvec; j++) {

      /* scale first vector in the sum into the output vector */
      xd_dev = xd_dev_ptrs[j*nsum];
      zd_dev = zd_dev_ptrs[j];
#pragma omp parallel for schedule(static, 1)
      for (k=0; k<N; k++)
        zd_dev[k] = c[0] * xd_dev[k];

      /* scale and sum remaining vectors into the output vector */
      for (i=1; i<nsum; i++) {
        xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1) for (k=0; k<N; k++) zd_dev[k] += c[i] * xd_dev[k]; } } } free(xd_dev_ptrs); free(zd_dev_ptrs); return(0); } /* * ----------------------------------------------------------------- * private functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Copy vector components into a second vector */ static void VCopy_OpenMPDEV(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd_dev, *zd_dev; int dev; xd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N) is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = xd_dev[i]; return; } /* ---------------------------------------------------------------------------- * Compute vector sum */ static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd_dev, *yd_dev, *zd_dev; int dev; xd_dev = yd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); yd_dev = NV_DATA_DEV_OMPDEV(y); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = xd_dev[i]+yd_dev[i]; return; } /* ---------------------------------------------------------------------------- * Compute vector difference */ static void VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd_dev, *yd_dev, *zd_dev; int dev; xd_dev = yd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); yd_dev = NV_DATA_DEV_OMPDEV(y); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default 
device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = xd_dev[i]-yd_dev[i]; return; } /* ---------------------------------------------------------------------------- * Compute the negative of a vector */ static void VNeg_OpenMPDEV(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd_dev, *zd_dev; int dev; xd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N) is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = -xd_dev[i]; return; } /* ---------------------------------------------------------------------------- * Compute scaled vector sum */ static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd_dev, *yd_dev, *zd_dev; int dev; xd_dev = yd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); yd_dev = NV_DATA_DEV_OMPDEV(y); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N,c) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = c*(xd_dev[i]+yd_dev[i]); return; } /* ---------------------------------------------------------------------------- * Compute scaled vector difference */ static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd_dev, *yd_dev, *zd_dev; int dev; xd_dev = yd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); yd_dev = NV_DATA_DEV_OMPDEV(y); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device 
identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N,c) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = c*(xd_dev[i]-yd_dev[i]); return; } /* ---------------------------------------------------------------------------- * Compute vector sum z[i] = a*x[i]+y[i] */ static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd_dev, *yd_dev, *zd_dev; int dev; xd_dev = yd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); yd_dev = NV_DATA_DEV_OMPDEV(y); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N,a) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = (a*xd_dev[i])+yd_dev[i]; return; } /* ---------------------------------------------------------------------------- * Compute vector difference z[i] = a*x[i]-y[i] */ static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd_dev, *yd_dev, *zd_dev; int dev; xd_dev = yd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); yd_dev = NV_DATA_DEV_OMPDEV(y); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N,a) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = (a*xd_dev[i])-yd_dev[i]; return; } /* ---------------------------------------------------------------------------- * Compute special cases of linear sum */ static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y) { sunindextype i, N; realtype *xd_dev, *yd_dev; int dev; xd_dev = yd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); yd_dev 
= NV_DATA_DEV_OMPDEV(y); /* get default device identifier */ dev = omp_get_default_device(); if (a == ONE) { #pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) yd_dev[i] += xd_dev[i]; return; } if (a == -ONE) { #pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) yd_dev[i] -= xd_dev[i]; return; } #pragma omp target map(to:N,a) is_device_ptr(xd_dev, yd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) yd_dev[i] += a*xd_dev[i]; return; } /* ---------------------------------------------------------------------------- * Compute scaled vector x[i] = a*x[i] */ static void VScaleBy_OpenMPDEV(realtype a, N_Vector x) { sunindextype i, N; realtype *xd_dev; int dev; xd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(to:N,a) is_device_ptr(xd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) xd_dev[i] *= a; return; } /* * ----------------------------------------------------------------- * private functions for special cases of vector array operations * ----------------------------------------------------------------- */ static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z) { int i, dev; sunindextype j, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); 
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); for (i=0; i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = xd_dev[j] + yd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); free(zd_dev_ptrs); return(0); } static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z) { int i, dev; sunindextype j, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); for (i=0; i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = xd_dev[j] - yd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); free(zd_dev_ptrs); return(0); } static int VScaleSumVectorArray_OpenMPDEV(int 
nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z) { int i, dev; sunindextype j, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); for (i=0; i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = c * (xd_dev[j] + yd_dev[j]); } } free(xd_dev_ptrs); free(yd_dev_ptrs); free(zd_dev_ptrs); return(0); } static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z) { int i, dev; sunindextype j, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev ointer to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); for (i=0; 
i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = c * (xd_dev[j] - yd_dev[j]); } } free(xd_dev_ptrs); free(yd_dev_ptrs); free(zd_dev_ptrs); return(0); } static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z) { int i, dev; sunindextype j, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); for (i=0; i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = (a * xd_dev[j]) + yd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); free(zd_dev_ptrs); return(0); } static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z) { int i, dev; sunindextype j, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** 
xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); for (i=0; i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = (a * xd_dev[j]) - yd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); free(zd_dev_ptrs); return(0); } static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y) { int i, dev; sunindextype j, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); if (a == ONE) { #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) yd_dev[j] += 
xd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); return(0); } if (a == -ONE) { #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) yd_dev[j] -= xd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); return(0); } #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) yd_dev[j] += a * xd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); return(0); } /* * ----------------------------------------------------------------- * Enable / Disable fused and vector array operations * ----------------------------------------------------------------- */ int N_VEnableFusedOps_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_OpenMPDEV; v->ops->nvscaleaddmulti = N_VScaleAddMulti_OpenMPDEV; v->ops->nvdotprodmulti = N_VDotProdMulti_OpenMPDEV; /* enable all vector array operations */ v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_OpenMPDEV; v->ops->nvscalevectorarray = N_VScaleVectorArray_OpenMPDEV; v->ops->nvconstvectorarray = N_VConstVectorArray_OpenMPDEV; v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_OpenMPDEV; v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_OpenMPDEV; v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_OpenMPDEV; v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_OpenMPDEV; } else { /* disable all fused vector operations 
*/ v->ops->nvlinearcombination = NULL; v->ops->nvscaleaddmulti = NULL; v->ops->nvdotprodmulti = NULL; /* disable all vector array operations */ v->ops->nvlinearsumvectorarray = NULL; v->ops->nvscalevectorarray = NULL; v->ops->nvconstvectorarray = NULL; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = NULL; v->ops->nvlinearcombinationvectorarray = NULL; } /* return success */ return(0); } int N_VEnableLinearCombination_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombination = N_VLinearCombination_OpenMPDEV; else v->ops->nvlinearcombination = NULL; /* return success */ return(0); } int N_VEnableScaleAddMulti_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmulti = N_VScaleAddMulti_OpenMPDEV; else v->ops->nvscaleaddmulti = NULL; /* return success */ return(0); } int N_VEnableDotProdMulti_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvdotprodmulti = N_VDotProdMulti_OpenMPDEV; else v->ops->nvdotprodmulti = NULL; /* return success */ return(0); } int N_VEnableLinearSumVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_OpenMPDEV; else v->ops->nvlinearsumvectorarray = NULL; /* return success */ return(0); } int 
N_VEnableScaleVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscalevectorarray = N_VScaleVectorArray_OpenMPDEV; else v->ops->nvscalevectorarray = NULL; /* return success */ return(0); } int N_VEnableConstVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvconstvectorarray = N_VConstVectorArray_OpenMPDEV; else v->ops->nvconstvectorarray = NULL; /* return success */ return(0); } int N_VEnableWrmsNormVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_OpenMPDEV; else v->ops->nvwrmsnormvectorarray = NULL; /* return success */ return(0); } int N_VEnableWrmsNormMaskVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_OpenMPDEV; else v->ops->nvwrmsnormmaskvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleAddMultiVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_OpenMPDEV; else v->ops->nvscaleaddmultivectorarray = NULL; /* return success */ return(0); } int 
N_VEnableLinearCombinationVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_OpenMPDEV; else v->ops->nvlinearcombinationvectorarray = NULL; /* return success */ return(0); }
/* ==== source file: minimum_cut_helpers.h ==== */
/****************************************************************************** * minimum_cut_helpers.h * * Source of VieCut * ****************************************************************************** * Copyright (C) 2018 Alexander Noe <alexander.noe@univie.ac.at> * * Published under the MIT license in the LICENSE file. *****************************************************************************/ #pragma once #include <algorithm> #include <memory> #include <unordered_map> #include <utility> #include <vector> #include "common/configuration.h" #include "data_structure/graph_access.h" #include "data_structure/mutable_graph.h" template <class GraphPtr> class minimum_cut_helpers { private: static constexpr bool debug = false; // Get index of minimum degree vertex static size_t minimumIndex(GraphPtr G) { size_t minimum_index = 0; EdgeWeight mindeg = G->getMinDegree(); for (NodeID n : G->nodes()) { if (G->getWeightedNodeDegree(n) == mindeg) { minimum_index = n; break; } } return minimum_index; } public: // set minimum cut to initial value (one minimum degree vertex) // - this cut will be updated later in the global_mincut static void setInitialCutValues( const std::vector<GraphPtr>& graphs) { if (configuration::getConfig()->save_cut) { size_t minimum_index = minimumIndex(graphs.back()); for (NodeID idx : graphs[0]->nodes()) { if (idx == minimum_index) { graphs[0]->setNodeInCut(idx, true); } else { graphs[0]->setNodeInCut(idx, false); } } } } static EdgeWeight updateCut( const std::vector<GraphPtr>& graphs, EdgeWeight previous_mincut) { if (configuration::getConfig()->save_cut) { GraphPtr new_graph = graphs.back(); if (new_graph->number_of_nodes() > 1) { if (new_graph->getMinDegree() < previous_mincut) { size_t minimum_index = minimumIndex(graphs.back()); for (NodeID idx : graphs[0]->nodes()) { NodeID coarseID = idx; for (size_t lv = 0; lv < graphs.size() - 1; ++lv) { coarseID = graphs[lv]->getPartitionIndex(coarseID); } if (coarseID == minimum_index) { 
graphs[0]->setNodeInCut(idx, true); } else { graphs[0]->setNodeInCut(idx, false); } } } } } if (graphs.back()->number_of_nodes() > 1) { return std::min(previous_mincut, graphs.back()->getMinDegree()); } else { return previous_mincut; } } static void retrieveMinimumCut( std::vector<GraphPtr> graphs) { GraphPtr G = graphs[0]; size_t inside = 0, outside = 0; for (NodeID n : G->nodes()) { if (G->getNodeInCut(n)) { inside++; G->setPartitionIndex(n, 0); } else { outside++; G->setPartitionIndex(n, 1); } } [[maybe_unused]] size_t smaller = 0; if (inside > outside) { smaller = 1; } #ifndef NDEBUG for (NodeID n : G->nodes()) { if (G->getPartitionIndex(n) == smaller) { std::cout << "n " << n << std::endl; } } #endif LOG << "smaller side has " << std::min(inside, outside) << " nodes."; } static std::pair<std::vector<NodeID>, std::vector<std::vector<NodeID> > > remap_cluster( GraphPtr G, const std::vector<NodeID>& cluster_id) { std::vector<NodeID> mapping; std::vector<std::vector<NodeID> > reverse_mapping; PartitionID cur_no_clusters = 0; std::unordered_map<PartitionID, PartitionID> remap; std::vector<NodeID> part(G->number_of_nodes(), UNDEFINED_NODE); const bool save_cut = configuration::getConfig()->save_cut; for (NodeID node : G->nodes()) { PartitionID cur_cluster = cluster_id[node]; // check whether we already had that if (part[cur_cluster] == UNDEFINED_NODE) { part[cur_cluster] = cur_no_clusters++; reverse_mapping.emplace_back(); } mapping.emplace_back(part[cur_cluster]); if (save_cut) { G->setPartitionIndex(node, part[cur_cluster]); } reverse_mapping[part[cur_cluster]].push_back(node); } return std::make_pair(mapping, reverse_mapping); } static void setVertexLocations( mutableGraphPtr out_graph, const std::vector<GraphPtr>& graphs, const std::vector<size_t>& ge_ids, const std::vector<std::vector<std::pair<NodeID, NodeID> > >& g_edges, const EdgeWeight mincut) { out_graph->setOriginalNodes(graphs[0]->number_of_nodes()); std::vector<NodeID> final; for (NodeID n : 
out_graph->nodes()) { std::vector<NodeID> empty; out_graph->setContainedVertices(n, empty); } for (NodeID n = 0; n < graphs.back()->number_of_nodes(); ++n) { graphs.back()->setPartitionIndex( n, out_graph->getCurrentPosition(n)); } int32_t g_id = ge_ids.size() - 1; for (auto i = graphs.size(); i-- > 0 && graphs.size() > 1; ) { if (i < graphs.size() - 1) { #ifdef PARALLEL #pragma omp parallel for #endif for (NodeID n = 0; n < graphs[i]->number_of_nodes(); ++n) { NodeID index = graphs[i]->getPartitionIndex(n); NodeID id_new = graphs[i + 1]->getPartitionIndex(index); graphs[i]->setPartitionIndex(n, id_new); } } if (g_id != -1 && i == ge_ids[g_id]) { for (auto e : g_edges[g_id]) { NodeID new_node = out_graph->new_empty_node(); NodeID neighbour = graphs[i]->getPartitionIndex(e.second); out_graph->new_edge_order(new_node, neighbour, mincut); graphs[i]->setPartitionIndex(e.first, new_node); } --g_id; } } if (configuration::getConfig()->save_cut) { std::vector<NodeID> b(out_graph->n(), 0); for (NodeID n : graphs[0]->nodes()) { NodeID position = graphs[0]->getPartitionIndex(n); out_graph->setCurrentPosition(n, position); out_graph->addContainedVertex(position, n); ++b[position]; } bool cut_logs = configuration::getConfig()->verbose; if (cut_logs) { std::sort(b.begin(), b.end()); printLogs(b); } else { std::sort(b.begin(), b.end()); bool verbose = configuration::getConfig()->verbose; NodeID id0 = b.end() - std::upper_bound(b.begin(), b.end(), 0); NodeID id1 = b.end() - std::upper_bound(b.begin(), b.end(), 1); LOGC(verbose) << "Largest block: " << b.back(); LOGC(verbose) << "Nonempty blocks: " << id0; LOGC(verbose) << "Nontrivial blocks: " << id1; } } } static void printLogs(const std::vector<NodeID>& b) { NodeID empty = std::lower_bound(b.begin(), b.end(), 1) - b.begin(); NodeID id1 = b.end() - std::upper_bound(b.begin(), b.end(), 1); NodeID id10 = b.end() - std::upper_bound(b.begin(), b.end(), 9); NodeID id100 = b.end() - std::upper_bound(b.begin(), b.end(), 99); NodeID 
id1000 = b.end() - std::upper_bound(b.begin(), b.end(), 999); NodeID id10000 = b.end() - std::upper_bound(b.begin(), b.end(), 9999); LOG1 << "--------------------------------------------------"; LOG1 << "Cut stats:"; LOG1 << "Largest block: " << b.back(); LOG1 << "Number of empty blocks: " << empty; LOG1 << "Number of nontrivial blocks: " << id1; LOG1 << "Number of blocks of size >=10: " << id10; LOG1 << "Number of blocks of size >=100: " << id100; LOG1 << "Number of blocks of size >=1000: " << id1000; LOG1 << "Number of blocks of size >=10000: " << id10000; LOG1 << "--------------------------------------------------"; } };
/* ==== source file: autocropper_core.c ==== */
/* This works has been developed at Diamond Light Source Ltd.
 *
 * Copyright 2019 Daniil Kazantsev
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdlib.h>   /* calloc, free */
#include <math.h>     /* fabsf */

#include "autocropper_core.h"
#include "utils.h"

/* A data cropping algorithm where the object of interest lies within the FOV.
 * The algorithm is developed mainly for cropping of tomographic projection data.
 *
 * Input        : the 2D image (dimZ == 1) or 3D stack of dimX*dimY slices
 * mask_box     : scratch image (dimX*dimY) holding per-pixel deviations from
 *                the background mean; only the interior (inside margin_skip)
 *                is written -- assumes the caller zero-initialized it (TODO confirm)
 * crop_indeces : output [x_lo, x_hi, y_lo, y_hi] crop box
 * Returns 0.
 */
int Autocrop_main(float *Input, float *mask_box, float *crop_indeces,
                  float threshold, int margin_skip, int statbox_size,
                  int increase_crop, int dimX, int dimY, int dimZ)
{
    float *maskMean_value = (float*) calloc(1, sizeof(float));

    if (dimZ == 1) {
        /* 2D processing */
        /* collecting statistics in the background box */
        stat_collector2D(Input, maskMean_value, statbox_size, dimX, dimY, 0);
        /* difference of the local 3x3 mean with the background mean */
        diff_mask2D(Input, mask_box, maskMean_value, margin_skip, dimX, dimY, 0);
        /* getting the indeces to crop */
        get_indices2D(mask_box, crop_indeces, threshold, increase_crop, dimX, dimY, 0);
    }
    else {
        /* 3D processing: crop each slice, then take the bounding box over all
           slices (running min/max replaces the former per-slice scratch
           arrays -- same result, O(1) extra memory). */
        long k;
        float min_x = 0.0f, max_x = 0.0f, min_y = 0.0f, max_y = 0.0f;

        for (k = 0; k < dimZ; k++) {
            stat_collector2D(Input, maskMean_value, statbox_size, dimX, dimY, k);
            diff_mask2D(Input, mask_box, maskMean_value, margin_skip, dimX, dimY, k);
            get_indices2D(mask_box, crop_indeces, threshold, increase_crop, dimX, dimY, k);

            if (k == 0) {
                min_x = crop_indeces[0]; max_x = crop_indeces[1];
                min_y = crop_indeces[2]; max_y = crop_indeces[3];
            }
            else {
                if (crop_indeces[0] < min_x) min_x = crop_indeces[0];
                if (crop_indeces[1] > max_x) max_x = crop_indeces[1];
                if (crop_indeces[2] < min_y) min_y = crop_indeces[2];
                if (crop_indeces[3] > max_y) max_y = crop_indeces[3];
            }
        }

        /* the union of all per-slice boxes */
        crop_indeces[0] = min_x;
        crop_indeces[1] = max_x;
        crop_indeces[2] = min_y;
        crop_indeces[3] = max_y;
    }

    free(maskMean_value);
    return 0;
}
/********************************************************************/
/***************************2D Functions*****************************/
/********************************************************************/

/* Place a background box (two side strips of width statbox_size, limited to a
 * vertical band around the image middle) on slice k and store the mean
 * intensity inside it in maskMean_value[0].
 * NOTE(review): if the band/strips select no pixel (degenerate sizes),
 * counter stays 0 and the division yields inf/nan -- confirm callers
 * guarantee statbox_size > 0 and dimY large enough. */
void stat_collector2D(float *Input, float *maskMean_value, int statbox_size,
                      int dimX, int dimY, int k)
{
    int statbox_size_vert, mid_vert_index_Y, vertBox_index_up, vertBox_index_down;
    long i, j, counter;
    float meanval;

    mid_vert_index_Y = (int)(0.5f*dimY);
    statbox_size_vert = (int)(2.5f*statbox_size);

    /* place the box in the vertical middle of the image */
    vertBox_index_up = mid_vert_index_Y - statbox_size_vert;
    vertBox_index_down = mid_vert_index_Y + statbox_size_vert;

    /* collecting statistics in the box */
    counter = 0;
    meanval = 0.0f;
    for (j = 0; j < dimY; j++) {
        if ((j >= vertBox_index_up) && (j <= vertBox_index_down)) {
            for (i = 0; i < dimX; i++) {
                /* left or right side strip of the slice */
                if (((i >= 0) && (i <= statbox_size)) ||
                    ((i >= dimX-statbox_size) && (i < dimX))) {
                    meanval += Input[(dimX*dimY)*k + j*dimX + i];
                    counter++;
                }
            }
        }
    }
    maskMean_value[0] = meanval/(float)(counter);
    return;
}

/* For every interior pixel of slice k (margin_skip pixels away from each
 * border) store |local 3x3 mean - background mean| in mask_box. */
void diff_mask2D(float *Input, float *mask_box, float *maskMean_value,
                 int margin_skip, int dimX, int dimY, int k)
{
    long i, j, i1, j1, j_m, i_m;
    float local_mean;
#pragma omp parallel for shared (Input, mask_box, maskMean_value, k) private(i, j, i1, j1, j_m, i_m, local_mean)
    for (j = 0; j < dimY; j++) {
        if ((j > margin_skip) && (j < dimY-margin_skip)) {
            for (i = 0; i < dimX; i++) {
                if ((i > margin_skip) && (i < dimX-margin_skip)) {
                    /* mean over the 3x3 neighbourhood */
                    local_mean = 0.0f;
                    for (j_m = -1; j_m <= 1; j_m++) {
                        for (i_m = -1; i_m <= 1; i_m++) {
                            i1 = i + i_m;
                            j1 = j + j_m;
                            local_mean += Input[(dimX*dimY)*k + j1*dimX + i1];
                        }
                    }
                    local_mean /= 9.0f;
                    /* fabsf: operands are float, avoid double round trip */
                    mask_box[j*dimX+i] = fabsf(local_mean - maskMean_value[0]);
                }
            }
        }
    }
    return;
}

/* Threshold the column/row means of mask_box to find the crop box:
 * crop_indeces = [first X, last X, first Y, last Y], each widened by
 * increase_crop and clamped to [0, dim].  The k parameter is unused (the
 * mask holds a single slice) but kept for signature symmetry. */
void get_indices2D(float *mask_box, float *crop_indeces, float threshold,
                   int increase_crop, int dimX, int dimY, int k)
{
    float *MeanX_vector, *MeanY_vector;
    long i, j;
    float maxvalX, maxvalY, meanval;

    (void)k;  /* unused, see above */

    MeanX_vector = (float*) calloc(dimX, sizeof(float));
    MeanY_vector = (float*) calloc(dimY, sizeof(float));

    /* get X-dim mean vector */
    for (i = 0; i < dimX; i++) {
        meanval = 0.0f;
        for (j = 0; j < dimY; j++) meanval += mask_box[j*dimX+i];
        MeanX_vector[i] = meanval/dimY;
    }
    /* get the max value of the X-vector */
    maxvalX = 0.0f;
    for (i = 0; i < dimX; i++)
        if (MeanX_vector[i] > maxvalX) maxvalX = MeanX_vector[i];

    /* get Y-dim mean vector */
    for (j = 0; j < dimY; j++) {
        meanval = 0.0f;
        for (i = 0; i < dimX; i++) meanval += mask_box[j*dimX+i];
        MeanY_vector[j] = meanval/dimX;
    }
    /* get the max value of the Y-vector */
    maxvalY = 0.0f;
    for (j = 0; j < dimY; j++)
        if (MeanY_vector[j] > maxvalY) maxvalY = MeanY_vector[j];

    /* first X-index: first column mean exceeding threshold*maxvalX */
    crop_indeces[0] = 0;
    for (i = 0; i < dimX; i++) {
        if (MeanX_vector[i] >= threshold*maxvalX) {
            crop_indeces[0] = i - increase_crop;
            break;
        }
    }
    if (crop_indeces[0] < 0) crop_indeces[0] = 0;

    /* second X-index: last column mean exceeding threshold*maxvalX */
    crop_indeces[1] = dimX;
    for (i = dimX-1; i >= 0; i--) {
        if (MeanX_vector[i] >= threshold*maxvalX) {
            crop_indeces[1] = i + increase_crop;
            break;
        }
    }
    if (crop_indeces[1] > dimX) crop_indeces[1] = dimX;

    /* first Y-index */
    crop_indeces[2] = 0;
    for (j = 0; j < dimY; j++) {
        if (MeanY_vector[j] >= threshold*maxvalY) {
            crop_indeces[2] = j - increase_crop;
            break;
        }
    }
    if (crop_indeces[2] < 0) crop_indeces[2] = 0;

    /* second Y-index */
    crop_indeces[3] = dimY;
    for (j = dimY-1; j >= 0; j--) {
        if (MeanY_vector[j] >= threshold*maxvalY) {
            crop_indeces[3] = j + increase_crop;
            break;
        }
    }
    if (crop_indeces[3] > dimY) crop_indeces[3] = dimY;

    free(MeanX_vector);
    free(MeanY_vector);
    return;
}
/* ==== source file: cp_up.c ==== */
/* compute the potential energy of a collection of   */
/* particles interacting via a pairwise 1/r potential */
// compile with -lm
//liyp 10.5
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
//#include<windows.h>
#include<time.h>
#include<omp.h>
//liyp 10.05
//#define NPARTS 1000
#define NPARTS 10000
#define NITER 201
#define DIMS 3

double computePot(void);       /* returns wall time of the parallel section */
void initPositions(void);
void updatePositions(void);

double r[DIMS][NPARTS];        /* particle coordinates */
double pot;                    /* potential accumulated for one iteration */
double distx,disty,distz,dist; /* scratch; privatized inside the OpenMP loop */
double **poot=NULL;            /* per-pair 1/r terms; summed serially so the
                                  total is reproducible across thread counts */

int main(){
    int i;
    double time = 0.0;

    /* NPARTS x NPARTS scratch matrix for the pairwise terms; check each
       allocation — at the default problem size this is ~800 MB */
    poot = (double **)malloc(sizeof(double *)*NPARTS);
    if (poot == NULL) {
        fprintf(stderr, "out of memory\n");
        return EXIT_FAILURE;
    }
    for(i=0; i<NPARTS; i++) {
        *(poot+i) = (double *)malloc(sizeof(double)*NPARTS);
        if (*(poot+i) == NULL) {
            fprintf(stderr, "out of memory\n");
            return EXIT_FAILURE;
        }
    }

    initPositions();
    updatePositions();

    for(i=0;i<NITER;i++) {
        pot = 0;
        time += computePot();   /* only the parallel compute phase is timed */
        if(i%10 == 0) printf("%5d:Potential:%10.7f \n",i,pot);
        updatePositions();
    }
    printf ( " Time = %g seconds.\n", time);

    for(i=0; i<NPARTS; i++) free(*(poot+i));
    free(poot);
    return 0;
}

/* seed every coordinate in [0.5, 1.5) */
void initPositions(){
    int i,j;
    for(i=0;i<DIMS;i++)
        for(j=0;j<NPARTS;j++)
            r[i][j] = 0.5+((double)rand()/(double)RAND_MAX);
}

/* random-walk every coordinate by a step in [0.5, 1.5) (subtracted) */
void updatePositions(){
    int i,j;
    for(i=0;i<DIMS;i++)
        for(j=0;j<NPARTS;j++)
            r[i][j] -= 0.5+((double)rand()/(double)RAND_MAX);
}

/* Fill poot[i][j] = 1/dist(i,j) in parallel, then sum the terms serially
   into the global 'pot'.  Returns the wall time of the parallel phase only. */
double computePot(){
    int i,j;
    double wtime;
    wtime = omp_get_wtime();
#pragma omp parallel for schedule(static) private(distx,disty,distz,dist,i,j) num_threads(7)
    for(i=0;i<NPARTS;i++){
        /* NOTE(review): j<i-1 skips the adjacent pair (i, i-1); j<i looks
           intended for "all pairs" — left unchanged to preserve output. */
        for(j=0;j<i-1;j++){
            /* squared differences; x*x instead of pow(x,2) on the hot path */
            distx = (r[0][j]-r[0][i])*(r[0][j]-r[0][i]);
            disty = (r[1][j]-r[1][i])*(r[1][j]-r[1][i]);
            distz = (r[2][j]-r[2][i])*(r[2][j]-r[2][i]);
            dist = sqrt(distx+disty+distz);
            //pot = pot + 1.0/dist;
            poot[i][j] = 1.0/dist;
        }
    }
    wtime = omp_get_wtime() - wtime;

    /* serial reduction: deterministic regardless of thread count */
    for(i=0;i<NPARTS;i++){
        for(j=0;j<i-1;j++){
            pot = pot + poot[i][j];
        }
    }
    return wtime;
}
BLAS.h
//
// Created by kazem on 7/18/17.
//

#ifndef TRIANGOPENMP_BLAS_H
#define TRIANGOPENMP_BLAS_H
namespace nasoq {

// Solves L*x = rhs in place, where L is a dense lower-triangular matrix with
// a NON-unit diagonal, stored column-major with leading dimension ldm.  The
// solution overwrites rhs.  Columns are processed in unrolled groups of
// 8/4/2/1; within a group the x-values are solved by forward substitution and
// then applied to the remaining rows in a single fused update loop.  The Mki*
// pointers walk down column i of the current diagonal block ("+ ldm + 1"
// steps one column right and one row down, i.e. along the diagonal).
 void dlsolve_blas_nonUnit(int ldm, int ncol, double *M, double *rhs)//general triangular solver
 {
  int k;
  double x0, x1, x2, x3, x4, x5, x6, x7;
  double *M0;
  register double *Mki0, *Mki1, *Mki2, *Mki3, *Mki4, *Mki5, *Mki6, *Mki7;
  register int firstcol = 0;
  M0 = &M[0];

  while (firstcol < ncol - 7) { /* Do 8 columns */
   Mki0 = M0;
   Mki1 = Mki0 + ldm + 1;
   Mki2 = Mki1 + ldm + 1;
   Mki3 = Mki2 + ldm + 1;
   Mki4 = Mki3 + ldm + 1;
   Mki5 = Mki4 + ldm + 1;
   Mki6 = Mki5 + ldm + 1;
   Mki7 = Mki6 + ldm + 1;

   // forward substitution within the 8-column diagonal block; each *Mki_i++
   // first dereferences the diagonal entry, then advances down the column
   x0 = rhs[firstcol] / *Mki0++;
   x1 = (rhs[firstcol + 1] - x0 * *Mki0++) / *Mki1++;
   x2 = (rhs[firstcol + 2] - x0 * *Mki0++ - x1 * *Mki1++) / *Mki2++;
   x3 = (rhs[firstcol + 3] - x0 * *Mki0++ - x1 * *Mki1++ - x2 * *Mki2++) / *Mki3++;
   x4 = (rhs[firstcol + 4] - x0 * *Mki0++ - x1 * *Mki1++ - x2 * *Mki2++ - x3 * *Mki3++) / *Mki4++;
   x5 = (rhs[firstcol + 5] - x0 * *Mki0++ - x1 * *Mki1++ - x2 * *Mki2++ - x3 * *Mki3++ - x4 * *Mki4++) / *Mki5++;
   x6 = (rhs[firstcol + 6] - x0 * *Mki0++ - x1 * *Mki1++ - x2 * *Mki2++ - x3 * *Mki3++ - x4 * *Mki4++ - x5 * *Mki5++) / *Mki6++;
   x7 = (rhs[firstcol + 7] - x0 * *Mki0++ - x1 * *Mki1++ - x2 * *Mki2++ - x3 * *Mki3++ - x4 * *Mki4++ - x5 * *Mki5++ - x6 * *Mki6++) / *Mki7++;

   rhs[firstcol++] = x0;
   rhs[firstcol++] = x1;
   rhs[firstcol++] = x2;
   rhs[firstcol++] = x3;
   rhs[firstcol++] = x4;
   rhs[firstcol++] = x5;
   rhs[firstcol++] = x6;
   rhs[firstcol++] = x7;

   // apply the 8 solved entries to all remaining rows of rhs
   for (k = firstcol; k < ncol; k++)
    rhs[k] = rhs[k] - x0 * *Mki0++ - x1 * *Mki1++
             - x2 * *Mki2++ - x3 * *Mki3++
             - x4 * *Mki4++ - x5 * *Mki5++
             - x6 * *Mki6++ - x7 * *Mki7++;

   M0 += 8 * ldm + 8;  // advance to the next 8x8 diagonal block
  }

  while (firstcol < ncol - 3) { /* Do 4 columns */
   Mki0 = M0;
   Mki1 = Mki0 + ldm + 1;
   Mki2 = Mki1 + ldm + 1;
   Mki3 = Mki2 + ldm + 1;

   x0 = rhs[firstcol] / *Mki0++;
   x1 = (rhs[firstcol + 1] - x0 * *Mki0++) / *Mki1++;
   x2 = (rhs[firstcol + 2] - x0 * *Mki0++ - x1 * *Mki1++) / *Mki2++;
   x3 = (rhs[firstcol + 3] - x0 * *Mki0++ - x1 * *Mki1++
         - x2 * *Mki2++) / *Mki3++;

   rhs[firstcol++] = x0;
   rhs[firstcol++] = x1;
   rhs[firstcol++] = x2;
   rhs[firstcol++] = x3;

   for (k = firstcol; k < ncol; k++)
    rhs[k] = rhs[k] - x0 * *Mki0++ - x1 * *Mki1++
             - x2 * *Mki2++ - x3 * *Mki3++;

   M0 += 4 * ldm + 4;
  }

  // after the 4-column loop at most 3 columns remain, so a single 2-column
  // step followed by a single 1-column step suffices
  if (firstcol < ncol - 1) { /* Do 2 columns */
   Mki0 = M0;
   Mki1 = Mki0 + ldm + 1;

   x0 = rhs[firstcol] / *Mki0++;
   x1 = (rhs[firstcol + 1] - x0 * *Mki0++) / *Mki1++;

   rhs[firstcol++] = x0;
   rhs[firstcol++] = x1;

   for (k = firstcol; k < ncol; k++)
    rhs[k] = rhs[k] - x0 * *Mki0++ - x1 * *Mki1++;

   M0 += 2 * ldm + 2;
  }

  if (firstcol == ncol - 1) { /* Do 1 columns */
   Mki0 = M0;
   x0 = rhs[firstcol] / *Mki0;
   rhs[firstcol] = x0;
  }
 }

// Straightforward (non-unrolled) dense lower-triangular solve: overwrites
// rhs with the solution of the col-by-col system held in the leading col
// rows/columns of M (column stride colSize, non-unit diagonal).  The
// commented-out pragmas are remnants of an experiment with synchronized use.
 void lSolve_dense_col_sync(int colSize, int col, double *M, double *rhs) {
//#pragma omp critical
  for (int i = 0; i < col; ++i) {
//#pragma omp atomic
   rhs[i] /= M[i * colSize + i];
   for (int j = i + 1; j < col; ++j) {
    double tmp = M[i * colSize + j] * rhs[i];
//#pragma omp atomic
    rhs[j] -= tmp;
   }
  }
  //return 1;
 }

// Dense matrix-vector product accumulation: Mxvec += M * vec, where M is
// nrow-by-ncol, column-major with leading dimension ldm.  Columns are
// processed in unrolled groups of 8/4/1; each group streams down the rows
// once, which keeps the Mki* pointer walks sequential in memory.
 void dmatvec_blas(
   int ldm, /* in -- leading dimension of M */
   int nrow, /* in */
   int ncol, /* in */
   double *M, /* in */
   double *vec, /* in */
   double *Mxvec /* in/out */
 ) {
  double vi0, vi1, vi2, vi3, vi4, vi5, vi6, vi7;
  double *M0;
  register double *Mki0, *Mki1, *Mki2, *Mki3, *Mki4, *Mki5, *Mki6, *Mki7;
  register int firstcol = 0;
  int k;
  M0 = &M[0];

  while (firstcol < ncol - 7) { /* Do 8 columns */
   Mki0 = M0;
   Mki1 = Mki0 + ldm;
   Mki2 = Mki1 + ldm;
   Mki3 = Mki2 + ldm;
   Mki4 = Mki3 + ldm;
   Mki5 = Mki4 + ldm;
   Mki6 = Mki5 + ldm;
   Mki7 = Mki6 + ldm;

   vi0 = vec[firstcol++];
   vi1 = vec[firstcol++];
   vi2 = vec[firstcol++];
   vi3 = vec[firstcol++];
   vi4 = vec[firstcol++];
   vi5 = vec[firstcol++];
   vi6 = vec[firstcol++];
   vi7 = vec[firstcol++];

   for (k = 0; k < nrow; k++)
    Mxvec[k] += vi0 * *Mki0++ + vi1 * *Mki1++
                + vi2 * *Mki2++ + vi3 * *Mki3++
                + vi4 * *Mki4++ + vi5 * *Mki5++
                + vi6 * *Mki6++ + vi7 * *Mki7++;

   M0 += 8 * ldm;
  }

  while (firstcol < ncol - 3) { /* Do 4 columns */
   Mki0 = M0;
   Mki1 = Mki0 + ldm;
   Mki2 = Mki1 + ldm;
   Mki3 = Mki2 + ldm;

   vi0 = vec[firstcol++];
   vi1 = vec[firstcol++];
   vi2 = vec[firstcol++];
   vi3 = vec[firstcol++];
   for (k = 0; k < nrow; k++)
    Mxvec[k] += vi0 * *Mki0++ + vi1 * *Mki1++
                + vi2 * *Mki2++ + vi3 * *Mki3++;

   M0 += 4 * ldm;
  }

  while (firstcol < ncol) { /* Do 1 column */
   Mki0 = M0;
   vi0 = vec[firstcol++];
   for (k = 0; k < nrow; k++)
    Mxvec[k] += vi0 * *Mki0++;

   M0 += ldm;
  }
 }
}
#endif //TRIANGOPENMP_BLAS_H
task_dep-2.c
/* { dg-do run } */
/* Tests OpenMP task "depend" clauses: a task with depend(in: x) followed by
   a sibling task with depend(out: x) creates an anti-dependence, so the
   reader task must complete before the writer task runs. */

#include <stdlib.h>

int main() {
  int x = 1;
  #pragma omp parallel
  #pragma omp single
  {
    /* reader task: must still observe the initial value of x */
    #pragma omp task shared(x) depend(in: x)
    if (x != 1)
      abort ();
    /* writer task: may only execute after the reader task has finished */
    #pragma omp task shared(x) depend(out: x)
    x = 2;
  }
  return 0;
}
GB_msort_2.c
//------------------------------------------------------------------------------ // GB_msort_2: sort a 2-by-n list of integers, using A[0:1][ ] as the key //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // A parallel mergesort of an array of 2-by-n integers. Each key // consists of two integers. #include "GB_msort_2.h" //------------------------------------------------------------------------------ // GB_msort_2_binary_search: binary search for the pivot //------------------------------------------------------------------------------ // The Pivot value is Y [pivot], and a binary search for the Pivot is made in // the array X [p_pstart...p_end-1], which is sorted in non-decreasing order on // input. The return value is pleft, where // // X [p_start ... pleft-1] <= Pivot and // X [pleft ... p_end-1] >= Pivot holds. // // pleft is returned in the range p_start to p_end. If pleft is p_start, then // the Pivot is smaller than all entries in X [p_start...p_end-1], and the left // list X [p_start...pleft-1] is empty. If pleft is p_end, then the Pivot is // larger than all entries in X [p_start...p_end-1], and the right list X // [pleft...p_end-1] is empty. 
static int64_t GB_msort_2_binary_search    // return pleft
(
    const int64_t *restrict Y_0,         // Pivot is Y [pivot]
    const int64_t *restrict Y_1,
    const int64_t pivot,
    const int64_t *restrict X_0,         // search in X [p_start..p_end_-1]
    const int64_t *restrict X_1,
    const int64_t p_start,
    const int64_t p_end
)
{

    //--------------------------------------------------------------------------
    // find where the Pivot appears in X
    //--------------------------------------------------------------------------

    // binary search of X [p_start...p_end-1] for the Pivot
    int64_t pleft = p_start ;
    int64_t pright = p_end - 1 ;
    while (pleft < pright)
    {
        int64_t pmiddle = (pleft + pright) >> 1 ;
        // less = (X [pmiddle] < Pivot)
        bool less = GB_lt_2 (X_0, X_1, pmiddle, Y_0, Y_1, pivot) ;
        pleft  = less ? (pmiddle+1) : pleft ;
        pright = less ? pright : pmiddle ;
    }

    // binary search is narrowed down to a single item
    // or it has found the list is empty:
    ASSERT (pleft == pright || pleft == pright + 1) ;

    // If found is true then X [pleft == pright] == Pivot.  If duplicates
    // appear then X [pleft] is any one of the entries equal to the Pivot
    // in the list.  If found is false then
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft+1 ... p_end-1] > Pivot holds.
    //    The value X [pleft] may be either < or > Pivot.
    bool found = (pleft == pright) &&
        GB_eq_2 (X_0, X_1, pleft, Y_0, Y_1, pivot) ;

    // Modify pleft and pright:
    if (!found && (pleft == pright))
    {
        if (GB_lt_2 (X_0, X_1, pleft, Y_0, Y_1, pivot))
        {
            pleft++ ;
        }
        else
        {
//          pright++ ;  // (not needed)
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // If found is false then
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft ... p_end-1] > Pivot holds,
    //    and pleft-1 == pright

    // If X has no duplicates, then whether or not Pivot is found,
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft ... p_end-1] >= Pivot holds.

    // If X has duplicates, then whether or not Pivot is found,
    //    X [p_start ... pleft-1] <= Pivot and
    //    X [pleft ... p_end-1] >= Pivot holds.

    return (pleft) ;
}

//------------------------------------------------------------------------------
// GB_msort_2_create_merge_tasks
//------------------------------------------------------------------------------

// Recursively constructs ntasks tasks to merge two arrays, Left and Right,
// into Sresult, where Left is L [pL_start...pL_end-1], Right is R
// [pR_start...pR_end-1], and Sresult is S [pS_start...pS_start+total_work-1],
// and where total_work is the total size of Left and Right.
//
// Task tid will merge L [L_task [tid] ... L_task [tid] + L_len [tid] - 1] and
// R [R_task [tid] ... R_task [tid] + R_len [tid] -1] into the merged output
// array S [S_task [tid] ... ].  The task tids created are t0 to
// t0+ntasks-1.

void GB_msort_2_create_merge_tasks
(
    // output:
    int64_t *restrict L_task,        // L_task [t0...t0+ntasks-1] computed
    int64_t *restrict L_len,         // L_len  [t0...t0+ntasks-1] computed
    int64_t *restrict R_task,        // R_task [t0...t0+ntasks-1] computed
    int64_t *restrict R_len,         // R_len  [t0...t0+ntasks-1] computed
    int64_t *restrict S_task,        // S_task [t0...t0+ntasks-1] computed
    // input:
    const int t0,                    // first task tid to create
    const int ntasks,                // # of tasks to create
    const int64_t pS_start,          // merge into S [pS_start...]
    const int64_t *restrict L_0,     // Left = L [pL_start...pL_end-1]
    const int64_t *restrict L_1,
    const int64_t pL_start,
    const int64_t pL_end,
    const int64_t *restrict R_0,     // Right = R [pR_start...pR_end-1]
    const int64_t *restrict R_1,
    const int64_t pR_start,
    const int64_t pR_end
)
{

    //--------------------------------------------------------------------------
    // get problem size
    //--------------------------------------------------------------------------

    int64_t nleft  = pL_end - pL_start ;        // size of Left array
    int64_t nright = pR_end - pR_start ;        // size of Right array
    int64_t total_work = nleft + nright ;       // total work to do
    ASSERT (ntasks >= 1) ;
    ASSERT (total_work > 0) ;

    //--------------------------------------------------------------------------
    // create the tasks
    //--------------------------------------------------------------------------

    if (ntasks == 1)
    {

        //----------------------------------------------------------------------
        // a single task will merge all of Left and Right into Sresult
        //----------------------------------------------------------------------

        L_task [t0] = pL_start ; L_len [t0] = nleft ;
        R_task [t0] = pR_start ; R_len [t0] = nright ;
        S_task [t0] = pS_start ;

    }
    else
    {

        //----------------------------------------------------------------------
        // partition the Left and Right arrays for multiple merge tasks
        //----------------------------------------------------------------------

        int64_t pleft, pright ;
        if (nleft >= nright)
        {
            // split Left in half, and search for its pivot in Right
            pleft = (pL_end + pL_start) >> 1 ;
            pright = GB_msort_2_binary_search (
                        L_0, L_1, pleft,
                        R_0, R_1, pR_start, pR_end) ;
        }
        else
        {
            // split Right in half, and search for its pivot in Left
            pright = (pR_end + pR_start) >> 1 ;
            pleft = GB_msort_2_binary_search (
                        R_0, R_1, pright,
                        L_0, L_1, pL_start, pL_end) ;
        }

        //----------------------------------------------------------------------
        // partition the tasks according to the work of each partition
        //----------------------------------------------------------------------

        // work0 is the total work in the first partition
        int64_t work0 = (pleft - pL_start) + (pright - pR_start) ;
        int ntasks0 = (int) round ((double) ntasks *
            (((double) work0) / ((double) total_work))) ;

        // ensure at least one task is assigned to each partition
        ntasks0 = GB_IMAX (ntasks0, 1) ;
        ntasks0 = GB_IMIN (ntasks0, ntasks-1) ;
        int ntasks1 = ntasks - ntasks0 ;

        //----------------------------------------------------------------------
        // assign ntasks0 to the first half
        //----------------------------------------------------------------------

        // ntasks0 tasks merge L [pL_start...pleft-1] and R [pR_start..pright-1]
        // into the result S [pS_start...work0-1].

        GB_msort_2_create_merge_tasks (
            L_task, L_len, R_task, R_len, S_task,
            t0, ntasks0, pS_start,
            L_0, L_1, pL_start, pleft,
            R_0, R_1, pR_start, pright) ;

        //----------------------------------------------------------------------
        // assign ntasks1 to the second half
        //----------------------------------------------------------------------

        // ntasks1 tasks merge L [pleft...pL_end-1] and R [pright...pR_end-1]
        // into the result S [pS_start+work0...pS_start+total_work].

        int t1 = t0 + ntasks0 ;     // first task id of the second set of tasks
        int64_t pS_start1 = pS_start + work0 ;  // 2nd set starts here in S
        GB_msort_2_create_merge_tasks (
            L_task, L_len, R_task, R_len, S_task,
            t1, ntasks1, pS_start1,
            L_0, L_1, pleft, pL_end,
            R_0, R_1, pright, pR_end) ;
    }
}

//------------------------------------------------------------------------------
// GB_msort_2_merge: merge two sorted lists via a single thread
//------------------------------------------------------------------------------

// merge Left [0..nleft-1] and Right [0..nright-1] into S [0..nleft+nright-1]

static void GB_msort_2_merge
(
    int64_t *restrict S_0,              // output of length nleft + nright
    int64_t *restrict S_1,
    const int64_t *restrict Left_0,     // left input of length nleft
    const int64_t *restrict Left_1,
    const int64_t nleft,
    const int64_t *restrict Right_0,    // right input of length nright
    const int64_t *restrict Right_1,
    const int64_t nright
)
{
    int64_t p, pleft, pright ;

    // merge the two inputs, Left and Right, while both inputs exist
    for (p = 0, pleft = 0, pright = 0 ; pleft < nleft && pright < nright ; p++)
    {
        if (GB_lt_2 (Left_0, Left_1, pleft, Right_0, Right_1, pright))
        {
            // S [p] = Left [pleft++]
            S_0 [p] = Left_0 [pleft] ;
            S_1 [p] = Left_1 [pleft] ;
            pleft++ ;
        }
        else
        {
            // S [p] = Right [pright++]
            S_0 [p] = Right_0 [pright] ;
            S_1 [p] = Right_1 [pright] ;
            pright++ ;
        }
    }

    // either input is exhausted; copy the remaining list into S
    if (pleft < nleft)
    {
        int64_t nremaining = (nleft - pleft) ;
        memcpy (S_0 + p, Left_0 + pleft, nremaining * sizeof (int64_t)) ;
        memcpy (S_1 + p, Left_1 + pleft, nremaining * sizeof (int64_t)) ;
    }
    else if (pright < nright)
    {
        int64_t nremaining = (nright - pright) ;
        memcpy (S_0 + p, Right_0 + pright, nremaining * sizeof (int64_t)) ;
        memcpy (S_1 + p, Right_1 + pright, nremaining * sizeof (int64_t)) ;
    }
}

//------------------------------------------------------------------------------
// GB_msort_2: parallel mergesort
//------------------------------------------------------------------------------

GB_PUBLIC
GrB_Info GB_msort_2     // sort array A of size 2-by-n, using 2 keys (A [0:1][])
(
    int64_t *restrict A_0,      // size n array
    int64_t *restrict A_1,      // size n array
    const int64_t n,
    int nthreads                // # of threads to use
)
{

    //--------------------------------------------------------------------------
    // handle small problems with a single thread
    //--------------------------------------------------------------------------

    if (nthreads <= 1 || n <= GB_BASECASE)
    {
        // sequential quicksort
        GB_qsort_2 (A_0, A_1, n) ;
        return (GrB_SUCCESS) ;
    }

    //--------------------------------------------------------------------------
    // determine # of tasks
    //--------------------------------------------------------------------------

    // determine the number of levels to create, which must always be an
    // even number.  The # of levels is chosen to ensure that the # of leaves
    // of the task tree is between 4*nthreads and 16*nthreads.

    //  2 to 4 threads:     4 levels, 16 qsort leaves
    //  5 to 16 threads:    6 levels, 64 qsort leaves
    // 17 to 64 threads:    8 levels, 256 qsort leaves
    // 65 to 256 threads:   10 levels, 1024 qsort leaves
    // 256 to 1024 threads: 12 levels, 4096 qsort leaves
    // ...

    int k = (int) (2 + 2 * ceil (log2 ((double) nthreads) / 2)) ;
    int ntasks = 1 << k ;

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    // W holds the double buffer (W_0, W_1) plus the per-task merge
    // descriptors and the (ntasks+1) leaf-boundary array Slice
    int64_t *restrict W = NULL ;
    size_t W_size = 0 ;
    W = GB_MALLOC_WORK (2*n + 6*ntasks + 1, int64_t, &W_size) ;
    if (W == NULL)
    {
        // out of memory
        return (GrB_OUT_OF_MEMORY) ;
    }

    int64_t *T = W ;
    int64_t *restrict W_0    = T ; T += n ;
    int64_t *restrict W_1    = T ; T += n ;
    int64_t *restrict L_task = T ; T += ntasks ;
    int64_t *restrict L_len  = T ; T += ntasks ;
    int64_t *restrict R_task = T ; T += ntasks ;
    int64_t *restrict R_len  = T ; T += ntasks ;
    int64_t *restrict S_task = T ; T += ntasks ;
    int64_t *restrict Slice  = T ; T += (ntasks+1) ;

    //--------------------------------------------------------------------------
    // partition and sort the leaves
    //--------------------------------------------------------------------------

    GB_eslice (Slice, n, ntasks) ;
    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        int64_t leaf = Slice [tid] ;
        int64_t leafsize = Slice [tid+1] - leaf ;
        GB_qsort_2 (A_0 + leaf, A_1 + leaf, leafsize) ;
    }

    //--------------------------------------------------------------------------
    // merge each level
    //--------------------------------------------------------------------------

    int nt = 1 ;
    for ( ; k >= 2 ; k -= 2)
    {

        //----------------------------------------------------------------------
        // merge level k into level k-1, from A into W
        //----------------------------------------------------------------------

        // TODO: skip k and k-1 for each group of 4 sublists of A if they are
        // already sorted with respect to each other.

        // this could be done in parallel if ntasks was large
        for (int tid = 0 ; tid < ntasks ; tid += 2*nt)
        {
            // create 2*nt tasks to merge two A sublists into one W sublist
            GB_msort_2_create_merge_tasks (
                L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
                A_0, A_1, Slice [tid],    Slice [tid+nt],
                A_0, A_1, Slice [tid+nt], Slice [tid+2*nt]) ;
        }

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // merge A [pL...pL+nL-1] and A [pR...pR+nR-1] into W [pS..]
            int64_t pL = L_task [tid], nL = L_len [tid] ;
            int64_t pR = R_task [tid], nR = R_len [tid] ;
            int64_t pS = S_task [tid] ;

            GB_msort_2_merge (
                W_0 + pS, W_1 + pS,
                A_0 + pL, A_1 + pL, nL,
                A_0 + pR, A_1 + pR, nR) ;
        }
        nt = 2*nt ;

        //----------------------------------------------------------------------
        // merge level k-1 into level k-2, from W into A
        //----------------------------------------------------------------------

        // this could be done in parallel if ntasks was large
        for (int tid = 0 ; tid < ntasks ; tid += 2*nt)
        {
            // create 2*nt tasks to merge two W sublists into one A sublist
            GB_msort_2_create_merge_tasks (
                L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
                W_0, W_1, Slice [tid],    Slice [tid+nt],
                W_0, W_1, Slice [tid+nt], Slice [tid+2*nt]) ;
        }

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // merge W [pL...pL+nL-1] and W [pR...pR+nR-1] into A [pS..]
            int64_t pL = L_task [tid], nL = L_len [tid] ;
            int64_t pR = R_task [tid], nR = R_len [tid] ;
            int64_t pS = S_task [tid] ;

            GB_msort_2_merge (
                A_0 + pS, A_1 + pS,
                W_0 + pL, W_1 + pL, nL,
                W_0 + pR, W_1 + pR, nR) ;
        }
        nt = 2*nt ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORK (&W, W_size) ;
    return (GrB_SUCCESS) ;
}
fox_floats_timer_caching_omp_fileIO_benchmark.c
/* fox_floats_timer_caching_omp_fileIO_benchmark.c -- uses Fox's algorithm to multiply two square matrices * * Implementation of parallel matrix multiplication: * LaTeX: $C_{i,j} = \sum_{k} A_{i,k}B_{k,j}$ * * Input: * Input Matrix file name: A.dat, B.dat * * Output: * Output Matrix file name: C.dat * Output Sub-matrices file name: SubMatrices.dat * * Notes: * 1. Assumes the number of processes is a perfect square * 2. The array member of the matrices is statically allocated * * See Chap 7, pp. 113 & ff and pp. 125 & ff in PPMPI */ /* Compiler command: * mpiicc -O3 -qopenmp -qopt-report-phase=vec -qopt-report=3 fox_floats_timer_caching_omp_fileIO_benchmark.c * -o fox_floats_timer_caching_omp_fileIO_benchmark * * Run command: * mpirun -n -4 ./fox_floats_timer_caching_omp */ /* Head files */ #include <stdio.h> #include <math.h> #include <stdlib.h> #include <mpi.h> #include <omp.h> // define problem scale, matrix row/col size #define PROBLEM_SCALE 64 // define whether or not Print Matices in the Command Line #define PRINT_A 0 #define PRINT_B 0 #define PRINT_C 0 #define PRINT_LOCAL_A 0 #define PRINT_LOCAL_B 0 #define PRINT_LOCAL_C 0 // define float precision, 4 byte single-precision float or 8 byte double-precision float #define FLOAT double #define FLOAT_MPI MPI_DOUBLE // Define threads speed-up affnity in the computing #define NUM_THREADS 1 // Define threads affinity "scatter" or "compact" #define AFFINITY "KMP_AFFINITY = compact" /* Type define structure of process grid */ typedef struct { int p; /* Total number of processes */ MPI_Comm comm; /* Communicator for entire grid */ MPI_Comm row_comm; /* Communicator for my row */ MPI_Comm col_comm; /* Communicator for my col */ int q; /* Order of grid */ int my_row; /* My row number */ int my_col; /* My column number */ int my_rank; /* My rank in the grid comm */ } GRID_INFO_T; /* Type define structure of local matrix */ #define MAX 2097152 // Maximum number of elements in the array that store the local matrix (2^21) 
typedef struct { int n_bar; #define Order(A) ((A)->n_bar) // defination with parameters FLOAT entries[MAX]; #define Entry(A,i,j) (*(((A)->entries) + ((A)->n_bar)*(i) + (j))) // defination with parameters, Array dereference } LOCAL_MATRIX_T; /* Function Declarations */ LOCAL_MATRIX_T* Local_matrix_allocate(int n_bar); void Free_local_matrix(LOCAL_MATRIX_T** local_A); void Read_matrix_A(char* prompt, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid, int n); // Read matrix A from a file void Read_matrix_B(char* prompt, LOCAL_MATRIX_T* local_B, // for continuous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k) GRID_INFO_T* grid, int n); // Read matrix B from a file void Print_matrix_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid, int n); // Print matrix A in the command line void Print_matrix_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid, int n); // Print matrix B in the command line void Print_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Print matrix C in the command line void Set_to_zero(LOCAL_MATRIX_T* local_A); void Local_matrix_multiply(LOCAL_MATRIX_T* local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C); void Build_matrix_type(LOCAL_MATRIX_T* local_A); MPI_Datatype local_matrix_mpi_t; LOCAL_MATRIX_T* temp_mat; // global LOCAL_MATRIX_T* type pointer void Print_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); void Print_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid); void Print_local_matrices_C(char* title, LOCAL_MATRIX_T* local_B, GRID_INFO_T* grid); void Write_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Write matrix multiplication to a file void Write_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); // Write local matrix A to a file void Write_local_matrices_B(char* title, 
LOCAL_MATRIX_T* local_B, // Special print function for local matrix B^{T}(j,k)
             GRID_INFO_T* grid);                           // Write local matrix B to a file
void Write_local_matrices_C(char* title, LOCAL_MATRIX_T* local_A,
             GRID_INFO_T* grid);                           // Write local matrix C to a file

/*********************************************************/
/* Entry point: generates the input files A.dat (identity) and B.dat
 * (sequential values), then runs Fox's parallel matrix multiplication and
 * writes/prints the results.
 * NOTE(review): implicit-int 'main' — invalid in C99 and later; should be
 * declared 'int main(int argc, char* argv[])'.  Left unchanged here. */
main(int argc, char* argv[]) {
    FILE *fp;
    int p;
    int my_rank;
    GRID_INFO_T grid;
    LOCAL_MATRIX_T* local_A;
    LOCAL_MATRIX_T* local_B;
    LOCAL_MATRIX_T* local_C;
    int n;
    int n_bar;
    double timer_start;
    double timer_end;
    int content;
    int i;
    int j;
    void Setup_grid(GRID_INFO_T* grid);
    void Fox(int n, GRID_INFO_T* grid, LOCAL_MATRIX_T* local_A,
             LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C);

    // Matrix Generator
    fp = fopen("A.dat", "w");                              // Generate and print matrix A into a file
    for (i = 0; i < PROBLEM_SCALE; i++) {
        for (j = 0; j < PROBLEM_SCALE; j++)
            if(i == j){
                fprintf(fp,"%d ", 1);
            } else {
                fprintf(fp,"%d ", 0);
            }
        fprintf(fp,"\n");
    }
    fclose(fp);

    fp = fopen("B.dat", "w");                              // Generate and print matrix B into a file
    for (i = 0; i < PROBLEM_SCALE; i++){
        for (j = 0; j < PROBLEM_SCALE; j++)
            fprintf(fp,"%d ", (i*PROBLEM_SCALE)+j);
        fprintf(fp, "\n");
    }
    fclose(fp);

    // SPMD Mode start from here (Processess fork from here)
    MPI_Init(&argc, &argv);                                // MPI initializing
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);               // Get my process id in the MPI communicator

    // Initial OpenMP Environment
    omp_set_num_threads(NUM_THREADS);
    kmp_set_defaults(AFFINITY);      /* Intel-specific thread-affinity setup */

    Setup_grid(&grid);                                     // Set up Processess grid
    if (my_rank == 0) {
        /* determine the matrix order by counting the non-separator
           characters (not space 0x20 or newline 0x0A) in A.dat; assumes
           one character per entry, as produced by the generator above */
        fp = fopen("A.dat","r");
        n = 0;
        while((content = fgetc(fp)) != EOF) {
            //printf("fgetc = %d\n", content);
            if(content != 0x20 && content != 0x0A) n++;
        }
        fclose(fp);
        n = (int) sqrt((double) n);
        printf("We read the order of the matrices from A.dat is\n %d\n", n);
        // while(fgetc(fp) != EOF) n++;
        // printf("What's the order of the matrices?\n");
        // scanf("%d", &n);                                // Overall Matrix's Order
    }

    MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);          // MPI broadcast the overall matrix's order
    n_bar = n/grid.q;                                      // \bar n is the local matrix's order

    local_A = Local_matrix_allocate(n_bar);                // Allocate local matrix A
    Order(local_A) = n_bar;                                // Local matrix A's order
    Read_matrix_A("Read A from A.dat", local_A, &grid, n); // Read local matrices A from process 0 by using stdin, and send them to each process (Procedure)
    if (PRINT_A == 1)
        Print_matrix_A("We read A =", local_A, &grid, n);  // Print local matrices A from process 0 by using stdout, and send them to each process (Procedure)

    local_B = Local_matrix_allocate(n_bar);                // Allocate local matrix
    Order(local_B) = n_bar;                                // Local matrix B's order
    Read_matrix_B("Read B from B.dat", local_B, &grid, n); // Read local matrix B as it's local transpose from process 0 by using stdin, and send them to each process (Procedure)
    if (PRINT_B == 1)
        Print_matrix_B("We read B =", local_B, &grid, n);  // Print local matrix B as it's local transpose from process 0 by using stdout, and send them to each process (Procedure)

    Build_matrix_type(local_A);                            // Build local_A's MPI matrix data type
    temp_mat = Local_matrix_allocate(n_bar);               // Allocate temporary matrix of order n $\time$ n

    local_C = Local_matrix_allocate(n_bar);                // Allocate matrix local_C
    Order(local_C) = n_bar;                                // Set matrix local_C's order

    MPI_Barrier(MPI_COMM_WORLD);                           // Set the MPI process barrier
    timer_start = MPI_Wtime();                             // Get the MPI wall time
    Fox(n, &grid, local_A, local_B, local_C);              // FOX parallel matrix multiplication Algorithm implement function
    timer_end = MPI_Wtime();                               // Get the MPI wall time
    MPI_Barrier(MPI_COMM_WORLD);                           // Set the MPI process barrier

    Write_matrix_C("Write C into the C.dat", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result)
    if (PRINT_C == 1)
        Print_matrix_C("The product is", local_C, &grid, n);     // Print matrix local_C (parallel matrix multiplication result)

    Write_local_matrices_A("Write split of local matrix A into local_A.dat",
        local_A, &grid);                                   // Write local matrix A into file
    if (PRINT_LOCAL_A == 1)
        Print_local_matrices_A("Split of local matrix A",
            local_A, &grid);                               // Print matrix A split in processess
    Write_local_matrices_B("Write split of local matrix B into local_B.dat",
        local_B, &grid);                                   // Write local matrix B into file, special for row-major storage
    if (PRINT_LOCAL_B == 1)
        Print_local_matrices_B("Split of local matrix B",
            local_B, &grid);                               // Print matrix B split in processess, special for row-major storage
    Write_local_matrices_C("Write split of local matrix C into local_C.dat",
        local_C, &grid);                                   // Print matrix C split in processess
    if (PRINT_LOCAL_C == 1)
        Print_local_matrices_C("Split of local matrix C",
            local_C, &grid);                               // Print matrix C split in processess

    Free_local_matrix(&local_A);                           // Free local matrix local_A
    Free_local_matrix(&local_B);                           // Free local matrix local_B
    Free_local_matrix(&local_C);                           // Free local matrix local_C

    if(my_rank == 0)
        printf("Parallel Fox Matrix Multiplication Elapsed time:\n %30.20E seconds\n",
            timer_end-timer_start);

    MPI_Finalize();                                        // MPI finalize, processes join and resource recycle
}  /* main */


/*********************************************************/
/* Builds the q-by-q process grid (q = sqrt(p)) as a 2D Cartesian
 * communicator with wrap-around, plus per-row and per-column
 * sub-communicators used by Fox's algorithm. */
void Setup_grid(
         GRID_INFO_T*  grid  /* out */) {
    int old_rank;
    int dimensions[2];
    int wrap_around[2];
    int coordinates[2];
    int free_coords[2];

    /* Set up Global Grid Information */
    MPI_Comm_size(MPI_COMM_WORLD, &(grid->p));
    MPI_Comm_rank(MPI_COMM_WORLD, &old_rank);

    /* We assume p is a perfect square */   // but what if it's not a perfect square
    grid->q = (int) sqrt((double) grid->p);
    dimensions[0] = dimensions[1] = grid->q;

    /* We want a circular shift in second dimension. */
    /* Don't care about first                        */
    wrap_around[0] = wrap_around[1] = 1;
    MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions,
        wrap_around, 1, &(grid->comm));
    MPI_Comm_rank(grid->comm, &(grid->my_rank));
    MPI_Cart_coords(grid->comm, grid->my_rank, 2,
        coordinates);
    grid->my_row = coordinates[0];
    grid->my_col = coordinates[1];

    /* Set up row communicators */
    free_coords[0] = 0;
    free_coords[1] = 1;
    MPI_Cart_sub(grid->comm, free_coords,
        &(grid->row_comm));

    /* Set up column communicators */
    free_coords[0] = 1;
    free_coords[1] = 0;
    MPI_Cart_sub(grid->comm, free_coords,
        &(grid->col_comm));
}  /* Setup_grid */


/*********************************************************/
/* Fox's algorithm: q stages; at stage s, the process holding A_{i,(i+s)%q}
 * broadcasts it along its grid row, each process multiplies into local_C,
 * then B blocks are circularly shifted up one grid row. */
void Fox(
        int              n         /* in  */,
        GRID_INFO_T*     grid      /* in  */,
        LOCAL_MATRIX_T*  local_A   /* in  */,
        LOCAL_MATRIX_T*  local_B   /* in  */,
        LOCAL_MATRIX_T*  local_C   /* out */) {

    LOCAL_MATRIX_T*  temp_A; /* Storage for the sub-    */
                             /* matrix of A used during */
                             /* the current stage       */
    int              stage;
    int              bcast_root;
    int              n_bar;  /* n/sqrt(p)               */
    int              source;
    int              dest;
    MPI_Status       status;

    n_bar = n/grid->q;
    Set_to_zero(local_C);

    /* Calculate addresses for row circular shift of B */
    source = (grid->my_row + 1) % grid->q;
    dest = (grid->my_row + grid->q - 1) % grid->q;

    /* Set aside storage for the broadcast block of A */
    temp_A = Local_matrix_allocate(n_bar);

    for (stage = 0; stage < grid->q; stage++) {
        bcast_root = (grid->my_row + stage) % grid->q;
        if (bcast_root == grid->my_col) {                  // Process P_{ii} broadcast A_{ii} in process gird's row commnunicator
            MPI_Bcast(local_A, 1, local_matrix_mpi_t,
                bcast_root, grid->row_comm);
            Local_matrix_multiply(local_A, local_B,
                local_C);
        } else {                                           // temp_A is a buffer for process P_{ij} to store A_{ij}
            MPI_Bcast(temp_A, 1, local_matrix_mpi_t,
                bcast_root, grid->row_comm);
            Local_matrix_multiply(temp_A, local_B,
                local_C);
        }
        MPI_Sendrecv_replace(local_B, 1, local_matrix_mpi_t, // MPI send and receive with single buffer
            dest, 0, source, 0, grid->col_comm, &status);    // Circular shift of process grid B's row, after local multiplication operation
    } /* for */

}  /* Fox */


/*********************************************************/
/* Allocates one LOCAL_MATRIX_T.  NOTE(review): the malloc result is not
 * checked for NULL, and local_order is unused because the entries array is
 * statically sized (MAX); callers must set Order() themselves. */
LOCAL_MATRIX_T* Local_matrix_allocate(int local_order) {
    LOCAL_MATRIX_T* temp;

    temp = (LOCAL_MATRIX_T*) malloc(sizeof(LOCAL_MATRIX_T));
    return temp;
}  /* Local_matrix_allocate */


/*********************************************************/
/* Frees a LOCAL_MATRIX_T allocated by Local_matrix_allocate. */
void Free_local_matrix(
         LOCAL_MATRIX_T** local_A_ptr  /* in/out */) {
    free(*local_A_ptr);
}  /* Free_local_matrix */


/*********************************************************/
/* Read and distribute matrix for matrix A:
 *     foreach global row of the matrix,
 *         foreach grid column
 *             read a block of n_bar floats on process 0
 *             and send them to the appropriate process.
 */
void Read_matrix_A(
         char*            prompt   /* in  */,
         LOCAL_MATRIX_T*  local_A  /* out */,
         GRID_INFO_T*     grid     /* in  */,
         int              n        /* in  */) {

    FILE        *fp;
    int        mat_row, mat_col;
    int        grid_row, grid_col;
    int        dest;
    int        coords[2];
    FLOAT*     temp;
    MPI_Status status;

    if (grid->my_rank == 0) {                              // Process 0 read matrix input from stdin and send them to other processess
        fp = fopen("A.dat","r");
        temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT));
        printf("%s\n", prompt);
        fflush(stdout);
        for (mat_row = 0;  mat_row < n; mat_row++) {
            grid_row = mat_row/Order(local_A);
            coords[0] = grid_row;
            for (grid_col = 0; grid_col < grid->q; grid_col++) {
                coords[1] = grid_col;
                MPI_Cart_rank(grid->comm, coords, &dest);
                if (dest == 0) {
                    /* the destination block lives on process 0: read the
                       row segment straight into local_A */
                    for (mat_col = 0; mat_col < Order(local_A); mat_col++)
                        fscanf(fp, "%lf",
                          (local_A->entries)+mat_row*Order(local_A)+mat_col);
                        /* scanf("%lf",
                          (local_A->entries)+mat_row*Order(local_A)+mat_col); */
                } else {
                    for(mat_col = 0; mat_col < Order(local_A); mat_col++)
                        fscanf(fp,"%lf", temp + mat_col);
                        // scanf("%lf", temp + mat_col);
                    MPI_Send(temp, Order(local_A), FLOAT_MPI, dest, 0,
                        grid->comm);
                }
            }
        }
        free(temp);
        fclose(fp);
    } else {                                               // Other processess receive matrix from process 0
        for (mat_row = 0; mat_row < Order(local_A); mat_row++)
            MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A),
                FLOAT_MPI, 0, 0, grid->comm, &status);
    }

}  /* Read_matrix */


/*********************************************************/
/* Read and distribute matrix for local matrix B's transpose:
 *     foreach global row of the matrix,
 *         foreach grid column
 *             read a block of n_bar floats on process 0
 *             and send them to the appropriate process.
 */
void Read_matrix_B(
         char*            prompt   /* in  */,
         LOCAL_MATRIX_T*  local_B  /* out */,
         GRID_INFO_T*     grid     /* in  */,
         int              n        /* in  */) {

    FILE       *fp;
    int        mat_row, mat_col;
    int        grid_row, grid_col;
    int        dest;
    int        coords[2];
    FLOAT      *temp;
    MPI_Status status;

    if (grid->my_rank == 0) {                              // Process 0 read matrix input from stdin and send them to other processess
        fp = fopen("B.dat","r");
        temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
        printf("%s\n", prompt);
        fflush(stdout);
        for (mat_row = 0;  mat_row < n; mat_row++) {
            grid_row = mat_row/Order(local_B);
            coords[0] = grid_row;
            for (grid_col = 0; grid_col < grid->q; grid_col++) {
                coords[1] = grid_col;
                MPI_Cart_rank(grid->comm, coords, &dest);
                if (dest == 0) { // process 0 (local)
                    for (mat_col = 0; mat_col < Order(local_B); mat_col++)
                        fscanf(fp, "%lf",
                          (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage
                        /* scanf("%lf",
                          (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage */
                        /* scanf("%lf",
                          (local_A->entries)+mat_row*Order(local_A)+mat_col); */
                } else {
                    for(mat_col = 0; mat_col < Order(local_B); mat_col++)
                        fscanf(fp, "%lf", temp + mat_col);
                        // scanf("%lf", temp + mat_col);
                    MPI_Send(temp, Order(local_B), FLOAT_MPI, dest, 0,
                        grid->comm);
                }
            }
        }
        free(temp);
        fclose(fp);
    } else {                                               // Other processess receive matrix from process 0
        temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); // switch rows and colums in local_B, for column major storage
        for (mat_col = 0; mat_col < Order(local_B); mat_col++) {
            MPI_Recv(temp, Order(local_B), FLOAT_MPI, 0, 0,
                grid->comm, &status); // switch rows and colums in
local_B, for column major storage for(mat_row = 0; mat_row < Order(local_B); mat_row++) Entry(local_B, mat_row, mat_col) = *(temp + mat_row); // switch rows and colums in local_B, for column major storage /* MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm, &status); */ } free(temp); } } /* Read_matrix_B */ /*********************************************************/ /* Recive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_A), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Send(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_A */ /*********************************************************/ /* Recive and Print Matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and 
print them */ void Print_matrix_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", Entry(local_B, mat_col, mat_row)); // switch rows and colums in local_B, for column major storage // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_B), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); for (mat_col = 0; mat_col < Order(local_B); mat_col++) { for(mat_row = 0; mat_row < Order(local_B); mat_row++) *(temp+mat_row) = Entry(local_B, mat_row, mat_col); // switch rows and colums in local_B, for column major storage MPI_Send(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm); } free(temp); } } /* Print_matrix_B */ /*********************************************************/ /* Recive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) 
malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", Entry(local_C, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_C */ /*********************************************************/ /* Recive and Write Matrix C into a file: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Write_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { fp = fopen("C.dat", "w+"); temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) fprintf(fp, "%20.15E ", Entry(local_C, mat_row, mat_col)); // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); 
mat_col++) fprintf(fp, "%20.15E ", temp[mat_col]); // printf("%20.15E ", temp[mat_col]); } } fprintf(fp,"\n"); } free(temp); fclose(fp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Write_matrix_C */ /*********************************************************/ /* * Set local matrix's element to zero */ void Set_to_zero( LOCAL_MATRIX_T* local_A /* out */) { int i, j; for (i = 0; i < Order(local_A); i++) for (j = 0; j < Order(local_A); j++) Entry(local_A,i,j) = 0.0E0; } /* Set_to_zero */ /*********************************************************/ void Build_matrix_type( LOCAL_MATRIX_T* local_A /* in */) { MPI_Datatype temp_mpi_t; int block_lengths[2]; MPI_Aint displacements[2]; MPI_Datatype typelist[2]; MPI_Aint start_address; MPI_Aint address; MPI_Type_contiguous(Order(local_A)*Order(local_A), FLOAT_MPI, &temp_mpi_t); // Creates a contiguous datatype /* Synopsis int MPI_Type_contiguous(int count, MPI_Datatype oldtype, MPI_Datatype *newtype) Input Parameters count replication count (nonnegative integer) oldtype old datatype (handle) */ block_lengths[0] = block_lengths[1] = 1; typelist[0] = MPI_INT; typelist[1] = temp_mpi_t; MPI_Address(local_A, &start_address); // Gets the address of a location in caller's memory MPI_Address(&(local_A->n_bar), &address); /* Synopsis int MPI_Address(const void *location, MPI_Aint *address) Input Parameters location location in caller memory (choice) Output Parameters address address of location (address integer) */ displacements[0] = address - start_address; MPI_Address(local_A->entries, &address); displacements[1] = address - start_address; MPI_Type_struct(2, block_lengths, displacements, typelist, &local_matrix_mpi_t); // Creates a struct datatype /* Synopsis int MPI_Type_struct(int count, const int *array_of_blocklengths, const MPI_Aint *array_of_displacements, const MPI_Datatype *array_of_types, MPI_Datatype *newtype) Input 
Parameters count number of blocks (integer) -- also number of entries in arrays array_of_types , array_of_displacements and array_of_blocklengths array_of_blocklengths number of elements in each block (array) array_of_displacements byte displacement of each block (array) array_of_types type of elements in each block (array of handles to datatype objects) Output Parameters newtype new datatype (handle) */ MPI_Type_commit(&local_matrix_mpi_t); // Commits the datatype /* Synopsis int MPI_Type_commit(MPI_Datatype *datatype) Input Parameters datatype datatype (handle) */ } /* Build_matrix_type */ /*********************************************************/ /* local matrix multiplication function * withing OpenMP Thread Acceleration */ void Local_matrix_multiply( LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { int i, j, k; // int my_rank; // MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) // Threads acceleration upgrade, parallel task split for (i = 0; i < Order(local_A); i++) { // printf("Current in the Fox Kernel:\n my process id is %d, my thread id is %d\n",my_rank,omp_get_thread_num()); for (j = 0; j < Order(local_A); j++) for (k = 0; k < Order(local_B); k++) Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) /* Entry(local_C,i,j) = Entry(local_C,i,j) + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper */ } } /* Local_matrix_multiply */ /*********************************************************/ /* Recive and Print Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local 
matrix local_A from other processess */ void Print_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) printf("%20.15E ", Entry(local_A,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_A */ /*********************************************************/ /* Recive and Print Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) printf("%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, 
&status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage printf("\n"); } } fflush(stdout); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_B */ /*********************************************************/ /* Recive and Print Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) printf("%20.15E ", Entry(local_C,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_C */ /*********************************************************/ /* Recive and Write Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void 
Write_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_A.dat","w+"); printf("%s\n", title); fprintf(fp,"Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) fprintf(fp,"%20.15E ", Entry(local_A,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_A */ /*********************************************************/ /* Recive and Write Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Write_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_B.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) fprintf(fp, "%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } for (source = 1; 
source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_B */ /*********************************************************/ /* Recive and Write Local Matrix C: * Process 0 print local matrix local_C * Other Processess send local matrix local_C to process 0 * And process 0 receive local matrix local_C from other processess */ void Write_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_C.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) fprintf(fp, "%20.15E ", Entry(local_C,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_C */
crop_and_resize.c
#include <TH/TH.h>
#include <stdio.h>
#include <stdlib.h>   /* exit, EXIT_FAILURE — was missing: exit() was implicitly declared */
#include <math.h>

/* Validate that every box in [start_box, limit_box) refers to an existing
 * batch entry; on failure print to stderr and abort.  Kept serial and
 * called BEFORE the OpenMP loop: calling exit() from inside a parallel
 * region is not safe. */
static void validate_box_indices(
    const int * box_index_data,
    const int start_box,
    const int limit_box,
    const int batch_size
) {
    for (int b = start_box; b < limit_box; ++b)
    {
        const int b_in = box_index_data[b];
        if (b_in < 0 || b_in >= batch_size) {
            fprintf(stderr, "Error: batch_index %d out of range [0, %d)\n", b_in, batch_size);
            exit(EXIT_FAILURE);
        }
    }
}

/* Crop each box out of its batch image (NCHW, float) and bilinearly resize
 * the crop to crop_height x crop_width, writing into crops_data (also NCHW,
 * one "image" per box).  Box coordinates [y1, x1, y2, x2] are normalized to
 * [0, 1]; samples falling outside the image get extrapolation_value.
 * Boxes are processed in parallel with OpenMP. */
void CropAndResizePerBox(
    const float * image_data,
    const int batch_size,
    const int depth,
    const int image_height,
    const int image_width,

    const float * boxes_data,
    const int * box_index_data,
    const int start_box,
    const int limit_box,

    float * crops_data,
    const int crop_height,
    const int crop_width,
    const float extrapolation_value
) {
    const int image_channel_elements = image_height * image_width;
    const int image_elements = depth * image_channel_elements;

    const int channel_elements = crop_height * crop_width;
    const int crop_elements = depth * channel_elements;

    /* Fail fast (serially) on bad batch indices — see helper above. */
    validate_box_indices(box_index_data, start_box, limit_box, batch_size);

    int b;
    #pragma omp parallel for
    for (b = start_box; b < limit_box; ++b) {
        const float * box = boxes_data + b * 4;
        const float y1 = box[0];
        const float x1 = box[1];
        const float y2 = box[2];
        const float x2 = box[3];
        const int b_in = box_index_data[b];   /* already validated */

        /* Step between consecutive output samples, in input pixels;
           degenerate 1-sample axes sample the box center instead. */
        const float height_scale =
            (crop_height > 1)
                ? (y2 - y1) * (image_height - 1) / (crop_height - 1)
                : 0;
        const float width_scale =
            (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
                             : 0;

        for (int y = 0; y < crop_height; ++y)
        {
            const float in_y = (crop_height > 1)
                                   ? y1 * (image_height - 1) + y * height_scale
                                   : 0.5 * (y1 + y2) * (image_height - 1);

            if (in_y < 0 || in_y > image_height - 1)
            {
                /* Whole output row lies outside the image. */
                for (int x = 0; x < crop_width; ++x)
                {
                    for (int d = 0; d < depth; ++d)
                    {
                        /* crops(b, d, y, x) = extrapolation_value; */
                        crops_data[crop_elements * b + channel_elements * d + y * crop_width + x] = extrapolation_value;
                    }
                }
                continue;
            }

            const int top_y_index = floorf(in_y);
            const int bottom_y_index = ceilf(in_y);
            const float y_lerp = in_y - top_y_index;

            for (int x = 0; x < crop_width; ++x)
            {
                const float in_x = (crop_width > 1)
                                       ? x1 * (image_width - 1) + x * width_scale
                                       : 0.5 * (x1 + x2) * (image_width - 1);
                if (in_x < 0 || in_x > image_width - 1)
                {
                    for (int d = 0; d < depth; ++d)
                    {
                        crops_data[crop_elements * b + channel_elements * d + y * crop_width + x] = extrapolation_value;
                    }
                    continue;
                }

                const int left_x_index = floorf(in_x);
                const int right_x_index = ceilf(in_x);
                const float x_lerp = in_x - left_x_index;

                /* Bilinear interpolation of the four neighbors, per channel. */
                for (int d = 0; d < depth; ++d)
                {
                    const float *pimage = image_data + b_in * image_elements + d * image_channel_elements;

                    const float top_left = pimage[top_y_index * image_width + left_x_index];
                    const float top_right = pimage[top_y_index * image_width + right_x_index];
                    const float bottom_left = pimage[bottom_y_index * image_width + left_x_index];
                    const float bottom_right = pimage[bottom_y_index * image_width + right_x_index];

                    const float top = top_left + (top_right - top_left) * x_lerp;
                    const float bottom =
                        bottom_left + (bottom_right - bottom_left) * x_lerp;

                    crops_data[crop_elements * b + channel_elements * d + y * crop_width + x] = top + (bottom - top) * y_lerp;
                }
            }   // end for x
        }   // end for y
    }   // end for b
}

/* Forward pass: resize `crops` to [num_boxes, depth, crop_height, crop_width],
 * zero it, and fill it by cropping/resizing every box out of `image`. */
void crop_and_resize_forward(
    THFloatTensor * image,
    THFloatTensor * boxes,      // [y1, x1, y2, x2]
    THIntTensor * box_index,    // range in [0, batch_size)
    const float extrapolation_value,
    const int crop_height,
    const int crop_width,
    THFloatTensor * crops
) {
    const int batch_size = image->size[0];
    const int depth = image->size[1];
    const int image_height = image->size[2];
    const int image_width = image->size[3];

    const int num_boxes = boxes->size[0];

    // init output space
    THFloatTensor_resize4d(crops, num_boxes, depth, crop_height, crop_width);
    THFloatTensor_zero(crops);

    // crop_and_resize for each box
    CropAndResizePerBox(
        THFloatTensor_data(image),
        batch_size,
        depth,
        image_height,
        image_width,

        THFloatTensor_data(boxes),
        THIntTensor_data(box_index),
        0,
        num_boxes,

        THFloatTensor_data(crops),
        crop_height,
        crop_width,
        extrapolation_value
    );
}

/* Backward pass: scatter-add the gradient of each crop sample back onto the
 * four input pixels it was bilinearly interpolated from.  grads_image must
 * already have the image shape [bsize, c, h, w]; it is zeroed here.
 * Runs serially: multiple boxes may touch the same input pixel, so a naive
 * OpenMP loop over boxes would race on grads_image. */
void crop_and_resize_backward(
    THFloatTensor * grads,
    THFloatTensor * boxes,      // [y1, x1, y2, x2]
    THIntTensor * box_index,    // range in [0, batch_size)
    THFloatTensor * grads_image // resize to [bsize, c, hc, wc]
)
{
    // shape
    const int batch_size = grads_image->size[0];
    const int depth = grads_image->size[1];
    const int image_height = grads_image->size[2];
    const int image_width = grads_image->size[3];

    const int num_boxes = grads->size[0];
    const int crop_height = grads->size[2];
    const int crop_width = grads->size[3];

    // n_elements
    const int image_channel_elements = image_height * image_width;
    const int image_elements = depth * image_channel_elements;

    const int channel_elements = crop_height * crop_width;
    const int crop_elements = depth * channel_elements;

    // init output space
    THFloatTensor_zero(grads_image);

    // data pointer
    const float * grads_data = THFloatTensor_data(grads);
    const float * boxes_data = THFloatTensor_data(boxes);
    const int * box_index_data = THIntTensor_data(box_index);
    float * grads_image_data = THFloatTensor_data(grads_image);

    for (int b = 0; b < num_boxes; ++b) {
        const float * box = boxes_data + b * 4;
        const float y1 = box[0];
        const float x1 = box[1];
        const float y2 = box[2];
        const float x2 = box[3];

        const int b_in = box_index_data[b];
        if (b_in < 0 || b_in >= batch_size) {
            /* Diagnostics belong on stderr; EXIT_FAILURE instead of -1. */
            fprintf(stderr, "Error: batch_index %d out of range [0, %d)\n", b_in, batch_size);
            exit(EXIT_FAILURE);
        }

        const float height_scale =
            (crop_height > 1)
                ? (y2 - y1) * (image_height - 1) / (crop_height - 1)
                : 0;
        const float width_scale =
            (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
                             : 0;

        for (int y = 0; y < crop_height; ++y)
        {
            const float in_y = (crop_height > 1)
                                   ? y1 * (image_height - 1) + y * height_scale
                                   : 0.5 * (y1 + y2) * (image_height - 1);
            if (in_y < 0 || in_y > image_height - 1)
            {
                continue;   /* forward wrote extrapolation_value: no gradient */
            }
            const int top_y_index = floorf(in_y);
            const int bottom_y_index = ceilf(in_y);
            const float y_lerp = in_y - top_y_index;

            for (int x = 0; x < crop_width; ++x)
            {
                const float in_x = (crop_width > 1)
                                       ? x1 * (image_width - 1) + x * width_scale
                                       : 0.5 * (x1 + x2) * (image_width - 1);
                if (in_x < 0 || in_x > image_width - 1)
                {
                    continue;   /* forward wrote extrapolation_value: no gradient */
                }
                const int left_x_index = floorf(in_x);
                const int right_x_index = ceilf(in_x);
                const float x_lerp = in_x - left_x_index;

                for (int d = 0; d < depth; ++d)
                {
                    float *pimage = grads_image_data + b_in * image_elements + d * image_channel_elements;
                    const float grad_val = grads_data[crop_elements * b + channel_elements * d + y * crop_width + x];

                    /* Distribute grad_val with the bilinear weights used
                       in the forward pass. */
                    const float dtop = (1 - y_lerp) * grad_val;
                    pimage[top_y_index * image_width + left_x_index] += (1 - x_lerp) * dtop;
                    pimage[top_y_index * image_width + right_x_index] += x_lerp * dtop;

                    const float dbottom = y_lerp * grad_val;
                    pimage[bottom_y_index * image_width + left_x_index] += (1 - x_lerp) * dbottom;
                    pimage[bottom_y_index * image_width + right_x_index] += x_lerp * dbottom;
                }   // end d
            }   // end x
        }   // end y
    }   // end b
}
sageInterface.h
#ifndef ROSE_SAGE_INTERFACE #define ROSE_SAGE_INTERFACE #include "sage3basic.hhh" #include <stdint.h> #include <utility> #include "rosePublicConfig.h" // for ROSE_BUILD_JAVA_LANGUAGE_SUPPORT #include "OmpAttribute.h" #if 0 // FMZ(07/07/2010): the argument "nextErrorCode" should be call-by-reference SgFile* determineFileType ( std::vector<std::string> argv, int nextErrorCode, SgProject* project ); #else SgFile* determineFileType ( std::vector<std::string> argv, int& nextErrorCode, SgProject* project ); #endif #ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT #include "rewrite.h" #endif // DQ (7/20/2008): Added support for unparsing abitrary strings in the unparser. #include "astUnparseAttribute.h" #include <set> #ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT #include "LivenessAnalysis.h" #include "abstract_handle.h" #include "ClassHierarchyGraph.h" #endif // DQ (8/19/2004): Moved from ROSE/src/midend/astRewriteMechanism/rewrite.h //! A global function for getting the string associated with an enum (which is defined in global scope) ROSE_DLL_API std::string getVariantName (VariantT v); // DQ (12/9/2004): Qing, Rich and Dan have decided to start this namespace within ROSE // This namespace is specific to interface functions that operate on the Sage III AST. // The name was chosen so as not to conflict with other classes within ROSE. // This will become the future home of many interface functions which operate on // the AST and which are generally useful to users. As a namespace multiple files can be used // to represent the compete interface and different developers may contribute interface // functions easily. // Constructor handling: (We have sageBuilder.h now for this purpose, Liao 2/1/2008) // We could add simpler layers of support for construction of IR nodes by // hiding many details in "makeSg***()" functions. 
Such functions would // return pointers to the associated Sg*** objects and would be able to hide // many IR specific details, including: // memory handling // optional parameter settings not often required // use of Sg_File_Info objects (and setting them as transformations) // // namespace AST_Interface (this name is taken already by some of Qing's work :-) //! An alias for Sg_File_Info::generateDefaultFileInfoForTransformationNode() #define TRANS_FILE Sg_File_Info::generateDefaultFileInfoForTransformationNode() /** Functions that are useful when operating on the AST. * * The Sage III IR design attempts to be minimalist. Thus additional functionality is intended to be presented using separate * higher level interfaces which work with the IR. This namespace collects functions that operate on the IR and support * numerous types of operations that are common to general analysis and transformation of the AST. */ namespace SageInterface { // Liao 6/22/2016: keep records of loop init-stmt normalization, later help undo it to support autoPar. struct Transformation_Record { // a lookup table to check if a for loop has been normalized for its c99-style init-stmt std::map <SgForStatement* , bool > forLoopInitNormalizationTable; // Detailed record about the original declaration (1st in the pair) and the normalization generated new declaration (2nd in the pair) std::map <SgForStatement* , std::pair<SgVariableDeclaration*, SgVariableDeclaration*> > forLoopInitNormalizationRecord; } ; ROSE_DLL_API extern Transformation_Record trans_records; // DQ (4/3/2014): Added general AST support separate from the AST. // Container and API for analysis information that is outside of the AST and as a result // prevents frequent modification of the IR. class DeclarationSets { // DQ (4/3/2014): This stores all associated declarations as a map of sets. 
// the key to the map is the first nondefining declaration and the elements of the set are // all of the associated declarations (including the defining declaration). private: //! Map of first-nondefining declaration to all other associated declarations. std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > declarationMap; public: void addDeclaration(SgDeclarationStatement* decl); const std::set<SgDeclarationStatement*>* getDeclarations(SgDeclarationStatement* decl); std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > & getDeclarationMap(); bool isLocatedInDefiningScope(SgDeclarationStatement* decl); }; // DQ (4/3/2014): This constructs a data structure that holds analysis information about // the AST that is separate from the AST. This is intended to be a general mechanism // to support analysis information without constantly modifying the IR. DeclarationSets* buildDeclarationSets(SgNode*); //! An internal counter for generating unique SgName ROSE_DLL_API extern int gensym_counter; #ifdef ROSE_ENABLE_BINARY_ANALYSIS //! Find the main interpretation. SgAsmInterpretation* getMainInterpretation(SgAsmGenericFile* file); //! Get the unsigned value of a disassembled constant. uint64_t getAsmConstant(SgAsmValueExpression* e); //! Get the signed value of a disassembled constant. int64_t getAsmSignedConstant(SgAsmValueExpression *e); #endif //! Function to add "C" style comment to statement. void addMessageStatement( SgStatement* stmt, std::string message ); //! A persistent attribute to represent a unique name for an expression class UniqueNameAttribute : public AstAttribute { private: std::string name; public: UniqueNameAttribute(std::string n="") {name =n; }; void set_name (std::string n) {name = n;}; std::string get_name () {return name;}; }; //------------------------------------------------------------------------ //@{ /*! 
@name Symbol tables
\brief utility functions for symbol tables
*/

// DQ (8/5/2020): the "using namespace" directive will not hide existing visibility of symbols in resolving visibility.
// So we need to test if a symbol is visible excluding matching aliases due to using directives before we can decide to
// pursue name space qualification. This is best demonstrated by Cxx_tests/test2020_18.C, test2020_19.C, test2020_20.C,
// and test2020_21.C.
ROSE_DLL_API SgSymbol *lookupSymbolInParentScopesIgnoringAliasSymbols (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);

// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
//! Find a symbol in current and ancestor scopes for a given variable name, starting from top of ScopeStack if currentscope is not given or NULL.
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList);
ROSE_DLL_API SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);

// Liao 1/22/2008, used for get symbols for generating variable reference nodes
// ! Find a variable symbol in current and ancestor scopes for a given name
ROSE_DLL_API SgVariableSymbol *lookupVariableSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);

// DQ (11/24/2007): Functions moved from the Fortran support so that they could be called from within astPostProcessing.
//!look up the first matched function symbol in parent scopes given only a function name, starting from top of ScopeStack if currentscope is not given or NULL ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName, SgScopeStatement *currentScope=NULL); // Liao, 1/24/2008, find exact match for a function //!look up function symbol in parent scopes given both name and function type, starting from top of ScopeStack if currentscope is not given or NULL ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName, const SgType* t, SgScopeStatement *currentScope=NULL); ROSE_DLL_API SgFunctionSymbol *lookupTemplateFunctionSymbolInParentScopes (const SgName & functionName, SgFunctionType * ftype, SgTemplateParameterPtrList * tplparams, SgScopeStatement *currentScope=NULL); ROSE_DLL_API SgFunctionSymbol *lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & functionName, SgFunctionType * ftype, SgTemplateParameterPtrList * tplparams, SgScopeStatement *currentScope=NULL); ROSE_DLL_API SgTemplateVariableSymbol * lookupTemplateVariableSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList * tplparams, SgTemplateArgumentPtrList* tplargs, SgScopeStatement *currentScope=NULL); // DQ (8/21/2013): Modified to make newest function parameters be default arguments. // DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments. // DQ (5/7/2011): Added support for SgClassSymbol (used in name qualification support). 
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL); ROSE_DLL_API SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL); ROSE_DLL_API SgTypedefSymbol* lookupTypedefSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL); ROSE_DLL_API SgNonrealSymbol* lookupNonrealSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL); #if 0 // DQ (8/13/2013): This function does not make since any more, now that we have made the symbol // table handling more precise and we have to provide template parameters for any template lookup. // We also have to know if we want to lookup template classes, template functions, or template // member functions (since each have specific requirements). SgTemplateSymbol* lookupTemplateSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL); #endif #if 0 // DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes. // Where these are called we might not know enough information about the template parameters or function // types, for example. 
SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
SgTemplateFunctionSymbol* lookupTemplateFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
SgTemplateMemberFunctionSymbol* lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
#endif

// DQ (8/21/2013): Modified to make some of the newest function parameters be default arguments.
// DQ (8/13/2013): I am not sure if we want these functions in place of lookupTemplateSymbolInParentScopes.
ROSE_DLL_API SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList, SgScopeStatement *cscope = NULL);

ROSE_DLL_API SgEnumSymbol* lookupEnumSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);

ROSE_DLL_API SgNamespaceSymbol* lookupNamespaceSymbolInParentScopes(const SgName & name, SgScopeStatement *currentScope = NULL);

// DQ (7/17/2011): Added function from cxx branch that I need here for the Java support.
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *cscope);

/*! \brief set_name of symbol in symbol table.

This function extracts the symbol from the relevant symbol table,
changes the name (at the declaration) and reinserts it into the
symbol table.

\internal I think this is what this function does, I need to double check.
*/
// DQ (12/9/2004): Moved this function (by Alin Jula) from being a member of SgInitializedName
// to this location where it can be a part of the interface for the Sage III AST.
ROSE_DLL_API int set_name (SgInitializedName * initializedNameNode, SgName new_name); /*! \brief Output function type symbols in global function type symbol table. */ void outputGlobalFunctionTypeSymbolTable (); // DQ (6/27/2005): /*! \brief Output the local symbol tables. \implementation Each symbol table is output with the file infor where it is located in the source code. */ ROSE_DLL_API void outputLocalSymbolTables (SgNode * node); class OutputLocalSymbolTables:public AstSimpleProcessing { public: void visit (SgNode * node); }; /*! \brief Regenerate the symbol table. \implementation current symbol table must be NULL pointer before calling this function (for safety, but is this a good idea?) */ // DQ (9/28/2005): void rebuildSymbolTable (SgScopeStatement * scope); /*! \brief Clear those variable symbols with unknown type (together with initialized names) which are also not referenced by any variable references or declarations under root. If root is NULL, all symbols with unknown type will be deleted. */ void clearUnusedVariableSymbols (SgNode* root = NULL); // DQ (3/1/2009): //! All the symbol table references in the copied AST need to be reset after rebuilding the copied scope's symbol table. void fixupReferencesToSymbols( const SgScopeStatement* this_scope, SgScopeStatement* copy_scope, SgCopyHelp & help ); //@} //------------------------------------------------------------------------ //@{ /*! @name Stringify \brief Generate a useful string (name) to describe a SgNode */ /*! \brief Generate a useful name to describe the SgNode \internal default names are used for SgNode objects that can not be associated with a name. */ // DQ (9/21/2005): General function for extracting the name of declarations (when they have names) std::string get_name (const SgNode * node); /*! \brief Generate a useful name to describe the declaration \internal default names are used for declarations that can not be associated with a name. 
*/ // DQ (6/13/2005): General function for extracting the name of declarations (when they have names) std::string get_name (const SgStatement * stmt); /*! \brief Generate a useful name to describe the expression \internal default names are used for expressions that can not be associated with a name. */ std::string get_name (const SgExpression * expr); /*! \brief Generate a useful name to describe the declaration \internal default names are used for declarations that can not be associated with a name. */ // DQ (6/13/2005): General function for extracting the name of declarations (when they have names) std::string get_name (const SgDeclarationStatement * declaration); /*! \brief Generate a useful name to describe the scope \internal default names are used for scope that cannot be associated with a name. */ // DQ (6/13/2005): General function for extracting the name of declarations (when they have names) std::string get_name (const SgScopeStatement * scope); /*! \brief Generate a useful name to describe the SgSymbol \internal default names are used for SgSymbol objects that cannot be associated with a name. */ // DQ (2/11/2007): Added this function to make debugging support more complete (useful for symbol table debugging support). std::string get_name (const SgSymbol * symbol); /*! \brief Generate a useful name to describe the SgType \internal default names are used for SgType objects that cannot be associated with a name. */ std::string get_name (const SgType * type); /*! \brief Generate a useful name to describe the SgSupport IR node */ std::string get_name (const SgSupport * node); /*! \brief Generate a useful name to describe the SgLocatedNodeSupport IR node */ std::string get_name (const SgLocatedNodeSupport * node); /*! \brief Generate a useful name to describe the SgC_PreprocessorDirectiveStatement IR node */ std::string get_name ( const SgC_PreprocessorDirectiveStatement* directive ); /*! 
\brief Generate a useful name to describe the SgToken IR node */ std::string get_name ( const SgToken* token ); /*! \brief Returns the type introduced by a declaration. */ // PP (11/22/2021): General function for extracting the type of declarations (when they declare types) SgType* getDeclaredType(const SgDeclarationStatement* declaration); // DQ (3/20/2016): Added to refactor some of the DSL infrastructure support. /*! \brief Generate a useful name to support construction of identifiers from declarations. This function permits names to be generated that will be unique across translation units (a specific requirement different from the context of the get_name() functions above). \internal This supports only a restricted set of declarations presently. */ std::string generateUniqueNameForUseAsIdentifier ( SgDeclarationStatement* declaration ); std::string generateUniqueNameForUseAsIdentifier_support ( SgDeclarationStatement* declaration ); /*! \brief Global map of name collisions to support generateUniqueNameForUseAsIdentifier() function. */ extern std::map<std::string,int> local_name_collision_map; extern std::map<std::string,SgNode*> local_name_to_node_map; extern std::map<SgNode*,std::string> local_node_to_name_map; /*! \brief Traversal to set the global map of names to node and node to names.collisions to support generateUniqueNameForUseAsIdentifier() function. */ void computeUniqueNameForUseAsIdentifier( SgNode* astNode ); /*! \brief Reset map variables used to support generateUniqueNameForUseAsIdentifier() function. */ void reset_name_collision_map(); //@} //------------------------------------------------------------------------ //@{ /*! @name Class utilities \brief */ /*! \brief Get the default destructor from the class declaration */ // DQ (6/21/2005): Get the default destructor from the class declaration ROSE_DLL_API SgMemberFunctionDeclaration *getDefaultDestructor (SgClassDeclaration* classDeclaration); /*! 
\brief Get the default constructor from the class declaration */ // DQ (6/22/2005): Get the default constructor from the class declaration ROSE_DLL_API SgMemberFunctionDeclaration *getDefaultConstructor (SgClassDeclaration* classDeclaration); /*! \brief Return true if template definition is in the class, false if outside of class. */ // DQ (8/27/2005): ROSE_DLL_API bool templateDefinitionIsInClass (SgTemplateInstantiationMemberFunctionDecl* memberFunctionDeclaration); /*! \brief Generate a non-defining (forward) declaration from a defining function declaration. \internal should put into sageBuilder ? */ // DQ (9/17/2005): ROSE_DLL_API SgTemplateInstantiationMemberFunctionDecl* buildForwardFunctionDeclaration (SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation); //! Check if a SgNode is a declaration for a structure ROSE_DLL_API bool isStructDeclaration(SgNode * node); //! Check if a SgNode is a declaration for a union ROSE_DLL_API bool isUnionDeclaration(SgNode * node); #if 0 // DQ (8/28/2005): This is already a member function of the SgFunctionDeclaration // (so that it can handle template functions and member functions) /*! \brief Return true if member function of a template member function, of false if a non-template member function in a templated class. */ // DQ (8/27/2005): bool isTemplateMemberFunction (SgTemplateInstantiationMemberFunctionDecl* memberFunctionDeclaration); #endif // DQ (11/9/2020): Added function to support adding a default constructor definition to a class // if it does not have a default constructor, but has any other constructor that would prevend // a compiler generated default constructor from being generated by the compiler. // Note the physical_file_id is so that it can be marked to be unparsed when header file unparsing is active. 
ROSE_DLL_API bool addDefaultConstructorIfRequired ( SgClassType* classType, int physical_file_id = Sg_File_Info::TRANSFORMATION_FILE_ID ); //@} //------------------------------------------------------------------------ //@{ /*! @name Misc. \brief Not sure the classifications right now */ //! Recursively print current and parent nodes. used within gdb to probe the context of a node. void recursivePrintCurrentAndParent (SgNode* n) ; //! Save AST into a pdf file. Start from a node to find its enclosing file node. The entire file's AST will be saved into a pdf. void saveToPDF(SgNode* node, std::string filename); void saveToPDF(SgNode* node); // enable calling from gdb //! Pretty print AST horizontally, output to std output void printAST (SgNode* node); //! Pretty print AST horizontally, output to a specified text file. void printAST2TextFile (SgNode* node, const char* filename); void printAST2TextFile (SgNode* node, std::string filename); // DQ (2/12/2012): Added some diagnostic support. //! Diagnostic function for tracing back through the parent list to understand at runtime where in the AST a failure happened. void whereAmI(SgNode* node); //! Extract a SgPragmaDeclaration's leading keyword . For example "#pragma omp parallel" has a keyword of "omp". std::string extractPragmaKeyword(const SgPragmaDeclaration *); //! Check if a node is SgOmp*Statement ROSE_DLL_API bool isOmpStatement(SgNode* ); /*! \brief Return true if function is overloaded. */ // DQ (8/27/2005): bool isOverloaded (SgFunctionDeclaration * functionDeclaration); // DQ (2/14/2012): Added support function used for variable declarations in conditionals. //! Support function used for variable declarations in conditionals void initializeIfStmt(SgIfStmt *ifstmt, SgStatement* conditional, SgStatement * true_body, SgStatement * false_body); //! 
Support function used for variable declarations in conditionals void initializeSwitchStatement(SgSwitchStatement* switchStatement,SgStatement *item_selector,SgStatement *body); //! Support function used for variable declarations in conditionals void initializeWhileStatement(SgWhileStmt* whileStatement, SgStatement * condition, SgStatement *body, SgStatement *else_body); //! Generate unique names for expressions and attach the names as persistent attributes ("UniqueNameAttribute") void annotateExpressionsWithUniqueNames (SgProject* project); //! Check if a SgNode is a main() function declaration ROSE_DLL_API bool isMain (const SgNode* node); // DQ (6/22/2005): /*! \brief Generate unique name from C and C++ constructs. The name may contain space. This is support for the AST merge, but is generally useful as a more general mechanism than name mangling which is more closely ties to the generation of names to support link-time function name resolution. This is more general than common name mangling in that it resolves more relevant differences between C and C++ declarations. (e.g. the type within the declaration: "struct { int:8; } foo;"). \implementation current work does not support expressions. */ std::string generateUniqueName ( const SgNode * node, bool ignoreDifferenceBetweenDefiningAndNondefiningDeclarations); /** Generate a name like __temp#__ that is unique in the current scope and any parent and children scopes. # is a unique integer counter. * @param baseName the word to be included in the variable names. */ std::string generateUniqueVariableName(SgScopeStatement* scope, std::string baseName = "temp"); // DQ (8/10/2010): Added const to first parameter. // DQ (3/10/2007): //! Generate a unique string from the source file position information std::string declarationPositionString (const SgDeclarationStatement * declaration); // DQ (1/20/2007): //! 
Added mechanism to generate project name from list of file names ROSE_DLL_API std::string generateProjectName (const SgProject * project, bool supressSuffix = false ); //! Given a SgExpression that represents a named function (or bound member //! function), return the mentioned function SgFunctionDeclaration* getDeclarationOfNamedFunction(SgExpression* func); //! Get the mask expression from the header of a SgForAllStatement SgExpression* forallMaskExpression(SgForAllStatement* stmt); //! Find all SgPntrArrRefExp under astNode, then add SgVarRefExp (if any) of SgPntrArrRefExp's dim_info into NodeList_t void addVarRefExpFromArrayDimInfo(SgNode * astNode, Rose_STL_Container<SgNode *>& NodeList_t); // DQ (10/6/2006): Added support for faster mangled name generation (caching avoids recomputation). /*! \brief Support for faster mangled name generation (caching avoids recomputation). */ #ifndef SWIG // DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time). void clearMangledNameCache (SgGlobal * globalScope); void resetMangledNameCache (SgGlobal * globalScope); #endif std::string getMangledNameFromCache (SgNode * astNode); std::string addMangledNameToCache (SgNode * astNode, const std::string & mangledName); SgDeclarationStatement * getNonInstantiatonDeclarationForClass (SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation); //! a better version for SgVariableDeclaration::set_baseTypeDefininingDeclaration(), handling all side effects automatically //! Used to have a struct declaration embedded into a variable declaration void setBaseTypeDefiningDeclaration(SgVariableDeclaration* var_decl, SgDeclarationStatement *base_decl); // DQ (10/14/2006): This function tests the AST to see if for a non-defining declaration, the // bool declarationPreceedsDefinition ( SgClassDeclaration* classNonDefiningDeclaration, SgClassDeclaration* classDefiningDeclaration ); //! 
Check if a defining declaration comes before or after the non-defining declaration.
bool declarationPreceedsDefinition (SgDeclarationStatement *nonDefiningDeclaration, SgDeclarationStatement *definingDeclaration);

// DQ (10/19/2006): Function calls have interesting context dependent rules to determine if
// they are output with a global qualifier or not. Where this is true we have to avoid global
// qualifiers, since the function's scope has not been defined. This is an example of where
// qualification of function names in function calls are context dependent; an interesting
// example of where the C++ language is not friendly to source-to-source processing :-).
bool functionCallExpressionPreceedsDeclarationWhichAssociatesScope (SgFunctionCallExp * functionCall);

/*! \brief Compute the intersection set for two ASTs.

This is part of a test done by the copy function to compute those IR nodes in the copy that still reference the original AST.
*/
ROSE_DLL_API std::vector < SgNode * >astIntersection (SgNode * original, SgNode * copy, SgCopyHelp * help = NULL);

//! Deep copy an arbitrary subtree
ROSE_DLL_API SgNode* deepCopyNode (const SgNode* subtree);

//! A template function for deep copying a subtree. It is also used to create deepcopy functions with specialized parameter and return types. e.g. SgExpression* copyExpression(SgExpression* e);
template <typename NodeType>
NodeType* deepCopy (const NodeType* subtree)
   {
     return dynamic_cast<NodeType*>(deepCopyNode(subtree));
   }

//! Deep copy an expression
ROSE_DLL_API SgExpression* copyExpression(SgExpression* e);

//! Deep copy a statement
ROSE_DLL_API SgStatement* copyStatement(SgStatement* s);

// from VarSym.cc in src/midend/astOutlining/src/ASTtools
//! Get the variable symbol for the first initialized name of a declaration stmt.
ROSE_DLL_API SgVariableSymbol* getFirstVarSym (SgVariableDeclaration* decl);

//!
Get the first initialized name of a declaration statement ROSE_DLL_API SgInitializedName* getFirstInitializedName (SgVariableDeclaration* decl); //! A special purpose statement removal function, originally from inlinerSupport.h, Need Jeremiah's attention to refine it. Please don't use it for now. ROSE_DLL_API void myRemoveStatement(SgStatement* stmt); ROSE_DLL_API bool isConstantTrue(SgExpression* e); ROSE_DLL_API bool isConstantFalse(SgExpression* e); ROSE_DLL_API bool isCallToParticularFunction(SgFunctionDeclaration* decl, SgExpression* e); ROSE_DLL_API bool isCallToParticularFunction(const std::string& qualifiedName, size_t arity, SgExpression* e); //! Check if a declaration has a "static' modifier bool ROSE_DLL_API isStatic(SgDeclarationStatement* stmt); //! Set a declaration as static ROSE_DLL_API void setStatic(SgDeclarationStatement* stmt); //! Check if a declaration has an "extern" modifier ROSE_DLL_API bool isExtern(SgDeclarationStatement* stmt); //! Set a declaration as extern ROSE_DLL_API void setExtern(SgDeclarationStatement* stmt); //! True if an SgInitializedName is "mutable' (has storage modifier set) bool ROSE_DLL_API isMutable(SgInitializedName* name); //! True if a parameter name is a Jovial output parameter bool ROSE_DLL_API isJovialOutParam(SgInitializedName* name); //! Get a vector of Jovial input parameters from the function parameter list (may work for Fortran in the future) std::vector<SgInitializedName*> getInParameters(const SgInitializedNamePtrList &params); //! Get a vector of Jovial output parameters from the function parameter list (may work for Fortran in the future) std::vector<SgInitializedName*> getOutParameters(const SgInitializedNamePtrList &params); //! Interface for creating a statement whose computation writes its answer into //! a given variable. class StatementGenerator { public: virtual ~StatementGenerator() {}; virtual SgStatement* generate(SgExpression* where_to_write_answer) = 0; }; //! 
Check if a SgNode _s is an assignment statement (any of =,+=,-=,&=,/=, ^=, etc) //! //! Return the left hand, right hand expressions and if the left hand variable is also being read bool isAssignmentStatement(SgNode* _s, SgExpression** lhs=NULL, SgExpression** rhs=NULL, bool* readlhs=NULL); //! Variable references can be introduced by SgVarRef, SgPntrArrRefExp, SgInitializedName, SgMemberFunctionRef etc. For Dot and Arrow Expressions, their lhs is used to obtain SgInitializedName (coarse grain) by default. Otherwise, fine-grain rhs is used. ROSE_DLL_API SgInitializedName* convertRefToInitializedName(SgNode* current, bool coarseGrain=true); //! Build an abstract handle from an AST node, reuse previously built handle when possible ROSE_DLL_API AbstractHandle::abstract_handle* buildAbstractHandle(SgNode*); //! Obtain a matching SgNode from an abstract handle string ROSE_DLL_API SgNode* getSgNodeFromAbstractHandleString(const std::string& input_string); //! Dump information about a SgNode for debugging ROSE_DLL_API void dumpInfo(SgNode* node, std::string desc=""); //! Reorder a list of declaration statements based on their appearance order in source files ROSE_DLL_API std::vector<SgDeclarationStatement*> sortSgNodeListBasedOnAppearanceOrderInSource(const std::vector<SgDeclarationStatement*>& nodevec); // DQ (4/13/2013): We need these to support the unparing of operators defined by operator syntax or member function names. //! Is an overloaded operator a prefix operator (e.g. address operator X * operator&(), dereference operator X & operator*(), unary plus operator X & operator+(), etc. // bool isPrefixOperator( const SgMemberFunctionRefExp* memberFunctionRefExp ); bool isPrefixOperator( SgExpression* exp ); //! Check for proper names of possible prefix operators (used in isPrefixOperator()). bool isPrefixOperatorName( const SgName & functionName ); //! Is an overloaded operator a postfix operator. (e.g. ). bool isPostfixOperator( SgExpression* exp ); //! 
Is an overloaded operator an index operator (also referred to as call or subscript operators). (e.g. X & operator()() or X & operator[]()).
bool isIndexOperator( SgExpression* exp );

// DQ (1/10/2014): Adding more general support for token based unparsing.
//! Used to support token unparsing (when outputting the trailing token sequence).
SgStatement* lastStatementOfScopeWithTokenInfo (SgScopeStatement* scope, std::map<SgNode*,TokenStreamSequenceToNodeMapping*> & tokenStreamSequenceMap);

// DQ (8/12/2020): Check the access permissions of all defining and nondefining declarations.
void checkAccessPermissions ( SgNode* );

// DQ (8/14/2020): Check the symbol tables for specific scopes (debugging support).
void checkSymbolTables ( SgNode* );

// DQ (11/9/2020): Added support for marking IR nodes and subtrees of the AST to be unparsed (physical_file_id
// is required when unparsing header files is true or support multiple files and shared IR nodes).
void markSubtreeToBeUnparsed(SgNode* root, int physical_file_id);
void markNodeToBeUnparsed(SgNode* node, int physical_file_id);

//@}

//------------------------------------------------------------------------
//@{
/*! @name AST properties
\brief version, language properties of current AST.
*/

// DQ (11/25/2020): Add support to set this as a specific language kind file (there is at least one language kind file processed by ROSE).
// The value of 0 allows the old implementation to be tested, and the value of 1 allows the new optimized implementation to be tested.
// However to get all of the functions to be inlined, we have to recompile all of ROSE.
#define INLINE_OPTIMIZED_IS_LANGUAGE_KIND_FUNCTIONS 1

// std::string version();  // utility_functions.h, version number

/*! Brief These traverse the memory pool of SgFile IR nodes and determine what languages are in use!
*/ #if INLINE_OPTIMIZED_IS_LANGUAGE_KIND_FUNCTIONS ROSE_DLL_API inline bool is_Ada_language () { return Rose::is_Ada_language; } ROSE_DLL_API inline bool is_C_language () { return Rose::is_C_language; } ROSE_DLL_API inline bool is_Cobol_language () { return Rose::is_Cobol_language; } ROSE_DLL_API inline bool is_OpenMP_language () { return Rose::is_OpenMP_language; } ROSE_DLL_API inline bool is_UPC_language () { return Rose::is_UPC_language; } ROSE_DLL_API inline bool is_UPC_dynamic_threads() { return Rose::is_UPC_dynamic_threads; } ROSE_DLL_API inline bool is_C99_language () { return Rose::is_C99_language; } ROSE_DLL_API inline bool is_Cxx_language () { return Rose::is_Cxx_language; } ROSE_DLL_API inline bool is_Java_language () { return Rose::is_Java_language; } ROSE_DLL_API inline bool is_Jovial_language () { return Rose::is_Jovial_language; } ROSE_DLL_API inline bool is_Fortran_language () { return Rose::is_Fortran_language; } ROSE_DLL_API inline bool is_CAF_language () { return Rose::is_CAF_language; } ROSE_DLL_API inline bool is_PHP_language() { return Rose::is_PHP_language; } ROSE_DLL_API inline bool is_Python_language() { return Rose::is_Python_language; } ROSE_DLL_API inline bool is_Cuda_language() { return Rose::is_Cuda_language; } ROSE_DLL_API inline bool is_OpenCL_language() { return Rose::is_OpenCL_language; } ROSE_DLL_API inline bool is_X10_language() { return Rose::is_X10_language; } ROSE_DLL_API inline bool is_binary_executable() { return Rose::is_binary_executable; } #else ROSE_DLL_API bool is_Ada_language (); ROSE_DLL_API bool is_C_language (); ROSE_DLL_API bool is_Cobol_language (); ROSE_DLL_API bool is_OpenMP_language (); ROSE_DLL_API bool is_UPC_language (); //! 
Check if dynamic threads compilation is used for UPC programs ROSE_DLL_API bool is_UPC_dynamic_threads(); ROSE_DLL_API bool is_C99_language (); ROSE_DLL_API bool is_Cxx_language (); ROSE_DLL_API bool is_Java_language (); ROSE_DLL_API bool is_Jovial_language (); ROSE_DLL_API bool is_Fortran_language (); ROSE_DLL_API bool is_CAF_language (); ROSE_DLL_API bool is_PHP_language(); ROSE_DLL_API bool is_Python_language(); ROSE_DLL_API bool is_Cuda_language(); ROSE_DLL_API bool is_OpenCL_language(); ROSE_DLL_API bool is_X10_language(); ROSE_DLL_API bool is_binary_executable(); #endif ROSE_DLL_API bool is_mixed_C_and_Cxx_language (); ROSE_DLL_API bool is_mixed_Fortran_and_C_language (); ROSE_DLL_API bool is_mixed_Fortran_and_Cxx_language (); ROSE_DLL_API bool is_mixed_Fortran_and_C_and_Cxx_language (); ROSE_DLL_API bool is_language_case_insensitive (); ROSE_DLL_API bool language_may_contain_nondeclarations_in_scope (); //@} //------------------------------------------------------------------------ //@{ /*! @name Scope \brief */ // DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique // labels for scopes in a function (as required for name mangling). /*! \brief Assigns unique numbers to each SgScopeStatement of a function. This is used to provide unique names for variables and types defined is different nested scopes of a function (used in mangled name generation). */ void resetScopeNumbers (SgFunctionDefinition * functionDeclaration); // DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique // labels for scopes in a function (as required for name mangling). /*! \brief Clears the cache of scope,integer pairs for the input function. This is used to clear the cache of computed unique labels for scopes in a function. This function should be called after any transformation on a function that might effect the allocation of scopes and cause the existing unique numbers to be incorrect. 
This is part of support to provide unique names for variables and types defined is different nested scopes of a function (used in mangled name generation). */ void clearScopeNumbers (SgFunctionDefinition * functionDefinition); //!Find the enclosing namespace of a declaration SgNamespaceDefinitionStatement * enclosingNamespaceScope (SgDeclarationStatement * declaration); // SgNamespaceDefinitionStatement * getEnclosingNamespaceScope (SgNode * node); bool isPrototypeInScope (SgScopeStatement * scope, SgFunctionDeclaration * functionDeclaration, SgDeclarationStatement * startingAtDeclaration); //!check if node1 is a strict ancestor of node 2. (a node is not considered its own ancestor) bool ROSE_DLL_API isAncestor(SgNode* node1, SgNode* node2); //@} //------------------------------------------------------------------------ //@{ /*! @name Preprocessing Information \brief #if-#else-#end, comments, #include, etc */ //! Dumps a located node's preprocessing information. void dumpPreprocInfo (SgLocatedNode* locatedNode); //! Find the preprocessingInfo node representing #include <header.h> or #include "header.h" within a source file. Return NULL if not found. ROSE_DLL_API PreprocessingInfo * findHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader); //! Insert #include "filename" or #include <filename> (system header) onto the global scope of a source file, add to be the last #include .. by default among existing headers, Or as the first header. Recommended for use. ROSE_DLL_API PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader, bool asLastHeader); //! Insert a new header right before stmt, if there are existing headers attached to stmt, insert it as the last or first header as specified by asLastHeader ROSE_DLL_API void insertHeader (SgStatement* stmt, PreprocessingInfo* newheader, bool asLastHeader); //! 
Insert #include "filename" or #include <filename> (system header) onto the global scope of a source file ROSE_DLL_API PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader = false, PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before); //! Insert #include "filename" or #include <filename> (system header) into the global scope containing the current scope, right after other #include XXX. ROSE_DLL_API PreprocessingInfo* insertHeader(const std::string& filename, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::after, bool isSystemHeader=false, SgScopeStatement* scope=NULL); //! Identical to movePreprocessingInfo(), except for the stale name and confusing order of parameters. It will be deprecated soon. ROSE_DLL_API void moveUpPreprocessingInfo (SgStatement* stmt_dst, SgStatement* stmt_src, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef, PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false); //! Move preprocessing information of stmt_src to stmt_dst, Only move preprocessing information from the specified source-relative position to a specified target position, otherwise move all preprocessing information with position information intact. The preprocessing information is appended to the existing preprocessing information list of the target node by default. Prepending is used if usePreprend is set to true. Optionally, the relative position can be adjust after the moving using dst_position. ROSE_DLL_API void movePreprocessingInfo (SgStatement* stmt_src, SgStatement* stmt_dst, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef, PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false); //!Cut preprocessing information from a source node and save it into a buffer. Used in combination of pastePreprocessingInfo(). 
The cut-paste operation is similar to moveUpPreprocessingInfo() but it is more flexible in that the destination node can be unknown during the cut operation. ROSE_DLL_API void cutPreprocessingInfo (SgLocatedNode* src_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& save_buf); //!Paste preprocessing information from a buffer to a destination node. Used in combination of cutPreprocessingInfo() ROSE_DLL_API void pastePreprocessingInfo (SgLocatedNode* dst_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& saved_buf); //! Attach an arbitrary string to a located node. A workaround to insert irregular statements or vendor-specific attributes. ROSE_DLL_API PreprocessingInfo* attachArbitraryText(SgLocatedNode* target, const std::string & text, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before); //!Check if a pragma declaration node has macro calls attached, if yes, replace macro calls within the pragma string with expanded strings. This only works if -rose:wave is turned on. ROSE_DLL_API void replaceMacroCallsWithExpandedStrings(SgPragmaDeclaration* target); //@} //! Build and attach comment onto the global scope of a source file PreprocessingInfo* attachComment( SgSourceFile * source_file, const std::string & content, PreprocessingInfo::DirectiveType directive_type = PreprocessingInfo::C_StyleComment, PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before ); //! Build and attach comment, comment style is inferred from the language type of the target node if not provided ROSE_DLL_API PreprocessingInfo* attachComment(SgLocatedNode* target, const std::string & content, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before, PreprocessingInfo::DirectiveType dtype= PreprocessingInfo::CpreprocessorUnknownDeclaration); // DQ (7/20/2008): I am not clear were I should put this function, candidates include: SgLocatedNode or SgInterface //! 
Add a string to be unparsed to support code generation for back-end specific tools or compilers. ROSE_DLL_API void addTextForUnparser ( SgNode* astNode, std::string s, AstUnparseAttribute::RelativePositionType inputlocation ); /** * Add preproccessor guard around a given node. * It surrounds the node with "#if guard" and "#endif" */ void guardNode(SgLocatedNode * target, std::string guard); //@} //------------------------------------------------------------------------ //@{ /*! @name Source File Position \brief set Sg_File_Info for a SgNode */ // ************************************************************************ // Newer versions of now depricated functions // ************************************************************************ // DQ (5/1/2012): This function queries the SageBuilder::SourcePositionClassification mode (stored in the SageBuilder // interface) and used the specified mode to initialize the source position data (Sg_File_Info objects). This // function is the only function that should be called directly (though in a namespace we can't define permissions). //! Set the source code positon for the current (input) node. ROSE_DLL_API void setSourcePosition(SgNode* node); // A better name might be "setSourcePositionForSubTree" //! Set the source code positon for the subtree (including the root). ROSE_DLL_API void setSourcePositionAtRootAndAllChildren(SgNode *root); //! DQ (5/1/2012): New function with improved name. void setSourcePositionAsTransformation(SgNode *node); // DQ (5/1/2012): Newly renamed function (previous name preserved for backward compatability). void setSourcePositionPointersToNull(SgNode *node); // ************************************************************************ // ************************************************************************ // Older deprecated functions // ************************************************************************ // Liao, 1/8/2007, set file info. for a whole subtree as transformation generated //! 
Set current node's source position as transformation generated ROSE_DLL_API void setOneSourcePositionForTransformation(SgNode *node); //! Set current node's source position as NULL ROSE_DLL_API void setOneSourcePositionNull(SgNode *node); //! Recursively set source position info(Sg_File_Info) as transformation generated ROSE_DLL_API void setSourcePositionForTransformation (SgNode * root); //! Set source position info(Sg_File_Info) as transformation generated for all SgNodes in memory pool // ROSE_DLL_API void setSourcePositionForTransformation_memoryPool(); //! Check if a node is from a system header file ROSE_DLL_API bool insideSystemHeader (SgLocatedNode* node); // DQ (2/27/2021): Adding support to detect if a SgLocatedNode is located in a header file. //! Check if a node is from a header file ROSE_DLL_API bool insideHeader (SgLocatedNode* node); //! Set the source position of SgLocatedNode to Sg_File_Info::generateDefaultFileInfo(). These nodes WILL be unparsed. Not for transformation usage. // ROSE_DLL_API void setSourcePosition (SgLocatedNode * locatedNode); // ************************************************************************ //@} //------------------------------------------------------------------------ //@{ /*! @name Data types \brief */ // from src/midend/astInlining/typeTraits.h // src/midend/astUtil/astInterface/AstInterface.h //! Get the right bool type according to C or C++ language input SgType* getBoolType(SgNode* n); //! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long. ////! ////! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool to be treated as integer types ROSE_DLL_API bool isStrictIntegerType(SgType* t); //!Get the data type of the first initialized name of a declaration statement ROSE_DLL_API SgType* getFirstVarType(SgVariableDeclaration* decl); //! Is a type default constructible? This may not quite work properly. 
ROSE_DLL_API bool isDefaultConstructible(SgType* type); //! Is a type copy constructible? This may not quite work properly. ROSE_DLL_API bool isCopyConstructible(SgType* type); //! Is a type assignable? This may not quite work properly. ROSE_DLL_API bool isAssignable(SgType* type); #ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT //! Check if a class type is a pure virtual class. True means that there is at least //! one pure virtual function that has not been overridden. //! In the case of an incomplete class type (forward declaration), this function returns false. ROSE_DLL_API bool isPureVirtualClass(SgType* type, const ClassHierarchyWrapper& classHierarchy); #endif //! Does a type have a trivial (built-in) destructor? ROSE_DLL_API bool hasTrivialDestructor(SgType* t); //! Is this type a non-constant reference type? (Handles typedefs correctly) ROSE_DLL_API bool isNonconstReference(SgType* t); //! Is this type a const or non-const reference type? (Handles typedefs correctly) ROSE_DLL_API bool isReferenceType(SgType* t); //! Is this type a pointer type? (Handles typedefs correctly) ROSE_DLL_API bool isPointerType(SgType* t); //! Is this a pointer to a non-const type? Note that this function will return true for const pointers pointing to //! non-const types. For example, (int* const y) points to a modifiable int, so this function returns true. Meanwhile, //! it returns false for (int const * x) and (int const * const x) because these types point to a const int. //! Also, only the outer layer of nested pointers is unwrapped. So the function returns true for (const int ** y), but returns //! false for const (int * const * x) ROSE_DLL_API bool isPointerToNonConstType(SgType* type); //! Is this a const type? /* const char* p = "aa"; is not treated as having a const type. It is a pointer to const char. 
* Similarly, neither for const int b[10]; or const int & c =10; * The standard says, "A compound type is not cv-qualified by the cv-qualifiers (if any) of the types from which it is compounded. Any cv-qualifiers applied to an array type affect the array element type, not the array type". */ ROSE_DLL_API bool isConstType(SgType* t); //! Remove const (if present) from a type. stripType() cannot do this because it removes all modifiers. SgType* removeConst(SgType* t); //! Is this a volatile type? ROSE_DLL_API bool isVolatileType(SgType* t); //! Is this a restrict type? ROSE_DLL_API bool isRestrictType(SgType* t); //! Is this a scalar type? /*! We define the following SgType as scalar types: char, short, int, long , void, Wchar, Float, double, long long, string, bool, complex, imaginary */ ROSE_DLL_API bool isScalarType(SgType* t); //! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long. //! //! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool. ROSE_DLL_API bool isStrictIntegerType(SgType* t); //! Check if a type is a struct type (a special SgClassType in ROSE) ROSE_DLL_API bool isStructType(SgType* t); //! Generate a mangled string for a given type based on Itanium C++ ABI ROSE_DLL_API std::string mangleType(SgType* type); //! Generate mangled scalar type names according to Itanium C++ ABI, the input type should pass isScalarType() in ROSE ROSE_DLL_API std::string mangleScalarType(SgType* type); //! Generated mangled modifier types, include const, volatile,according to Itanium C++ ABI, with extension to handle UPC shared types. ROSE_DLL_API std::string mangleModifierType(SgModifierType* type); //! Calculate the number of elements of an array type: dim1* dim2*... , assume element count is 1 for int a[]; Strip off THREADS if it is a UPC array. ROSE_DLL_API size_t getArrayElementCount(SgArrayType* t); //! 
Get the number of dimensions of an array type ROSE_DLL_API int getDimensionCount(SgType* t); //! Get the element type of an array. It recursively find the base type for multi-dimension array types ROSE_DLL_API SgType* getArrayElementType(SgType* t); //! Get the element type of an array, pointer or string, or NULL if not applicable. This function only check one level base type. No recursion. ROSE_DLL_API SgType* getElementType(SgType* t); /// \brief returns the array dimensions in an array as defined for arrtype /// \param arrtype the type of a C/C++ array /// \return an array that contains an expression indicating each dimension's size. /// OWNERSHIP of the expressions is TRANSFERED TO the CALLER (which /// becomes responsible for freeing the expressions). /// Note, the first entry of the array is a SgNullExpression, iff the /// first array dimension was not specified. /// \code /// int x[] = { 1, 2, 3 }; /// \endcode /// note, the expression does not have to be a constant /// \code /// int x[i*5]; /// \endcode /// \post return-value.empty() == false /// \post return-value[*] != NULL (no nullptr in the returned vector) std::vector<SgExpression*> get_C_array_dimensions(const SgArrayType& arrtype); /// \brief returns the array dimensions in an array as defined for arrtype /// \param arrtype the type of a C/C++ array /// \param varref a reference to an array variable (the variable of type arrtype) /// \return an array that contains an expression indicating each dimension's size. /// OWNERSHIP of the expressions is TRANSFERED TO the CALLER (which /// becomes responsible for freeing the expressions). /// If the first array dimension was not specified an expression /// that indicates that size is generated. /// \code /// int x[][3] = { 1, 2, 3, 4, 5, 6 }; /// \endcode /// the entry for the first dimension will be: /// \code /// // 3 ... 
size of 2nd dimension /// sizeof(x) / (sizeof(int) * 3) /// \endcode /// \pre arrtype is the array-type of varref /// \post return-value.empty() == false /// \post return-value[*] != NULL (no nullptr in the returned vector) /// \post !isSgNullExpression(return-value[*]) std::vector<SgExpression*> get_C_array_dimensions(const SgArrayType& arrtype, const SgVarRefExp& varref); /// \overload /// \note see get_C_array_dimensions for SgVarRefExp for details. /// \todo make initname const std::vector<SgExpression*> get_C_array_dimensions(const SgArrayType& arrtype, SgInitializedName& initname); //! Check if an expression is an array access (SgPntrArrRefExp). If so, return its name expression and subscripts if requested. Users can use convertRefToInitializedName() to get the possible name. It does not check if the expression is a top level SgPntrArrRefExp. ROSE_DLL_API bool isArrayReference(SgExpression* ref, SgExpression** arrayNameExp=NULL, std::vector<SgExpression*>** subscripts=NULL); //! Collect variable references in array types. The default NodeQuery::querySubTree() will miss variables referenced in array type's index list. e.g. double *buffer = new double[numItems] ; ROSE_DLL_API int collectVariableReferencesInArrayTypes (SgLocatedNode* root, Rose_STL_Container<SgNode*> & currentVarRefList); //! Has a UPC shared type of any kinds (shared-to-shared, private-to-shared, shared-to-private, shared scalar/array)? An optional parameter, mod_type_out, stores the first SgModifierType with UPC access information. /*! * Note: we classify private-to-shared as 'has shared' type for convenience here. It is indeed a private type in strict sense. 
AST graph for some examples: - shared scalar: SgModifierType -->base type - shared array: SgArrayType --> SgModiferType --> base type - shared to shared: SgModifierType --> SgPointerType --> SgModifierType ->SgTypeInt - shared to private: SgModifierType --> SgPointerType --> base type - private to shared: SgPointerType --> SgModifierType --> base type */ ROSE_DLL_API bool hasUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL ); //! Check if a type is a UPC shared type, including shared array, shared pointers etc. Exclude private pointers to shared types. Optionally return the modifier type with the UPC shared property. /*! * ROSE uses SgArrayType of SgModifierType to represent shared arrays, not SgModifierType points to SgArrayType. Also typedef may cause a chain of nodes before reach the actual SgModifierType with UPC shared property. */ ROSE_DLL_API bool isUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL); //! Check if a modifier type is a UPC shared type. ROSE_DLL_API bool isUpcSharedModifierType (SgModifierType* mod_type); //! Check if an array type is a UPC shared type. ROSE AST represents a UPC shared array as regular array of elements of UPC shared Modifier Type. Not directly a UPC shared Modifier Type of an array. ROSE_DLL_API bool isUpcSharedArrayType (SgArrayType* array_type); //! Check if a shared UPC type is strict memory consistency or not. Return false if it is relaxed. (So isUpcRelaxedSharedModifierType() is not necessary.) ROSE_DLL_API bool isUpcStrictSharedModifierType(SgModifierType* mode_type); //! Get the block size of a UPC shared modifier type ROSE_DLL_API size_t getUpcSharedBlockSize(SgModifierType* mod_type); //! Get the block size of a UPC shared type, including Modifier types and array of modifier types (shared arrays) ROSE_DLL_API size_t getUpcSharedBlockSize(SgType* t); //! Is UPC phase-less shared type? Phase-less means block size of the first SgModifierType with UPC information is 1 or 0/unspecified. 
Also return false if the type is not a UPC shared type. ROSE_DLL_API bool isUpcPhaseLessSharedType (SgType* t); //! Is a UPC private-to-shared pointer? SgPointerType comes first compared to SgModifierType with UPC information. Input type must be any of UPC shared types first. ROSE_DLL_API bool isUpcPrivateToSharedType(SgType* t); //! Is a UPC array with dimension of X*THREADS ROSE_DLL_API bool isUpcArrayWithThreads(SgArrayType* t); //! Lookup a named type based on its name, bottomup searching from a specified scope. Note name collison might be allowed for c (not C++) between typedef and enum/struct. Only the first matched named type will be returned in this case. typedef is returned as it is, not the base type it actually refers to. ROSE_DLL_API SgType* lookupNamedTypeInParentScopes(const std::string& type_name, SgScopeStatement* scope=NULL); // DQ (7/22/2014): Added support for comparing expression types in actual arguments with those expected from the formal function parameter types. //! Get the type of the associated argument expression from the function type. ROSE_DLL_API SgType* getAssociatedTypeFromFunctionTypeList(SgExpression* actual_argument_expression); //! Verify that 2 SgTemplateArgument are equivalent (same type, same expression, or same template declaration) ROSE_DLL_API bool templateArgumentEquivalence(SgTemplateArgument * arg1, SgTemplateArgument * arg2); //! Verify that 2 SgTemplateArgumentPtrList are equivalent. ROSE_DLL_API bool templateArgumentListEquivalence(const SgTemplateArgumentPtrList & list1, const SgTemplateArgumentPtrList & list2); //! Test for equivalence of types independent of access permissions (private or protected modes for members of classes). ROSE_DLL_API bool isEquivalentType (const SgType* lhs, const SgType* rhs); //! Find the function type matching a function signature plus a given return type ROSE_DLL_API SgFunctionType* findFunctionType (SgType* return_type, SgFunctionParameterTypeList* typeList); //! 
Test if two types are equivalent SgFunctionType nodes. This is necessary for template function types //! They may differ in one SgTemplateType pointer but identical otherwise. ROSE_DLL_API bool isEquivalentFunctionType (const SgFunctionType* lhs, const SgFunctionType* rhs); //@} //------------------------------------------------------------------------ //@{ /*! @name Loop handling \brief */ // by Jeremiah //! Add a step statement to the end of a loop body //! Add a new label to the end of the loop, with the step statement after //! it; then change all continue statements in the old loop body into //! jumps to the label //! //! For example: //! while (a < 5) {if (a < -3) continue;} (adding "a++" to end) becomes //! while (a < 5) {if (a < -3) goto label; label: a++;} ROSE_DLL_API void addStepToLoopBody(SgScopeStatement* loopStmt, SgStatement* step); ROSE_DLL_API void moveForStatementIncrementIntoBody(SgForStatement* f); ROSE_DLL_API void convertForToWhile(SgForStatement* f); ROSE_DLL_API void convertAllForsToWhiles(SgNode* top); //! Change continue statements in a given block of code to gotos to a label ROSE_DLL_API void changeContinuesToGotos(SgStatement* stmt, SgLabelStatement* label); //!Return the loop index variable for a for loop ROSE_DLL_API SgInitializedName* getLoopIndexVariable(SgNode* loop); //!Check if a SgInitializedName is used as a loop index within a AST subtree //! This function will use a bottom-up traverse starting from the subtree_root to find all enclosing loops and check if ivar is used as an index for either of them. ROSE_DLL_API bool isLoopIndexVariable(SgInitializedName* ivar, SgNode* subtree_root); //! Check if a for loop uses C99 style initialization statement with multiple expressions like for (int i=0, j=0; ..) or for (i=0,j=0;...) /*! for (int i=0, j=0; ..) is stored as two variable declarations under SgForInitStatement's init_stmt member for (i=0,j=0;...) is stored as a single expression statement, with comma expression (i=0,j=0). 
*/ ROSE_DLL_API bool hasMultipleInitStatmentsOrExpressions (SgForStatement* for_loop); //! Routines to get and set the body of a loop ROSE_DLL_API SgStatement* getLoopBody(SgScopeStatement* loop); ROSE_DLL_API void setLoopBody(SgScopeStatement* loop, SgStatement* body); //! Routines to get the condition of a loop. It recognize While-loop, For-loop, and Do-While-loop ROSE_DLL_API SgStatement* getLoopCondition(SgScopeStatement* loop); //! Set the condition statement of a loop, including While-loop, For-loop, and Do-While-loop. ROSE_DLL_API void setLoopCondition(SgScopeStatement* loop, SgStatement* cond); //! Check if a for-loop has a canonical form, return loop index, bounds, step, and body if requested //! //! A canonical form is defined as : one initialization statement, a test expression, and an increment expression , loop index variable should be of an integer type. IsInclusiveUpperBound is true when <= or >= is used for loop condition ROSE_DLL_API bool isCanonicalForLoop(SgNode* loop, SgInitializedName** ivar=NULL, SgExpression** lb=NULL, SgExpression** ub=NULL, SgExpression** step=NULL, SgStatement** body=NULL, bool *hasIncrementalIterationSpace = NULL, bool* isInclusiveUpperBound = NULL); //! Check if a Fortran Do loop has a complete canonical form: Do I=1, 10, 1 ROSE_DLL_API bool isCanonicalDoLoop(SgFortranDo* loop,SgInitializedName** ivar/*=NULL*/, SgExpression** lb/*=NULL*/, SgExpression** ub/*=NULL*/, SgExpression** step/*=NULL*/, SgStatement** body/*=NULL*/, bool *hasIncrementalIterationSpace/*= NULL*/, bool* isInclusiveUpperBound/*=NULL*/); //! Set the lower bound of a loop header for (i=lb; ...) ROSE_DLL_API void setLoopLowerBound(SgNode* loop, SgExpression* lb); //! Set the upper bound of a loop header,regardless the condition expression type. for (i=lb; i op up, ...) ROSE_DLL_API void setLoopUpperBound(SgNode* loop, SgExpression* ub); //! 
Set the stride(step) of a loop 's incremental expression, regardless the expression types (i+=s; i= i+s, etc) ROSE_DLL_API void setLoopStride(SgNode* loop, SgExpression* stride); //! Normalize loop init stmt by promoting the single variable declaration statement outside of the for loop header's init statement, e.g. for (int i=0;) becomes int i_x; for (i_x=0;..) and rewrite the loop with the new index variable, if necessary ROSE_DLL_API bool normalizeForLoopInitDeclaration(SgForStatement* loop); //! Undo the normalization of for loop's C99 init declaration. Previous record of normalization is used to ease the reverse transformation. ROSE_DLL_API bool unnormalizeForLoopInitDeclaration(SgForStatement* loop); //! Normalize a for loop, return true if successful. Generated constants will be fold by default. //! //! Translations are : //! For the init statement: for (int i=0;... ) becomes int i; for (i=0;..) //! For test expression: //! i<x is normalized to i<= (x-1) and //! i>x is normalized to i>= (x+1) //! For increment expression: //! i++ is normalized to i+=1 and //! i-- is normalized to i+=-1 //! i-=s is normalized to i+= -s ROSE_DLL_API bool forLoopNormalization(SgForStatement* loop, bool foldConstant = true); //! Normalize a for loop's test expression //! i<x is normalized to i<= (x-1) and //! i>x is normalized to i>= (x+1) ROSE_DLL_API bool normalizeForLoopTest(SgForStatement* loop); ROSE_DLL_API bool normalizeForLoopIncrement(SgForStatement* loop); //!Normalize a Fortran Do loop. Make the default increment expression (1) explicit ROSE_DLL_API bool doLoopNormalization(SgFortranDo* loop); //! Unroll a target loop with a specified unrolling factor. It handles steps larger than 1 and adds a fringe loop if the iteration count is not evenly divisible by the unrolling factor. ROSE_DLL_API bool loopUnrolling(SgForStatement* loop, size_t unrolling_factor); //! 
Interchange/permutate a n-level perfectly-nested loop rooted at 'loop' using a lexicographical order number within (0,depth!). ROSE_DLL_API bool loopInterchange(SgForStatement* loop, size_t depth, size_t lexicoOrder); //! Tile the n-level (starting from 1) loop of a perfectly nested loop nest using tiling size s ROSE_DLL_API bool loopTiling(SgForStatement* loopNest, size_t targetLevel, size_t tileSize); //Winnie Loop Collapsing SgExprListExp * loopCollapsing(SgForStatement* target_loop, size_t collapsing_factor); bool getForLoopInformations( SgForStatement * for_loop, SgVariableSymbol * & iterator, SgExpression * & lower_bound, SgExpression * & upper_bound, SgExpression * & stride ); //@} //------------------------------------------------------------------------ //@{ /*! @name Topdown search \brief Top-down traversal from current node to find a node of a specified type */ //! Query a subtree to get all nodes of a given type, with an appropriate downcast. template <typename NodeType> std::vector<NodeType*> querySubTree(SgNode* top, VariantT variant = (VariantT)NodeType::static_variant) { #if 0 printf ("Top of SageInterface::querySubTree() \n"); #endif Rose_STL_Container<SgNode*> nodes = NodeQuery::querySubTree(top,variant); std::vector<NodeType*> result(nodes.size(), NULL); int count = 0; #if 0 printf ("In SageInterface::querySubTree(): before initialization loop \n"); #endif for (Rose_STL_Container<SgNode*>::const_iterator i = nodes.begin(); i != nodes.end(); ++i, ++count) { #if 0 printf ("In SageInterface::querySubTree(): in loop: count = %d \n",count); #endif NodeType* node = dynamic_cast<NodeType*>(*i); ROSE_ASSERT (node); result[count] = node; } #if 0 printf ("Leaving SageInterface::querySubTree(): after initialization loop \n"); #endif return result; } /*! \brief Returns STL vector of SgFile IR node pointers. Demonstrates use of restricted traversal over just SgFile IR nodes. 
*/ std::vector < SgFile * >generateFileList (); /** Get the current SgProject IR Node. * * The library should never have more than one project and it asserts such. If no project has been created yet then this * function returns the null pointer. */ ROSE_DLL_API SgProject * getProject(); //! \return the project associated with a node SgProject * getProject(const SgNode * node); //! Query memory pools to grab SgNode of a specified type template <typename NodeType> static std::vector<NodeType*> getSgNodeListFromMemoryPool() { // This function uses a memory pool traversal specific to the SgFile IR nodes class MyTraversal : public ROSE_VisitTraversal { public: std::vector<NodeType*> resultlist; void visit ( SgNode* node) { NodeType* result = dynamic_cast<NodeType* > (node); ROSE_ASSERT(result!= NULL); if (result!= NULL) { resultlist.push_back(result); } }; virtual ~MyTraversal() {} }; MyTraversal my_traversal; NodeType::traverseMemoryPoolNodes(my_traversal); return my_traversal.resultlist; } /*! \brief top-down traversal from current node to find the main() function declaration */ ROSE_DLL_API SgFunctionDeclaration* findMain(SgNode* currentNode); //! Find the last declaration statement within a scope (if any). This is often useful to decide where to insert another variable declaration statement. Pragma declarations are not treated as a declaration by default in this context. SgStatement* findLastDeclarationStatement(SgScopeStatement * scope, bool includePragma = false); //midend/programTransformation/partialRedundancyElimination/pre.h //! Find referenced symbols within an expression std::vector<SgVariableSymbol*> getSymbolsUsedInExpression(SgExpression* expr); //! Find break statements inside a particular statement, stopping at nested loops or switches /*! loops or switch statements defines their own contexts for break statements. The function will stop immediately if run on a loop or switch statement. 
If fortranLabel is non-empty, breaks (EXITs) to that label within nested loops are included in the returned list. */ std::vector<SgBreakStmt*> findBreakStmts(SgStatement* code, const std::string& fortranLabel = ""); //! Find all continue statements inside a particular statement, stopping at nested loops /*! Nested loops define their own contexts for continue statements. The function will stop immediately if run on a loop statement. If fortranLabel is non-empty, continues (CYCLEs) to that label within nested loops are included in the returned list. */ std::vector<SgContinueStmt*> findContinueStmts(SgStatement* code, const std::string& fortranLabel = ""); std::vector<SgGotoStatement*> findGotoStmts(SgStatement* scope, SgLabelStatement* l); std::vector<SgStatement*> getSwitchCases(SgSwitchStatement* sw); //! Collect all variable references in a subtree void collectVarRefs(SgLocatedNode* root, std::vector<SgVarRefExp* >& result); //! Topdown traverse a subtree from root to find the first declaration given its name, scope (optional, can be NULL), and defining or nondefining flag. template <typename T> T* findDeclarationStatement(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining) { bool found = false; #if 0 printf ("In findDeclarationStatement(): root = %p \n",root); printf ("In findDeclarationStatement(): name = %s \n",name.c_str()); printf ("In findDeclarationStatement(): scope = %p \n",scope); printf ("In findDeclarationStatement(): isDefining = %s \n",isDefining ? "true" : "false"); #endif // Do we really want a NULL pointer to be acceptable input to this function? // Maybe we should have an assertion that it is non-null? if (!root) return NULL; T* decl = dynamic_cast<T*>(root); #if 0 printf ("In findDeclarationStatement(): decl = %p \n",decl); #endif if (decl != NULL) { if (scope) { if ((decl->get_scope() == scope) && (decl->search_for_symbol_from_symbol_table()->get_name() == name)) { found = true; } } else // Liao 2/9/2010. 
We should allow NULL scope { #if 0 // DQ (12/6/2016): Include this into the debugging code to aboid compiler warning about unused variable. SgSymbol* symbol = decl->search_for_symbol_from_symbol_table(); printf ("In findDeclarationStatement(): decl->search_for_symbol_from_symbol_table() = %p \n",symbol); printf ("In findDeclarationStatement(): decl->search_for_symbol_from_symbol_table()->get_name() = %s \n",symbol->get_name().str()); #endif if (decl->search_for_symbol_from_symbol_table()->get_name() == name) { found = true; } } } if (found) { if (isDefining) { #if 0 printf ("In findDeclarationStatement(): decl->get_firstNondefiningDeclaration() = %p \n",decl->get_firstNondefiningDeclaration()); printf ("In findDeclarationStatement(): decl->get_definingDeclaration() = %p \n",decl->get_definingDeclaration()); #endif ROSE_ASSERT (decl->get_definingDeclaration() != NULL); #if 0 printf ("In findDeclarationStatement(): returing decl->get_definingDeclaration() = %p \n",decl->get_definingDeclaration()); #endif return dynamic_cast<T*> (decl->get_definingDeclaration()); } else { #if 0 printf ("In findDeclarationStatement(): returing decl = %p \n",decl); #endif return decl; } } std::vector<SgNode*> children = root->get_traversalSuccessorContainer(); #if 0 printf ("In findDeclarationStatement(): children.size() = %zu \n",children.size()); #endif // DQ (4/10/2016): Note that if we are searching for a function member that has it's defining // declaration defined outside of the class then it will not be found in the child list. for (std::vector<SgNode*>::const_iterator i = children.begin(); i != children.end(); ++i) { T* target = findDeclarationStatement<T> (*i,name,scope,isDefining); if (target) { return target; } } return NULL; } //! Topdown traverse a subtree from root to find the first function declaration matching the given name, scope (optional, can be NULL), and defining or nondefining flag. This is an instantiation of findDeclarationStatement<T>. 
SgFunctionDeclaration* findFunctionDeclaration(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining);

#if 0 //TODO
  // 1. preorder traversal from current SgNode till find next SgNode of type V_SgXXX
  // until reach the end node
  SgNode* getNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);

  // 2. return all nodes of type VariantT following the source node
  std::vector<SgNode*> getAllNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
#endif

//@}
//------------------------------------------------------------------------
//@{
/*! @name Bottom up search
  \brief Backwards traverse through the AST to find a node, findEnclosingXXX()
*/
// remember to put const to all arguments.

/** Find a node by type using upward traversal.
 *
 * Traverse backward through a specified node's ancestors, starting with the node's parent and progressing to more distant
 * ancestors, to find the first node matching the specified or derived type. If @p includingSelf is true then the
 * starting node, @p astNode, is returned if its type matches, otherwise the search starts at the parent of @p astNode.
 *
 * For the purposes of this function, the parent (P) of an SgDeclarationStatement node (N) is considered to be the first
 * non-defining declaration of N if N has both a defining declaration and a first non-defining declaration and the defining
 * declaration is different than the first non-defining declaration.
 *
 * If no ancestor of the requisite type or subtypes is found then this function returns a null pointer.
 *
 * If @p astNode is the null pointer, then the return value is a null pointer. That is, if there is no node, then there cannot
 * be an enclosing node of the specified type.
 */
template <typename NodeType>
NodeType* getEnclosingNode(const SgNode* astNode, const bool includingSelf = false)
{
#define DEBUG_GET_ENCLOSING_NODE 0

#if 1 /* TOP_LEVEL_IF */
  // DQ (12/31/2019): This version does not detect a cycle that Robb's version detects in processing Cxx11_tests/test2016_23.C.
  // This will have to be investigated separately from the issue I am working on currently.

  // DQ (10/20/2012): This is the older version of this implementation. Until I am sure that
  // the newer version (below) is what we want to use I will resolve this conflict by keeping
  // the previous version in place.

  // A null input trivially has no enclosing node of any type.
  if (NULL == astNode)
  {
    return NULL;
  }

  // Optionally accept the starting node itself when it already has the requested type.
  if ( (includingSelf ) && (dynamic_cast<const NodeType*>(astNode)) )
  {
    return const_cast<NodeType*>(dynamic_cast<const NodeType*> (astNode));
  }

  // DQ (3/5/2012): Check for reference to self...
  ROSE_ASSERT(astNode->get_parent() != astNode);

  SgNode* parent = astNode->get_parent();

  // DQ (3/5/2012): Check for loops that will cause infinite loops.
  SgNode* previouslySeenParent = parent;
  bool foundCycle = false;
  int counter = 0;

#if DEBUG_GET_ENCLOSING_NODE
  printf ("In getEnclosingNode(): previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif

  // Phase 1: probe the parent chain only to detect cycles (the search result of this loop
  // is discarded below when parent is reset to previouslySeenParent).
  while ( (foundCycle == false) && (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
  {
    ROSE_ASSERT(parent->get_parent() != parent);
#if DEBUG_GET_ENCLOSING_NODE
    printf (" --- parent = %p = %s \n",parent,parent->class_name().c_str());
    printf (" --- --- parent->get_parent() = %p = %s \n",parent->get_parent(),parent->get_parent()->class_name().c_str());
#endif
#if 1
    // DQ (1/8/2020): ROSE-82 (on RZ) This limit needs to be larger and increasing it to 500 was enough
    // for a specific code with a long chain of if-then-else nesting, So to make this sufficient for more
    // general code we have increased the limit to 100,000. Note that 50 was not enough for real code,
    // but was enough for our regression tests.
    // DQ (12/30/2019): This is added to support detection of infinite loops over parent pointers.
    // if (counter >= 500)
    if (counter >= 100000)
    {
      printf ("Exiting: In getEnclosingNode(): loop limit exceeded: counter = %d \n",counter);
      ROSE_ABORT();
    }
#endif
    parent = parent->get_parent();

    // DQ (3/5/2012): Check for loops that will cause infinite loops.
    // ROSE_ASSERT(parent != previouslySeenParent);
    if (parent == previouslySeenParent)
    {
      foundCycle = true;
    }
    counter++;
  }

#if DEBUG_GET_ENCLOSING_NODE
  printf ("previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif

  // Restart the walk from the original parent; phase 1 above served only as a cycle check.
  parent = previouslySeenParent;

  // Declaration statements are redirected to their defining declaration (when one exists and
  // this is not already the first non-defining declaration) before the real walk begins.
  SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
  if (declarationStatement != NULL)
  {
#if 0
    printf ("Found a SgDeclarationStatement \n");
#endif
    SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
    SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
    printf (" --- declarationStatement = %p \n",declarationStatement);
    printf (" --- definingDeclaration = %p \n",definingDeclaration);
    if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
      printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
    printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
    if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
      printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
    if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
    {
#if 0
      printf ("Found a nondefining declaration so use the non-defining declaration instead \n");
#endif
      // DQ (10/19/2012): Use the defining declaration instead.
      // parent = firstNondefiningDeclaration;
      parent = definingDeclaration;
    }
  }

#if 0
  printf ("reset: previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif

  // DQ (10/19/2012): This branch is just to document the cycle that was previously detected, it is for
  // debugging only. Thus it only makes sense for it to be executed when "(foundCycle == true)". However,
  // this will have to be revisited later since it appears clear that it is a problem for the binary analysis
  // work when it is visited for this case. Since the cycle is detected, but there is no assertion on the
  // cycle, we don't exit when a cycle is identified (which is the point of the code below).
  // Note also that I have fixed the code (above and below) to only chase pointers through defining
  // declarations (where they exist), this is important since non-defining declarations can be almost
  // anywhere (and thus chasing them can make it appear that there are cycles where there are none
  // (I think); test2012_234.C demonstrates an example of this.

  // DQ (10/9/2012): Robb has suggested this change to fix the binary analysis work.
  // Phase 2: the real upward search, performed only when no cycle was detected above.
  // if (foundCycle == true)
  if (foundCycle == false)
  {
    while ( (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
    {
      ROSE_ASSERT(parent->get_parent() != parent);
#if 0
      printf ("In getEnclosingNode() (2nd try): parent = %p = %s \n",parent,parent->class_name().c_str());
      if (parent->get_file_info() != NULL)
        parent->get_file_info()->display("In getEnclosingNode() (2nd try): debug");
#endif
      SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
      if (declarationStatement != NULL)
      {
#if DEBUG_GET_ENCLOSING_NODE
        printf ("Found a SgDeclarationStatement \n");
#endif
        SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
        SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
        printf (" --- declarationStatement = %p = %s \n",declarationStatement,(declarationStatement != NULL) ? declarationStatement->class_name().c_str() : "null");
        printf (" --- definingDeclaration = %p \n",definingDeclaration);
        if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
          printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
        printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
        if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
          printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
        if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
        {
#if 0
          printf ("Found a nondefining declaration so use the firstNondefining declaration instead \n");
#endif
          // DQ (10/19/2012): Use the defining declaration instead.
          // parent = firstNondefiningDeclaration;
          parent = definingDeclaration;
        }
      }
      parent = parent->get_parent();
#if 1
      // DQ (3/5/2012): Check for loops that will cause infinite loops.
      ROSE_ASSERT(parent != previouslySeenParent);
#else
      printf ("WARNING::WARNING::WARNING commented out assertion for parent != previouslySeenParent \n");
      if (parent == previouslySeenParent)
        break;
#endif
    }
  }

  return const_cast<NodeType*>(dynamic_cast<const NodeType*> (parent));

#else /* TOP_LEVEL_IF */
  // DQ (10/20/2012): Using Robb's newer version with my modification to use the definingDeclaration rather than firstNondefiningDeclaration (below).
  // Find the parent of specified type, but watch out for cycles in the ancestry (which would cause an infinite loop).
  // Cast away const because isSg* functions aren't defined for const node pointers; and our return is not const.
  SgNode *node = const_cast<SgNode*>(!astNode || includingSelf ? astNode : astNode->get_parent());
  std::set<const SgNode*> seen; // nodes we've seen, in order to detect cycles
  while (node)
  {
    if (NodeType *found = dynamic_cast<NodeType*>(node))
      return found;

    // FIXME: Cycle detection could be moved elsewhere so we don't need to do it on every call. [RPM 2012-10-09]
    // DQ (12/30/2019): Provide more detail in error message.
    // std::set::insert().second is false when the node was already recorded, i.e. a cycle.
    if (seen.insert(node).second == false)
    {
      printf ("Error: node is already in set and defines a cycle: node = %p = %s \n",node,node->class_name().c_str());
      std::set<const SgNode*>::const_iterator i = seen.begin();
      while (i != seen.end())
      {
        const SgNode* element = *i;
        printf (" --- seen element: element = %p = %s \n",element,element->class_name().c_str());
        i++;
      }
      printf ("Exiting after error! \n");
      ROSE_ABORT();
    }
    // ROSE_ASSERT(seen.insert(node).second);

    // Traverse to parent (declaration statements are a special case)
    if (SgDeclarationStatement *declarationStatement = isSgDeclarationStatement(node))
    {
      SgDeclarationStatement *definingDeclaration = declarationStatement->get_definingDeclaration();
      SgDeclarationStatement *firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
      if (definingDeclaration && firstNondefiningDeclaration && declarationStatement != firstNondefiningDeclaration)
      {
        // DQ (10/19/2012): Use the defining declaration instead.
        // node = firstNondefiningDeclaration;
        node = definingDeclaration;
      }
    }
    else
    {
      node = node->get_parent();
    }
  }
  return NULL;
#endif /* TOP_LEVEL_IF */
}

//! Find enclosing source file node
ROSE_DLL_API SgSourceFile* getEnclosingSourceFile(SgNode* n, const bool includingSelf=false);

//! Get the closest scope from astNode. Return astNode if it is already a scope.
ROSE_DLL_API SgScopeStatement* getScope(const SgNode* astNode);

//! Get the enclosing scope from a node n
ROSE_DLL_API SgScopeStatement* getEnclosingScope(SgNode* n, const bool includingSelf=false);

//! Traverse back through a node's parents to find the enclosing global scope
ROSE_DLL_API SgGlobal* getGlobalScope( const SgNode* astNode);

// DQ (12/7/2020): This is supporting the recognition of functions in header files from two different AST.
//! This is supporting the recognition of functions in header files from two different ASTs
ROSE_DLL_API bool hasSameGlobalScope ( SgStatement* statement_1, SgStatement* statement_2 );

//! Find the function definition
ROSE_DLL_API SgFunctionDefinition* getEnclosingProcedure(SgNode* n, const bool includingSelf=false);

ROSE_DLL_API SgFunctionDefinition* getEnclosingFunctionDefinition(SgNode* astNode, const bool includingSelf=false);

//! Find the closest enclosing statement, including the given node
ROSE_DLL_API SgStatement* getEnclosingStatement(SgNode* n);

//! Find the closest switch outside a given statement (normally used for case and default statements)
ROSE_DLL_API SgSwitchStatement* findEnclosingSwitch(SgStatement* s);

//! Find enclosing OpenMP clause body statement from s. If s is already one, return it directly.
ROSE_DLL_API SgOmpClauseBodyStatement* findEnclosingOmpClauseBodyStatement(SgStatement* s);

//! Find the closest loop outside the given statement; if fortranLabel is not empty, the Fortran label of the loop must be equal to it
ROSE_DLL_API SgScopeStatement* findEnclosingLoop(SgStatement* s, const std::string& fortranLabel = "", bool stopOnSwitches = false);

//! Find the enclosing function declaration, including its derived instances like isSgProcedureHeaderStatement, isSgProgramHeaderStatement, and isSgMemberFunctionDeclaration.
ROSE_DLL_API SgFunctionDeclaration * getEnclosingFunctionDeclaration (SgNode * astNode, const bool includingSelf=false);

//roseSupport/utility_functions.h
//! get the SgFile node from current node
ROSE_DLL_API SgFile* getEnclosingFileNode (SgNode* astNode );

//! Get the initializer containing an expression if it is within an initializer.
ROSE_DLL_API SgInitializer* getInitializerOfExpression(SgExpression* n);

//! Get the closest class definition enclosing the specified AST node,
ROSE_DLL_API SgClassDefinition* getEnclosingClassDefinition(SgNode* astnode, const bool includingSelf=false);

//! Get the closest class declaration enclosing the specified AST node,
ROSE_DLL_API SgClassDeclaration* getEnclosingClassDeclaration( SgNode* astNode );

// DQ (2/7/2019): Adding support for name qualification of variable references associated with SgPointerMemberType function parameters.
//! Get the enclosing SgExprListExp (used as part of function argument index evaluation in subexpressions).
ROSE_DLL_API SgExprListExp* getEnclosingExprListExp(SgNode* astNode, const bool includingSelf = false);

// DQ (2/7/2019): Need a function to return when an expression is in an expression subtree.
// This is part of index evaluation ofr expressions in function argument lists, but likely usefule elsewhere as well. ROSE_DLL_API bool isInSubTree(SgExpression* subtree, SgExpression* exp); // DQ (2/7/2019): Need a function to return the SgFunctionDeclaration from a SgFunctionCallExp. ROSE_DLL_API SgFunctionDeclaration* getFunctionDeclaration ( SgFunctionCallExp* functionCallExp ); // DQ (2/17/2019): Generalizing this support for SgVarRefExp and SgMemberFunctionRefExp nodes. // DQ (2/8/2019): Adding support for detecting when to use added name qualification for pointer-to-member expressions. ROSE_DLL_API bool isDataMemberReference(SgVarRefExp* varRefExp); // ROSE_DLL_API bool isAddressTaken(SgVarRefExp* varRefExp); ROSE_DLL_API bool isAddressTaken(SgExpression* refExp); // DQ (2/17/2019): Adding support for detecting when to use added name qualification for membr function references. ROSE_DLL_API bool isMemberFunctionMemberReference(SgMemberFunctionRefExp* memberFunctionRefExp); // DQ (2/15/2019): Adding support for detecting which class a member reference is being made from. // ROSE_DLL_API SgClassType* getClassTypeForDataMemberReference(SgVarRefExp* varRefExp); // ROSE_DLL_API std::list<SgClassType*> getClassTypeChainForDataMemberReference(SgVarRefExp* varRefExp); ROSE_DLL_API std::list<SgClassType*> getClassTypeChainForMemberReference(SgExpression* refExp); ROSE_DLL_API std::set<SgNode*> getFrontendSpecificNodes(); // DQ (2/17/2019): Display the shared nodes in the AST for debugging. ROSE_DLL_API void outputSharedNodes( SgNode* node ); // DQ (10/31/2020): Added function to help debug edits to statements in scopes. 
ROSE_DLL_API void displayScope(SgScopeStatement* scope);

// TODO
#if 0
SgNode * getEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
std::vector<SgNode *> getAllEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
SgVariableDeclaration* findVariableDeclaratin( const string& varname)
SgClassDeclaration* getEnclosingClassDeclaration( const SgNode* astNode);

// e.g. for some expression, find its parent statement
SgStatement* getEnclosingStatement(const SgNode* astNode);

SgSwitchStatement* getEnclosingSwitch(SgStatement* s);
SgModuleStatement* getEnclosingModuleStatement( const SgNode* astNode);

// used to build a variable reference for compiler generated code in current scope
SgSymbol * findReachingDefinition (SgScopeStatement* startScope, SgName &name);
#endif
//@}

//------------------------------------------------------------------------
//@{
/*! @name AST Walk and Traversal
  \brief
*/
// Liao, 1/9/2008
/*! \brief return the first global scope under current project */
ROSE_DLL_API SgGlobal * getFirstGlobalScope(SgProject *project);

/*! \brief get the last statement within a scope, return NULL if it does not exist */
ROSE_DLL_API SgStatement* getLastStatement(SgScopeStatement *scope);

//! Get the first statement within a scope, return NULL if it does not exist. Skip compiler-generated statement by default. Count transformation-generated ones, but excluding those which are not to be outputted in unparsers.
ROSE_DLL_API SgStatement* getFirstStatement(SgScopeStatement *scope,bool includingCompilerGenerated=false);

//!Find the first defining function declaration statement in a scope
ROSE_DLL_API SgFunctionDeclaration* findFirstDefiningFunctionDecl(SgScopeStatement* scope);

//! Get next statement within the same scope of current statement
ROSE_DLL_API SgStatement* getNextStatement(SgStatement * currentStmt);

//! Get previous statement of the current statement. It may return a previous statement of a parent scope by default (climbOutScope is true), otherwise only a previous statement of the same scope is returned.
ROSE_DLL_API SgStatement* getPreviousStatement(SgStatement * currentStmt, bool climbOutScope = true);

#if 0 //TODO
  // preorder traversal from current SgNode till find next SgNode of type V_SgXXX
  SgNode* getNextSgNode( const SgNode* currentNode, VariantT=V_SgNode);
#endif

// DQ (11/15/2018): Adding support for traversals over the include file tree.
//! return path prefix for subtree of include files.
void listHeaderFiles ( SgIncludeFile* includeFile );

//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Comparison
  \brief Compare AST nodes, subtree, etc
*/

//! Check if a SgIntVal node has a given value
ROSE_DLL_API bool isEqualToIntConst(SgExpression* e, int value);

//! Check if two function declarations refer to the same one. Two function declarations are the same when they are a) identical, b) same name in C c) same qualified named and mangled name in C++. A nondefining (prototype) declaration and a defining declaration of a same function are treated as the same.
/*!
 * There is a similar function bool compareFunctionDeclarations(SgFunctionDeclaration *f1, SgFunctionDeclaration *f2) from Classhierarchy.C
 */
ROSE_DLL_API bool isSameFunction(SgFunctionDeclaration* func1, SgFunctionDeclaration* func2);

//! Check if a statement is the last statement within its closed scope
ROSE_DLL_API bool isLastStatement(SgStatement* stmt);

//@}
//------------------------------------------------------------------------
//@{
/*! @name AST insert, removal, and replacement
  \brief Add, remove, and replace AST
  scope->append_statement(), exprListExp->append_expression() etc. are not enough to handle side effect of parent pointers, symbol tables, preprocessing info, defining/nondefining pointers etc.
 */
#if 1
// Record of a transformation that must be applied to the AST later (after outlining or other
// analysis has completed), rather than at the point where the need for it is discovered.
struct DeferredTransformation
{
  // DQ (11/19/2020): We need to expand the use of this to cover deferred transformations of common SageInterface transformations (e.g. replaceStatement).
  // So I needed to move this out of being specific to the outliner and make it a more general data structure in the SageInterface.

  // DQ (11/15/2020): Need to add the concept of deferred transformation to cover replaceStatement operations.

  // DQ (8/7/2019): Store data required to support deferring the transformation to insert the outlined function prototypes
  // into class declaration (when this is required to support the outlined function's access to protected or private data members).
  // This is part of an optimization to support the optimization of header file unparsing (limiting the overhead of supporting any
  // header file to just focus on the few (typically one) header file that would have to be unparsed.

  // Discriminator for which kind of deferred operation this record describes.
  enum TransformationKind
  {
    // DQ (11/22/2020): Might need to also add SageInterface::addDefaultConstructorIfRequired() and SageStatement::insert_statment()
    // to support the processStatements.C transformations to pre-process the AST (return expressions and variable initializations).
    e_error,
    e_default,
    e_outliner,
    e_replaceStatement,
    e_removeStatement,
    e_replaceDefiningFunctionDeclarationWithFunctionPrototype,
    e_last
  };

  TransformationKind deferredTransformationKind;

  // DQ (12/12/2020): Adding a string label so that we can name the different kinds of transformations.
  // E.g. moving pattern matched function from header file to dynamic library, vs. replacing function
  // definitions in the dynamic library file with function prototypes.
  std::string transformationLabel;

  // Remove sets statementToRemove, replace sets statementToRemove and StatementToAdd.
  SgStatement* statementToRemove;
  SgStatement* statementToAdd;

  SgClassDefinition* class_definition;
  SgDeclarationStatement* target_class_member;
  SgDeclarationStatement* new_function_prototype;

  typedef std::set<SgClassDefinition *> ClassDefSet_t;
  ClassDefSet_t targetClasses;

  typedef std::vector<SgFunctionDeclaration *> FuncDeclList_t;
  FuncDeclList_t targetFriends;

  // DQ (2/28/2021): Adding support for outlining where it involves building up pre-transformations.
  // For example, in the code segregation, we build a conditional around the interval of statements
  // that we are outlining. This conditional is used to overwrite the first statement in the interval
  // list. Because we don't want to transform the AST until after the outlining, we need to save the
  // whole interval so that we, after the outlining, remove the statements in the interval after that
  // first statement.
  typedef std::vector<SgStatement*> IntervalType;
  IntervalType statementInterval;
  SgStatement* locationToOverwriteWithTransformation;
  SgStatement* transformationToOverwriteFirstStatementInInterval;
  SgBasicBlock* blockOfStatementsToOutline;

  // DQ (12/5/2019): Added ROSE_DLL_API prefix for Windows support (to all of these functions).
  ROSE_DLL_API DeferredTransformation();
  ROSE_DLL_API DeferredTransformation(SgClassDefinition* class_definition, SgDeclarationStatement* target_class_member, SgDeclarationStatement* new_function_prototype);
  ROSE_DLL_API DeferredTransformation (const DeferredTransformation& X); //! Copy constructor.
  ROSE_DLL_API ~DeferredTransformation (void); //! Shallow; does not delete fields.

  ROSE_DLL_API DeferredTransformation & operator= (const DeferredTransformation& X); //! operator=()

  // DQ (11/20/20): static functions to generate specialized versions of the deferred transformation object.
  static ROSE_DLL_API DeferredTransformation replaceDefiningFunctionDeclarationWithFunctionPrototype( SgFunctionDeclaration* functionDeclaration );
  static ROSE_DLL_API DeferredTransformation replaceStatement(SgStatement* oldStmt, SgStatement* newStmt, bool movePreprocessinInfo = false);

  static ROSE_DLL_API std::string outputDeferredTransformationKind(const TransformationKind & kind);
  ROSE_DLL_API void display ( std::string label ) const;

};
#endif

// DQ (2/24/2009): Simple function to delete an AST subtree (used in outlining).
//! Function to delete AST subtree's nodes only, users must take care of any dangling pointers, symbols or types that result.
ROSE_DLL_API void deleteAST(SgNode* node);

//! Special purpose function for deleting AST expression trees containing valid original expression trees in constant folded expressions (for internal use only).
ROSE_DLL_API void deleteExpressionTreeWithOriginalExpressionSubtrees(SgNode* root);

// DQ (2/25/2009): Added new function to support outliner.
//! Move statements in first block to the second block (preserves order and rebuilds the symbol table).
ROSE_DLL_API void moveStatementsBetweenBlocks ( SgBasicBlock* sourceBlock, SgBasicBlock* targetBlock );

//! Move statements in Ada's package spec into C++ namespace's definition
ROSE_DLL_API void moveStatementsBetweenBlocks ( SgAdaPackageSpec * sourceBlock, SgNamespaceDefinitionStatement* targetBlock );

//! Move statements in Ada's package body into C++ namespace's definition
ROSE_DLL_API void moveStatementsBetweenBlocks ( SgAdaPackageBody* sourceBlock, SgNamespaceDefinitionStatement* targetBlock );

//! Move statements between C++ namespace's definitions
ROSE_DLL_API void moveStatementsBetweenBlocks ( SgNamespaceDefinitionStatement* sourceBlock, SgNamespaceDefinitionStatement* targetBlock );

//! Move a variable declaration to a new scope, handle symbol, special scopes like For loop, etc.
ROSE_DLL_API void moveVariableDeclaration(SgVariableDeclaration* decl, SgScopeStatement* target_scope); //! Append a statement to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc. ROSE_DLL_API void appendStatement(SgStatement *stmt, SgScopeStatement* scope=NULL); //! Append a statement to the end of SgForInitStatement ROSE_DLL_API void appendStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt); //! Append a list of statements to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc. ROSE_DLL_API void appendStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL); // DQ (2/6/2009): Added function to support outlining into separate file. //! Append a copy ('decl') of a function ('original_statement') into a 'scope', include any referenced declarations required if the scope is within a compiler generated file. All referenced declarations, including those from headers, are inserted if excludeHeaderFiles is set to true (the new file will not have any headers). ROSE_DLL_API void appendStatementWithDependentDeclaration( SgDeclarationStatement* decl, SgGlobal* scope, SgStatement* original_statement, bool excludeHeaderFiles ); //! Prepend a statement to the beginning of the current scope, handling side //! effects as appropriate ROSE_DLL_API void prependStatement(SgStatement *stmt, SgScopeStatement* scope=NULL); //! Prepend a statement to the beginning of SgForInitStatement ROSE_DLL_API void prependStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt); //! prepend a list of statements to the beginning of the current scope, //! handling side effects as appropriate ROSE_DLL_API void prependStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL); //! Check if a scope statement has a simple children statement list //! 
so insert additional statements under the scope is straightforward and unambiguous . //! for example, SgBasicBlock has a simple statement list while IfStmt does not. ROSE_DLL_API bool hasSimpleChildrenList (SgScopeStatement* scope); //! Insert a statement before or after the target statement within the target's scope. Move around preprocessing info automatically ROSE_DLL_API void insertStatement(SgStatement *targetStmt, SgStatement* newStmt, bool insertBefore= true, bool autoMovePreprocessingInfo = true); //! Insert a list of statements before or after the target statement within the //target's scope ROSE_DLL_API void insertStatementList(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts, bool insertBefore= true); //! Insert a statement before a target statement ROSE_DLL_API void insertStatementBefore(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true); //! Insert a list of statements before a target statement ROSE_DLL_API void insertStatementListBefore(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts); //! Insert a statement after a target statement, Move around preprocessing info automatically by default ROSE_DLL_API void insertStatementAfter(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true); //! Insert a list of statements after a target statement ROSE_DLL_API void insertStatementListAfter(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmt); //! Insert a statement after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found ROSE_DLL_API void insertStatementAfterLastDeclaration(SgStatement* stmt, SgScopeStatement* scope); //! Insert a list of statements after the last declaration within a scope. 
The statement will be prepended to the scope if there is no declaration statement found ROSE_DLL_API void insertStatementAfterLastDeclaration(std::vector<SgStatement*> stmt_list, SgScopeStatement* scope); //! Insert a statement before the first non-declaration statement in a scope. If the scope has no non-declaration statements // then the statement is inserted at the end of the scope. ROSE_DLL_API void insertStatementBeforeFirstNonDeclaration(SgStatement *newStmt, SgScopeStatement *scope, bool movePreprocessingInfo=true); //! Insert statements before the first non-declaration statement in a scope. If the scope has no non-declaration statements //then the new statements are inserted at the end of the scope. ROSE_DLL_API void insertStatementListBeforeFirstNonDeclaration(const std::vector<SgStatement*> &newStmts, SgScopeStatement *scope); // DQ (11/21/2018): We need to sometimes insert something after the last statement of the collection from rose_edg_required_macros_and_functions.h. ROSE_DLL_API SgStatement* lastFrontEndSpecificStatement( SgGlobal* globalScope ); //! Remove a statement from its attach point of the AST. Automatically keep its associated preprocessing information at the original place after the removal. The statement is still in memory and it is up to the users to decide if the removed one will be inserted somewhere else or released from memory (deleteAST()). ROSE_DLL_API void removeStatement(SgStatement* stmt, bool autoRelocatePreprocessingInfo = true); //! Deep delete a sub AST tree. It uses postorder traversal to delete each child node. Users must take care of any dangling pointers, symbols or types that result. This is identical to deleteAST() ROSE_DLL_API void deepDelete(SgNode* root); //! Replace a statement with another. Move preprocessing information from oldStmt to newStmt if requested. ROSE_DLL_API void replaceStatement(SgStatement* oldStmt, SgStatement* newStmt, bool movePreprocessinInfo = false); //! 
Replace an anchor node with a specified pattern subtree with optional SgVariantExpression. All SgVariantExpression in the pattern will be replaced with copies of the anchor node. ROSE_DLL_API SgNode* replaceWithPattern (SgNode * anchor, SgNode* new_pattern); //! Replace all variable references to an old symbol in a scope to being references to a new symbol. // Essentially replace variable a with b. ROSE_DLL_API void replaceVariableReferences(SgVariableSymbol* old_sym, SgVariableSymbol* new_sym, SgScopeStatement * scope ); // DQ (11/12/2018): Adding test to avoid issues that we can't test for in the unparsing of header files using the token based unparsing. //! If header file unparsing and token-based unparsing are used, then some statements in header files //! used with the same name and different include syntax can't be transformed. This is currently because //! there is no way to generally test the resulting transformed code generated by ROSE. ROSE_DLL_API bool statementCanBeTransformed(SgStatement* stmt); /** Given an expression, generates a temporary variable whose initializer optionally evaluates * that expression. Then, the var reference expression returned can be used instead of the original * expression. The temporary variable created can be reassigned to the expression by the returned SgAssignOp; * this can be used when the expression the variable represents needs to be evaluated. NOTE: This handles * reference types correctly by using pointer types for the temporary. * @param expression Expression which will be replaced by a variable * @param scope scope in which the temporary variable will be generated * @param reEvaluate an assignment op to reevaluate the expression. Leave NULL if not needed * @return declaration of the temporary variable, and a a variable reference expression to use instead of * the original expression. 
*/ std::pair<SgVariableDeclaration*, SgExpression* > createTempVariableForExpression(SgExpression* expression, SgScopeStatement* scope, bool initializeInDeclaration, SgAssignOp** reEvaluate = NULL); /* This function creates a temporary variable for a given expression in the given scope This is different from SageInterface::createTempVariableForExpression in that it does not try to be smart to create pointers to reference types and so on. The tempt is initialized to expression. The caller is responsible for setting the parent of SgVariableDeclaration since buildVariableDeclaration may not set_parent() when the scope stack is empty. See programTransformation/extractFunctionArgumentsNormalization/ExtractFunctionArguments.C for sample usage. @param expression Expression which will be replaced by a variable @param scope scope in which the temporary variable will be generated */ std::pair<SgVariableDeclaration*, SgExpression*> createTempVariableAndReferenceForExpression (SgExpression* expression, SgScopeStatement* scope); //! Append an argument to SgFunctionParameterList, transparently set parent,scope, and symbols for arguments when possible /*! We recommend to build SgFunctionParameterList before building a function declaration However, it is still allowed to append new arguments for existing function declarations. \todo function type , function symbol also need attention. */ ROSE_DLL_API SgVariableSymbol* appendArg(SgFunctionParameterList *, SgInitializedName*); //!Prepend an argument to SgFunctionParameterList ROSE_DLL_API SgVariableSymbol* prependArg(SgFunctionParameterList *, SgInitializedName*); //! Append an expression to a SgExprListExp, set the parent pointer also ROSE_DLL_API void appendExpression(SgExprListExp *, SgExpression*); //! Append an expression list to a SgExprListExp, set the parent pointers also ROSE_DLL_API void appendExpressionList(SgExprListExp *, const std::vector<SgExpression*>&); //! 
Set parameter list for a function declaration, considering existing parameter list etc. template <class actualFunction> void setParameterList(actualFunction *func,SgFunctionParameterList *paralist) { // TODO consider the difference between C++ and Fortran // fixup the scope of arguments,no symbols for nondefining function declaration's arguments // DQ (11/25/2011): templated function so that we can handle both // SgFunctionDeclaration and SgTemplateFunctionDeclaration (and their associated member // function derived classes). ROSE_ASSERT(func != NULL); ROSE_ASSERT(paralist != NULL); #if 0 // At this point we don't have cerr and endl defined, so comment this code out. // Warn to users if a paralist is being shared if (paralist->get_parent() !=NULL) { cerr << "Waring! Setting a used SgFunctionParameterList to function: " << (func->get_name()).getString()<<endl << " Sharing parameter lists can corrupt symbol tables!"<<endl << " Please use deepCopy() to get an exclusive parameter list for each function declaration!"<<endl; // ROSE_ASSERT(false); } #endif // Liao,2/5/2008 constructor of SgFunctionDeclaration will automatically generate SgFunctionParameterList, so be cautious when set new paralist!! if (func->get_parameterList() != NULL) { if (func->get_parameterList() != paralist) { delete func->get_parameterList(); } } func->set_parameterList(paralist); paralist->set_parent(func); if (SageInterface::is_Ada_language()) { // Ada stores variable declarations in the function parameter scope (for functions) // and in a discriminantScope (for discriminated declarations). // ==> just make sure that these are set. SgInitializedNamePtrList& args = paralist->get_args(); for (SgInitializedNamePtrList::iterator i = args.begin(); i != args.end(); ++i) { ROSE_ASSERT(*i && isSgVariableDeclaration((*i)->get_declptr())); } } else { // DQ (5/15/2012): Need to set the declptr in each SgInitializedName IR node. // This is needed to support the AST Copy mechanism (at least). 
The files: test2005_150.C, // test2012_81.C and testcode2012_82.C demonstrate this problem. SgInitializedNamePtrList & args = paralist->get_args(); for (SgInitializedNamePtrList::iterator i = args.begin(); i != args.end(); i++) { (*i)->set_declptr(func); } } } //! Set a pragma of a pragma declaration. handle memory release for preexisting pragma, and set parent pointer. ROSE_DLL_API void setPragma(SgPragmaDeclaration* decl, SgPragma *pragma); //! Replace an expression with another, used for variable reference substitution and others. the old expression can be deleted (default case) or kept. ROSE_DLL_API void replaceExpression(SgExpression* oldExp, SgExpression* newExp, bool keepOldExp=false); //! Replace a given expression with a list of statements produced by a generator ROSE_DLL_API void replaceExpressionWithStatement(SgExpression* from, SageInterface::StatementGenerator* to); //! Similar to replaceExpressionWithStatement, but with more restrictions. //! Assumptions: from is not within the test of a loop or ifStmt, not currently traversing from or the statement it is in ROSE_DLL_API void replaceSubexpressionWithStatement(SgExpression* from, SageInterface::StatementGenerator* to); //! Set operands for expressions with single operand, such as unary expressions. handle file info, lvalue, pointer downcasting, parent pointer etc. ROSE_DLL_API void setOperand(SgExpression* target, SgExpression* operand); //!set left hand operand for binary expressions, transparently downcasting target expressions when necessary ROSE_DLL_API void setLhsOperand(SgExpression* target, SgExpression* lhs); //!set left hand operand for binary expression ROSE_DLL_API void setRhsOperand(SgExpression* target, SgExpression* rhs); //! Set original expression trees to NULL for SgValueExp or SgCastExp expressions, so you can change the value and have it unparsed correctly. ROSE_DLL_API void removeAllOriginalExpressionTrees(SgNode* top); // DQ (1/25/2010): Added support for directories //! 
Move file to be generated in a subdirectory (will be generated by the unparser). ROSE_DLL_API void moveToSubdirectory ( std::string directoryName, SgFile* file ); //! Supporting function to comment relocation in insertStatement() and removeStatement(). ROSE_DLL_API SgStatement* findSurroundingStatementFromSameFile(SgStatement* targetStmt, bool & surroundingStatementPreceedsTargetStatement); //! Relocate comments and CPP directives from one statement to another. ROSE_DLL_API void moveCommentsToNewStatement(SgStatement* sourceStatement, const std::vector<int> & indexList, SgStatement* targetStatement, bool surroundingStatementPreceedsTargetStatement); // DQ (7/19/2015): This is required to support general unparsing of template instantations for the GNU g++ // compiler which does not permit name qualification to be used to support the expression of the namespace // where a template instantiatoon would be places. Such name qualification would also sometimes require // global qualification which is also not allowed by the GNU g++ compiler. These issues appear to be // specific to the GNU compiler versions, at least versions 4.4 through 4.8. //! Relocate the declaration to be explicitly represented in its associated namespace (required for some backend compilers to process template instantiations). ROSE_DLL_API void moveDeclarationToAssociatedNamespace ( SgDeclarationStatement* declarationStatement ); ROSE_DLL_API bool isTemplateInstantiationNode(SgNode* node); ROSE_DLL_API void wrapAllTemplateInstantiationsInAssociatedNamespaces(SgProject* root); // DQ (12/1/2015): Adding support for fixup internal data struuctures that have references to statements (e.g. macro expansions). ROSE_DLL_API void resetInternalMapsForTargetStatement(SgStatement* sourceStatement); // DQ (6/7/2019): Add support for transforming function definitions to function prototypes in a subtree. // We might have to make this specific to a file (only traversing the functions in that file). 
/*!\brief XXX * This function operates on the new file used to support outlined function definitions. * We use a copy of the file where the code will be outlined FROM, so that if there are references to * declarations in the outlined code we can support the outpiled code with those references. This * approach has the added advantage of also supporting the same include file tree as the original * file where the outlined code is being taken from. */ ROSE_DLL_API void convertFunctionDefinitionsToFunctionPrototypes(SgNode* node); // DQ (11/10/2019): Lower level support for convertFunctionDefinitionsToFunctionPrototypes(). // DQ (10/27/2020): Need to return the generated function prototype (incase we want to mark it for output or template unparsing from the AST). // ROSE_DLL_API void replaceDefiningFunctionDeclarationWithFunctionPrototype ( SgFunctionDeclaration* functionDeclaration ); // ROSE_DLL_API SgDeclarationStatement* replaceDefiningFunctionDeclarationWithFunctionPrototype ( SgFunctionDeclaration* functionDeclaration ); ROSE_DLL_API SgFunctionDeclaration* replaceDefiningFunctionDeclarationWithFunctionPrototype ( SgFunctionDeclaration* functionDeclaration ); ROSE_DLL_API std::vector<SgFunctionDeclaration*> generateFunctionDefinitionsList(SgNode* node); // DQ (10/29/2020): build a function prototype for all but member functions outside of the class (except for template instantiations). // The reason why member functions outside of the class are an exception is because they can not be used except in a class and there // would already be one present for the code to compile. ROSE_DLL_API SgFunctionDeclaration* buildFunctionPrototype ( SgFunctionDeclaration* functionDeclaration ); //@} //------------------------------------------------------------------------ //@{ /*! @name AST repair, fix, and postprocessing. \brief Mostly used internally when some AST pieces are built without knowing their target scope/parent, especially during bottom-up construction of AST. 
The associated symbols, parent and scope pointers cannot be set on construction then. A set of utility functions are provided to patch up scope, parent, symbol for them when the target scope/parent become know. */ //! Connect variable reference to the right variable symbols when feasible, return the number of references being fixed. /*! In AST translation, it is possible to build a variable reference before the variable is being declared. buildVarRefExp() will use fake initialized name and symbol as placeholders to get the work done. Users should call fixVariableReference() when AST is complete and all variable declarations are in place. */ ROSE_DLL_API int fixVariableReferences(SgNode* root, bool cleanUnusedSymbol=true); //!Patch up symbol, scope, and parent information when a SgVariableDeclaration's scope is known. /*! It is possible to build a variable declaration without knowing its scope information during bottom-up construction of AST, though top-down construction is recommended in general. In this case, we have to patch up symbol table, scope and parent information when the scope is known. This function is usually used internally within appendStatment(), insertStatement(). */ ROSE_DLL_API void fixVariableDeclaration(SgVariableDeclaration* varDecl, SgScopeStatement* scope); //! Fix symbols, parent and scope pointers. Used internally within appendStatment(), insertStatement() etc when a struct declaration was built without knowing its target scope. ROSE_DLL_API void fixStructDeclaration(SgClassDeclaration* structDecl, SgScopeStatement* scope); //! Fix symbols, parent and scope pointers. Used internally within appendStatment(), insertStatement() etc when a class declaration was built without knowing its target scope. ROSE_DLL_API void fixClassDeclaration(SgClassDeclaration* classDecl, SgScopeStatement* scope); //! Fix symbols, parent and scope pointers. 
Used internally within appendStatment(), insertStatement() etc when a namespace declaration was built without knowing its target scope. ROSE_DLL_API void fixNamespaceDeclaration(SgNamespaceDeclarationStatement* structDecl, SgScopeStatement* scope); //! Fix symbol table for SgLabelStatement. Used Internally when the label is built without knowing its target scope. Both parameters cannot be NULL. ROSE_DLL_API void fixLabelStatement(SgLabelStatement* label_stmt, SgScopeStatement* scope); //! Set a numerical label for a Fortran statement. The statement should have a enclosing function definition already. SgLabelSymbol and SgLabelRefExp are created transparently as needed. ROSE_DLL_API void setFortranNumericLabel(SgStatement* stmt, int label_value, SgLabelSymbol::label_type_enum label_type=SgLabelSymbol::e_start_label_type, SgScopeStatement* label_scope=NULL); //! Suggest next usable (non-conflicting) numeric label value for a Fortran function definition scope ROSE_DLL_API int suggestNextNumericLabel(SgFunctionDefinition* func_def); //! Fix the symbol table and set scope (only if scope in declaration is not already set). ROSE_DLL_API void fixFunctionDeclaration(SgFunctionDeclaration* stmt, SgScopeStatement* scope); //! Fix the symbol table and set scope (only if scope in declaration is not already set). ROSE_DLL_API void fixTemplateDeclaration(SgTemplateDeclaration* stmt, SgScopeStatement* scope); //! A wrapper containing fixes (fixVariableDeclaration(),fixStructDeclaration(), fixLabelStatement(), etc) for all kinds statements. Should be used before attaching the statement into AST. ROSE_DLL_API void fixStatement(SgStatement* stmt, SgScopeStatement* scope); // DQ (6/11/2015): This reports the statements that are marked as transformed (used to debug the token-based unparsing). //! This collects the statements that are marked as transformed (useful in debugging). ROSE_DLL_API std::set<SgStatement*> collectTransformedStatements( SgNode* node ); //! 
This collects the statements that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging). ROSE_DLL_API std::set<SgStatement*> collectModifiedStatements( SgNode* node ); //! This collects the SgLocatedNodes that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging). ROSE_DLL_API std::set<SgLocatedNode*> collectModifiedLocatedNodes( SgNode* node ); // DQ (6/5/2019): Use the previously constructed set (above) to reset the IR nodes to be marked as isModified. //! Use the set of IR nodes and set the isModified flag in each IR node to true. ROSE_DLL_API void resetModifiedLocatedNodes(const std::set<SgLocatedNode*> & modifiedNodeSet); // DQ (10/23/2018): Report nodes that are marked as modified. ROSE_DLL_API void reportModifiedStatements(const std::string & label, SgNode* node); // DQ (3/22/2019): Translate CPP directives from attached preprocessor information to CPP Directive Declaration IR nodes. ROSE_DLL_API void translateToUseCppDeclarations( SgNode* n ); ROSE_DLL_API void translateScopeToUseCppDeclarations( SgScopeStatement* scope ); ROSE_DLL_API std::vector<SgC_PreprocessorDirectiveStatement*> translateStatementToUseCppDeclarations( SgStatement* statement, SgScopeStatement* scope); ROSE_DLL_API void printOutComments ( SgLocatedNode* locatedNode ); ROSE_DLL_API bool skipTranslateToUseCppDeclaration( PreprocessingInfo* currentPreprocessingInfo ); // DQ (12/2/2019): Debugging support. ROSE_DLL_API void outputFileIds( SgNode* node ); //@} //! Update defining and nondefining links due to a newly introduced function declaration. Should be used after inserting the function into a scope. /*! This function not only set the defining and nondefining links of the newly introduced * function declaration inside a scope, but also update other same function declarations' links * accordingly if there are any. 
* Assumption: The function has already inserted/appended/prepended into the scope before calling this function. */ ROSE_DLL_API void updateDefiningNondefiningLinks(SgFunctionDeclaration* func, SgScopeStatement* scope); //------------------------------------------------------------------------ //@{ /*! @name Advanced AST transformations, analyses, and optimizations \brief Some complex but commonly used AST transformations. */ //! Collect all read and write references within stmt, which can be a function, a scope statement, or a single statement. Note that a reference can be both read and written, like i++ ROSE_DLL_API bool collectReadWriteRefs(SgStatement* stmt, std::vector<SgNode*>& readRefs, std::vector<SgNode*>& writeRefs, bool useCachedDefUse=false); //!Collect unique variables which are read or written within a statement. Note that a variable can be both read and written. The statement can be either of a function, a scope, or a single line statement. For accesses to members of aggregate data, we return the coarse grain aggregate mem obj by default. ROSE_DLL_API bool collectReadWriteVariables(SgStatement* stmt, std::set<SgInitializedName*>& readVars, std::set<SgInitializedName*>& writeVars, bool coarseGrain=true); //!Collect read only variables within a statement. The statement can be either of a function, a scope, or a single line statement. For accesses to members of aggregate data, we return the coarse grain aggregate mem obj by default. ROSE_DLL_API void collectReadOnlyVariables(SgStatement* stmt, std::set<SgInitializedName*>& readOnlyVars, bool coarseGrain=true); //!Collect read only variable symbols within a statement. The statement can be either of a function, a scope, or a single line statement. For accesses to members of aggregate data, we return the coarse grain aggregate mem obj by default. ROSE_DLL_API void collectReadOnlySymbols(SgStatement* stmt, std::set<SgVariableSymbol*>& readOnlySymbols, bool coarseGrain=true); //! 
Check if a variable reference is used by its address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++ ROSE_DLL_API bool isUseByAddressVariableRef(SgVarRefExp* ref); //! Collect variable references involving use by address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++ ROSE_DLL_API void collectUseByAddressVariableRefs (const SgStatement* s, std::set<SgVarRefExp* >& varSetB); #ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT //!Call liveness analysis on an entire project ROSE_DLL_API LivenessAnalysis * call_liveness_analysis(SgProject* project, bool debug=false); //!get liveIn and liveOut variables for a for loop from liveness analysis result liv. ROSE_DLL_API void getLiveVariables(LivenessAnalysis * liv, SgForStatement* loop, std::set<SgInitializedName*>& liveIns, std::set<SgInitializedName*> & liveOuts); #endif //!Recognize and collect reduction variables and operations within a C/C++ loop, following OpenMP 3.0 specification for allowed reduction variable types and operation types. ROSE_DLL_API void ReductionRecognition(SgForStatement* loop, std::set< std::pair <SgInitializedName*, OmpSupport::omp_construct_enum> > & results); //! Constant folding an AST subtree rooted at 'r' (replacing its children with their constant values, if applicable). Please be advised that constant folding on floating point computation may decrease the accuracy of floating point computations! /*! It is a wrapper function for ConstantFolding::constantFoldingOptimization(). Note that only r's children are replaced with their corresponding constant values, not the input SgNode r itself. You have to call this upon an expression's parent node if you want to fold the expression. */ ROSE_DLL_API void constantFolding(SgNode* r); //!Instrument(Add a statement, often a function call) into a function right before the return points, handle multiple return statements (with duplicated statement s) and return expressions with side effects. 
Return the number of statements inserted. /*! Useful when adding a runtime library call to terminate the runtime system right before the end of a program, especially for OpenMP and UPC runtime systems. Return with complex expressions with side effects are rewritten using an additional assignment statement. */ ROSE_DLL_API int instrumentEndOfFunction(SgFunctionDeclaration * func, SgStatement* s); //! Remove jumps whose label is immediately after the jump. Used to clean up inlined code fragments. ROSE_DLL_API void removeJumpsToNextStatement(SgNode*); //! Remove labels which are not targets of any goto statements: its child statement is also removed by default. ROSE_DLL_API void removeUnusedLabels(SgNode* top, bool keepChild =false); //! Find unused labels which are not targets of any goto statements ROSE_DLL_API std::set<SgLabelStatement*> findUnusedLabels (SgNode* top); //! Remove consecutive labels ROSE_DLL_API void removeConsecutiveLabels(SgNode* top); //! Merge a variable assignment statement into a matching variable declaration statement. Callers should make sure the merge is semantically correct (by not introducing compilation errors). This function simply does the merge transformation, without eligibility check. /*! * e.g. int i; i=10; becomes int i=10; the original i=10 will be deleted after the merge * if success, return true, otherwise return false (e.g. variable declaration does not match or already has an initializer) * The original assignment stmt will be removed by default * This function is a bit ambiguous about the merge direction, to be phased out. */ ROSE_DLL_API bool mergeDeclarationAndAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt, bool removeAssignStmt = true); //! Merge an assignment into its upstream declaration statement. Callers should make sure the merge is semantically correct. 
ROSE_DLL_API bool mergeAssignmentWithDeclaration (SgExprStatement* assign_stmt, SgVariableDeclaration* decl, bool removeAssignStmt = true); //! Merge a declaration statement into a matching followed variable assignment. Callers should make sure the merge is semantically correct (by not introducing compilation errors). This function simply does the merge transformation, without eligibility check. /*! * e.g. int i; i=10; becomes int i=10; the original int i; will be deleted after the merge */ ROSE_DLL_API bool mergeDeclarationWithAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt); //! Split a variable declaration with an rhs assignment into two statements: a declaration and an assignment. /*! Return the generated assignment statement, if any * e.g. int i =10; becomes int i; i=10; * This can be seen as a normalization of declarations */ ROSE_DLL_API SgExprStatement* splitVariableDeclaration (SgVariableDeclaration* decl); //! Split declarations within a scope into declarations and assignment statements, by default only top level declarations are considered. Return the number of declarations split. ROSE_DLL_API int splitVariableDeclaration (SgScopeStatement* scope, bool topLevelOnly = true); //! Replace an expression with a temporary variable and an assignment statement /*! Add a new temporary variable to contain the value of 'from'. Change reference to 'from' to use this new variable. Assumptions: (1)'from' is not within the test of a loop or 'if'; (2)not currently traversing 'from' or the statement it is in. Return value: the new temp variable declaration's assign initializer containing the from expression. */ ROSE_DLL_API SgAssignInitializer* splitExpression(SgExpression* from, std::string newName = ""); //! Split long expressions into blocks of statements ROSE_DLL_API void splitExpressionIntoBasicBlock(SgExpression* expr); //! Remove labeled goto statements ROSE_DLL_API void removeLabeledGotos(SgNode* top); //! 
If the given statement contains any break statements in its body, add a new label below the statement and change the breaks into gotos to that new label. ROSE_DLL_API void changeBreakStatementsToGotos(SgStatement* loopOrSwitch); //! Check if the body of a 'for' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfFor(SgForStatement* fs); //! Check if the body of a 'upc_forall' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfUpcForAll(SgUpcForAllStatement* fs); //! Check if the body of a 'while' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfWhile(SgWhileStmt* ws); //! Check if the body of a 'do .. while' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfDoWhile(SgDoWhileStmt* ws); //! Check if the body of a 'switch' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfSwitch(SgSwitchStatement* ws); //! Check if the body of a 'case option' statement is a SgBasicBlock, create one if not. SgBasicBlock* ensureBasicBlockAsBodyOfCaseOption(SgCaseOptionStmt* cs); //! Check if the body of a 'default option' statement is a SgBasicBlock, create one if not. SgBasicBlock* ensureBasicBlockAsBodyOfDefaultOption(SgDefaultOptionStmt * cs); //! Check if the true body of a 'if' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsTrueBodyOfIf(SgIfStmt* ifs); //! Check if the false body of a 'if' statement is a SgBasicBlock, create one if not when the flag is true. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsFalseBodyOfIf(SgIfStmt* ifs, bool createEmptyBody = true); //! Check if the body of a 'catch' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfCatch(SgCatchOptionStmt* cos); //! 
Check if the body of a SgOmpBodyStatement is a SgBasicBlock, create one if not ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfOmpBodyStmt(SgOmpBodyStatement* ompbodyStmt); // DQ (1/18/2015): This is added to support better quality token-based unparsing. //! Remove unused basic block IR nodes added as part of normalization. ROSE_DLL_API void cleanupNontransformedBasicBlockNode(); // DQ (1/18/2015): This is added to support better quality token-based unparsing. //! Record where normalization have been done so that we can preform denormalizations as required for the token-based unparsing to generate minimal diffs. ROSE_DLL_API void recordNormalizations(SgStatement* s); //! Check if a statement is a (true or false) body of a container-like parent, such as For, Upc_forall, Do-while, //! switch, If, Catch, OmpBodyStmt, etc bool isBodyStatement (SgStatement* s); //! Fix up ifs, loops, while, switch, Catch, OmpBodyStatement, etc. to have blocks as body components. It also adds an empty else body to if statements that don't have them. void changeAllBodiesToBlocks(SgNode* top, bool createEmptyBody = true); // The same as changeAllBodiesToBlocks(SgNode* top). Phased out. //void changeAllLoopBodiesToBlocks(SgNode* top); //! Make a single statement body to be a basic block. Its parent is if, while, catch, or upc_forall etc. SgBasicBlock * makeSingleStatementBodyToBlock(SgStatement* singleStmt); #if 0 /** If s is the body of a loop, catch, or if statement and is already a basic block, * s is returned unmodified. Otherwise generate a SgBasicBlock between s and its parent * (a loop, catch, or if statement, etc). */ SgLocatedNode* ensureBasicBlockAsParent(SgStatement* s); #endif //! Get the constant value from a constant integer expression; abort on //! everything else. Note that signed long longs are converted to unsigned. unsigned long long getIntegerConstantValue(SgValueExp* expr); //! Get a statement's dependent declarations which declares the types used in the statement. 
The returned vector of declaration statements are sorted according to their appearance order in the original AST. Any reference to a class or template class from a namespace will treated as a reference to the enclosing namespace. std::vector<SgDeclarationStatement*> getDependentDeclarations (SgStatement* stmt ); //! Insert an expression (new_exp )before another expression (anchor_exp) has possible side effects, without changing the original semantics. This is achieved by using a comma operator: (new_exp, anchor_exp). The comma operator is returned. SgCommaOpExp *insertBeforeUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp); //! Insert an expression (new_exp ) after another expression (anchor_exp) has possible side effects, without changing the original semantics. This is done by using two comma operators: type T1; ... ((T1 = anchor_exp, new_exp),T1) )... , where T1 is a temp variable saving the possible side effect of anchor_exp. The top level comma op exp is returned. The reference to T1 in T1 = anchor_exp is saved in temp_ref. SgCommaOpExp *insertAfterUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp, SgStatement** temp_decl = NULL, SgVarRefExp** temp_ref = NULL); /// \brief moves the body of a function f to a new function f`; /// f's body is replaced with code that forwards the call to f`. /// \return a pair indicating the statement containing the call of f` /// and an initialized name refering to the temporary variable /// holding the result of f`. In case f returns void /// the initialized name is NULL. /// \param definingDeclaration the defining function declaration of f /// \param newName the name of function f` /// \details f's new body becomes { f`(...); } and { int res = f`(...); return res; } /// for functions returning void and a value, respectively. /// two function declarations are inserted in f's enclosing scope /// \code /// result_type f`(...); <--- (1) /// result_type f (...) { forward call to f` } /// result_type f`(...) 
{ original code } <--- (2)
/// \endcode
/// Calls to f are not updated, thus in the transformed code all
/// calls will continue calling f (this is also true for
/// recursive function calls from within the body of f`).
/// After the function has created the wrapper,
/// definingDeclaration becomes the wrapper function
/// The definition of f` is the next entry in the
/// statement list; the forward declaration of f` is the previous
/// entry in the statement list.
/// \pre definingDeclaration must be a defining declaration of a
/// free standing function.
/// typeid(SgFunctionDeclaration) == typeid(definingDeclaration)
/// i.e., this function is NOT implemented for class member functions,
/// template functions, procedures, etc.
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, SgName newName);

/// \overload
/// \tparam NameGen functor that generates a new name based on the old name.
///         interface: SgName nameGen(const SgName&)
/// \param nameGen name generator
/// \brief see wrapFunction for details
template <class NameGen>
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, NameGen nameGen)
{
  // Derive the wrapper's name from the wrapped function's current name, then
  // forward to the non-template overload, which performs the actual rewrite.
  return wrapFunction(definingDeclaration, nameGen(definingDeclaration.get_name()));
}

/// \brief convenience function that returns the first initialized name in a
///        list of variable declarations.
SgInitializedName& getFirstVariable(SgVariableDeclaration& vardecl);

//@}

// DQ (6/7/2012): Unclear where this function should go...
// NOTE(review): declaration only — semantics (what counts as "template syntax") live in the definition; confirm there.
bool hasTemplateSyntax( const SgName & name );

#if 0
//------------------------AST dump, stringify-----------------------------
//------------------------------------------------------------------------
std::string buildOperatorString ( SgNode* astNode ); //transformationSupport.h

// do we need these?
std::string dump_node(const SgNode* astNode); std::string dump_tree(const SgNode* astNode); // or a friendly version of unparseToString(), as a memeber function std::string SgNode::toString(bool asSubTree=true); // dump node or subtree //----------------------------AST comparison------------------------------ //------------------------------------------------------------------------ // How to get generic functions for comparison? bool isNodeEqual(SgNode* node1, SgNode* node2); //? bool isTreeEqual(SgNode* tree1, SgNode* tree2); //! Are two expressions equal (using a deep comparison)? bool expressionTreeEqual(SgExpression*, SgExpression*); //! Are corresponding expressions in two lists equal (using a deep comparison)? bool expressionTreeEqualStar(const SgExpressionPtrList&, const SgExpressionPtrList&); //----------------------AST verfication/repair---------------------------- //------------------------------------------------------------------------ // sanity check of AST subtree, any suggestions? // TODO verifySgNode(SgNode* node, bool subTree=true); //src/midend/astDiagnostics/AstConsistencyTests.h // AstTests::runAllTests(SgProject * ) //src/midend/astUtil/astInterface/AstInterface.h.C //FixSgProject(SgProject &project) //FixSgTree(SgNode* r) //src/frontend/SageIII/astPostProcessing //AstPostProcessing(SgNode * node) //--------------------------AST modification------------------------------ //------------------------------------------------------------------------ // any operations changing AST tree, including // insert, copy, delete(remove), replace // insert before or after some point, argument list is consistent with LowLevelRewrite void insertAst(SgNode* targetPosition, SgNode* newNode, bool insertBefore=true); // previous examples //void myStatementInsert(SgStatement* target,...) 
// void AstInterfaceBase::InsertStmt(AstNodePtr const & orig, AstNodePtr const &n, bool insertbefore, bool extractfromBasicBlock) // copy // copy children of one basic block to another basic block //void appendStatementCopy (const SgBasicBlock* a, SgBasicBlock* b); void copyStatements (const SgBasicBlock* src, SgBasicBlock* dst); // delete (remove) a node or a whole subtree void removeSgNode(SgNode* targetNode); // need this? void removeSgNodeTree(SgNode* subtree); // need this? void removeStatement( SgStatement* targetStmt); //Move = delete + insert void moveAst (SgNode* src, SgNode* target); // need this? // similar to void moveStatements (SgBasicBlock* src, SgBasicBlock* target); // replace= delete old + insert new (via building or copying) // DQ (1/25/2010): This does not appear to exist as a definition anywhere in ROSE. // void replaceAst(SgNode* oldNode, SgNode* newNode); //void replaceChild(SgNode* parent, SgNode* from, SgNode* to); //bool AstInterface::ReplaceAst( const AstNodePtr& orig, const AstNodePtr& n) //--------------------------AST transformations--------------------------- //------------------------------------------------------------------------ // Advanced AST modifications through basic AST modifications // Might not be included in AST utitlity list, but listed here for the record. // extract statements/content from a scope void flattenBlocks(SgNode* n); //src/midend/astInlining/inlinerSupport.h void renameVariables(SgNode* n); void renameLabels(SgNode* n, SgFunctionDefinition* enclosingFunctionDefinition); void simpleCopyAndConstantPropagation(SgNode* top); void changeAllMembersToPublic(SgNode* n); void removeVariableDeclaration(SgInitializedName* initname); //! Convert something like "int a = foo();" into "int a; a = foo();" SgAssignOp* convertInitializerIntoAssignment(SgAssignInitializer* init); //! Rewrites a while or for loop so that the official test is changed to //! "true" and what had previously been the test is now an if-break //! 
combination (with an inverted condition) at the beginning of the loop //! body void pushTestIntoBody(LoopStatement* loopStmt); //programTransformation/finiteDifferencing/finiteDifferencing.h //! Move variables declared in a for statement to just outside that statement. void moveForDeclaredVariables(SgNode* root); //------------------------ Is/Has functions ------------------------------ //------------------------------------------------------------------------ // misc. boolean functions // some of them could moved to SgXXX class as a member function bool isOverloaded (SgFunctionDeclaration * functionDeclaration); bool isSwitchCond (const SgStatement* s); bool isIfCond (const SgStatement* s); bool isWhileCond (const SgStatement* s); bool isStdNamespace (const SgScopeStatement* scope); bool isTemplateInst (const SgDeclarationStatement* decl); bool isCtor (const SgFunctionDeclaration* func); bool isDtor (const SgFunctionDeclaration* func); // src/midend/astInlining/typeTraits.h bool hasTrivialDestructor(SgType* t); ROSE_DLL_API bool isNonconstReference(SgType* t); ROSE_DLL_API bool isReferenceType(SgType* t); // generic ones, or move to the SgXXX class as a member function bool isConst(SgNode* node); // const type, variable, function, etc. // .... 
and more bool isConstType (const SgType* type); bool isConstFunction (const SgFunctionDeclaration* decl); bool isMemberVariable(const SgInitializedName & var); //bool isMemberVariable(const SgNode& in); bool isPrototypeInScope (SgScopeStatement * scope, SgFunctionDeclaration * functionDeclaration, SgDeclarationStatement * startingAtDeclaration); bool MayRedefined(SgExpression* expr, SgNode* root); // bool isPotentiallyModified(SgExpression* expr, SgNode* root); // inlinderSupport.h bool hasAddressTaken(SgExpression* expr, SgNode* root); //src/midend/astInlining/inlinerSupport.C // can also classified as topdown search bool containsVariableReference(SgNode* root, SgInitializedName* var); bool isDeclarationOf(SgVariableDeclaration* decl, SgInitializedName* var); bool isPotentiallyModifiedDuringLifeOf(SgBasicBlock* sc, SgInitializedName* toCheck, SgInitializedName* lifetime) //src/midend/programTransformation/partialRedundancyElimination/pre.h bool anyOfListPotentiallyModifiedIn(const std::vector<SgVariableSymbol*>& syms, SgNode* n); //------------------------ loop handling --------------------------------- //------------------------------------------------------------------------ //get and set loop control expressions // 0: init expr, 1: condition expr, 2: stride expr SgExpression* getForLoopTripleValues(int valuetype,SgForStatement* forstmt ); int setForLoopTripleValues(int valuetype,SgForStatement* forstmt, SgExpression* exp); bool isLoopIndexVarRef(SgForStatement* forstmt, SgVarRefExp *varref); SgInitializedName * getLoopIndexVar(SgForStatement* forstmt); //------------------------expressions------------------------------------- //------------------------------------------------------------------------ //src/midend/programTransformation/partialRedundancyElimination/pre.h int countComputationsOfExpressionIn(SgExpression* expr, SgNode* root); //src/midend/astInlining/replaceExpressionWithStatement.h void replaceAssignmentStmtWithStatement(SgExprStatement* from, 
StatementGenerator* to); void replaceSubexpressionWithStatement(SgExpression* from, StatementGenerator* to); SgExpression* getRootOfExpression(SgExpression* n); //--------------------------preprocessing info. ------------------------- //------------------------------------------------------------------------ //! Removes all preprocessing information at a given position. void cutPreprocInfo (SgBasicBlock* b, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& save_buf); //! Pastes preprocessing information at the front of a statement. void pastePreprocInfoFront (AttachedPreprocessingInfoType& save_buf, SgStatement* s); //! Pastes preprocessing information at the back of a statement. void pastePreprocInfoBack (AttachedPreprocessingInfoType& save_buf, SgStatement* s); /*! * \brief Moves 'before' preprocessing information. * Moves all preprocessing information attached 'before' the source * statement to the front of the destination statement. */ // a generic one for all /// void movePreprocessingInfo(src, dest, RelativePositionType); void moveBeforePreprocInfo (SgStatement* src, SgStatement* dest); void moveInsidePreprocInfo (SgBasicBlock* src, SgBasicBlock* dest); void moveAfterPreprocInfo (SgStatement* src, SgStatement* dest); //--------------------------------operator-------------------------------- //------------------------------------------------------------------------ from transformationSupport.h, not sure if they should be included here /* return enum code for SAGE operators */ operatorCodeType classifyOverloadedOperator(); // transformationSupport.h /*! \brief generates a source code string from operator name. This function returns a string representing the elementwise operator (for primative types) that would be match that associated with the overloaded operator for a user-defined abstractions (e.g. identifyOperator("operator+()") returns "+"). 
*/ std::string stringifyOperator (std::string name); //--------------------------------macro ---------------------------------- //------------------------------------------------------------------------ std::string buildMacro ( std::string s ); //transformationSupport.h //--------------------------------access functions--------------------------- //----------------------------------get/set sth.----------------------------- // several categories: * get/set a direct child/grandchild node or fields * get/set a property flag value * get a descendent child node using preorder searching * get an ancestor node using bottomup/reverse searching // SgName or string? std::string getFunctionName (SgFunctionCallExp* functionCallExp); std::string getFunctionTypeName ( SgFunctionCallExp* functionCallExpression ); // do we need them anymore? or existing member functions are enought? // a generic one: std::string get_name (const SgNode* node); std::string get_name (const SgDeclarationStatement * declaration); // get/set some property: should moved to SgXXX as an inherent memeber function? // access modifier void setExtern (SgFunctionDeclartion*) void clearExtern() // similarly for other declarations and other properties void setExtern (SgVariableDeclaration*) void setPublic() void setPrivate() #endif // DQ (1/23/2013): Added support for generated a set of source sequence entries. 
std::set<unsigned int> collectSourceSequenceNumbers( SgNode* astNode );

//--------------------------------Type Traits (C++)---------------------------
// Type-trait queries applied to ROSE SgType nodes; presumably these mirror the
// C++ standard type traits of the same names — confirm against their definitions.
bool HasNoThrowAssign(const SgType * const inputType);
bool HasNoThrowCopy(const SgType * const inputType);
bool HasNoThrowConstructor(const SgType * const inputType);
bool HasTrivialAssign(const SgType * const inputType);
bool HasTrivialCopy(const SgType * const inputType);
bool HasTrivialConstructor(const SgType * const inputType);
bool HasTrivialDestructor(const SgType * const inputType);
bool HasVirtualDestructor(const SgType * const inputType);
bool IsBaseOf(const SgType * const inputBaseType, const SgType * const inputDerivedType);
bool IsAbstract(const SgType * const inputType);
bool IsClass(const SgType * const inputType);
bool IsEmpty(const SgType * const inputType);
bool IsEnum(const SgType * const inputType);
bool IsPod(const SgType * const inputType);
bool IsPolymorphic(const SgType * const inputType);
bool IsStandardLayout(const SgType * const inputType);
bool IsLiteralType(const SgType * const inputType);
bool IsTrivial(const SgType * const inputType);
bool IsUnion(const SgType * const inputType);
SgType * UnderlyingType(SgType *type);

// DQ (3/2/2014): Added a new interface function (used in the snippet insertion support).
// void supportForInitializedNameLists ( SgScopeStatement* scope, SgInitializedNamePtrList & variableList );

// DQ (3/4/2014): Added support for testing two trees for equivalents using the AST iterators.
bool isStructurallyEquivalentAST( SgNode* tree1, SgNode* tree2 );

// JP (10/14/24): Moved code to evaluate a const integer expression (like in array size definitions) to SageInterface
/*! The data structure used as the return type for SageInterface::evaluateConstIntegerExpression().
    One must always check whether hasValue_ is true before accessing value_. */
struct const_int_expr_t {
  size_t value_;   // the evaluated constant; meaningful only when hasValue_ is true
  bool hasValue_;  // true iff the expression could be evaluated to a constant
};

/*!
\brief The function tries to evaluate const integer expressions (such as are used in array dimension sizes). It follows variable symbols, and requires constness. */ struct const_int_expr_t evaluateConstIntegerExpression(SgExpression *expr); // JP (9/17/14): Added function to test whether two SgType* are equivalent or not bool checkTypesAreEqual(SgType *typeA, SgType *typeB); //--------------------------------Java interface functions --------------------- #ifdef ROSE_BUILD_JAVA_LANGUAGE_SUPPORT ROSE_DLL_API std::string getTempDirectory(SgProject *project); ROSE_DLL_API void destroyTempDirectory(std::string); ROSE_DLL_API SgFile *processFile(SgProject *, std::string, bool unparse = false); ROSE_DLL_API std::string preprocessPackage(SgProject *, std::string); ROSE_DLL_API std::string preprocessImport(SgProject *, std::string); ROSE_DLL_API SgFile* preprocessCompilationUnit(SgProject *, std::string, std::string, bool unparse = true); ROSE_DLL_API SgClassDefinition *findJavaPackage(SgScopeStatement *, std::string); ROSE_DLL_API SgClassDefinition *findOrInsertJavaPackage(SgProject *, std::string, bool create_directory = false); ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassDefinition *package_definition, std::string); ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, std::string, std::string); ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassType *); ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassDefinition *); ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassType *); #endif // ROSE_BUILD_JAVA_LANGUAGE_SUPPORT // DQ (8/31/2016): Making this a template function so that we can have it work with user defined filters. //! This function detects template instantiations that are relevant when filters are used. /*! EDG normalizes some in-class template functions and member functions to be redefined outside of a class. 
this causes the associated template instantiations to be declared outside of the class, and to be
    marked as compiler generated (since the compiler generated form outside of the class declaration).
    ROSE captures the function definitions, but in the new location (defined outside of the class
    declaration). This can confuse some simple tests for template instantiations that are a part of
    definitions in a file, thus we have this function to detect this specific normalization.

    \tparam T filter type; must provide operator() accepting both
              SgTemplateFunctionDeclaration* and SgTemplateMemberFunctionDeclaration*
              and returning bool.
    \param function the function declaration to classify.
    \param filter pointer to the user-defined filter object (dereferenced unconditionally,
                  so it must be non-null).
    \return true only if function is a (member) function template instantiation whose
            originating template declaration satisfies the filter; false otherwise.
 */
template < class T >
bool isTemplateInstantiationFromTemplateDeclarationSatisfyingFilter (SgFunctionDeclaration* function, T* filter )
   {
  // DQ (9/1/2016): This function is called in the Call graph generation to avoid filtering out EDG normalized
  // function template instantiations (which come from normalized template functions and member functions).
  // Note that because of the EDG normalization the member function is moved outside of the class, and
  // thus marked as compiler generated. However the template instantiations are always marked as compiler
  // generated (if not specializations) and so we want to include a template instantiation that is marked
  // as compiler generated, but is from a template declaration that satisfied a specific user defined filter.
  // The complexity of this detection is isolated here, but knowing that it must be called is more complex.
  // This function is called in the CG.C file of tests/nonsmoke/functional/roseTests/programAnalysisTests/testCallGraphAnalysis.

     bool retval = false;

#define DEBUG_TEMPLATE_NORMALIZATION_DETECTION 0

#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
     printf ("In isNormalizedTemplateInstantiation(): function = %p = %s = %s \n",function,function->class_name().c_str(),function->get_name().str());
#endif

  // Test for this to be a template instantiation (in which case it was marked as
  // compiler generated but we may want to allow it to be used in the call graph,
  // if its template was defined in the current directory).
     SgTemplateInstantiationFunctionDecl* templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(function);
     SgTemplateInstantiationMemberFunctionDecl* templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(function);

     if (templateInstantiationFunction != NULL)
        {
       // When the defining function has been normalized by EDG, only the non-defining declaration will have a source position.
          templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(templateInstantiationFunction->get_firstNondefiningDeclaration());
          SgTemplateFunctionDeclaration* templateFunctionDeclaration = templateInstantiationFunction->get_templateDeclaration();
          if (templateFunctionDeclaration != NULL)
             {
            // Ask the user-supplied filter about the originating template declaration.
               retval = filter->operator()(templateFunctionDeclaration);
             }
            else
             {
            // Assume false.
             }
#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
          printf (" --- case of templateInstantiationFunction: retval = %s \n",retval ? "true" : "false");
#endif
        }
       else
        {
          if (templateInstantiationMemberFunction != NULL)
             {
            // When the defining function has been normalized by EDG, only the non-defining declaration will have a source position.
               templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(templateInstantiationMemberFunction->get_firstNondefiningDeclaration());
               SgTemplateMemberFunctionDeclaration* templateMemberFunctionDeclaration = templateInstantiationMemberFunction->get_templateDeclaration();
               if (templateMemberFunctionDeclaration != NULL)
                  {
                 // Ask the user-supplied filter about the originating member function template declaration.
                    retval = filter->operator()(templateMemberFunctionDeclaration);
                  }
                 else
                  {
                 // Assume false.
                  }
#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
               printf (" --- case of templateInstantiationMemberFunction: retval = %s \n",retval ? "true" : "false");
#endif
             }
        }

  // retval stays false for plain (non-instantiation) functions and for
  // instantiations whose template declaration could not be recovered.
     return retval;
   }

void detectCycleInType(SgType * type, const std::string & from);

// DQ (7/14/2020): Debugging support.
void checkForInitializers( SgNode* node );

}// end of namespace

#endif
GB_serialize_array.c
//------------------------------------------------------------------------------
// GB_serialize_array: serialize an array, with optional compression
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Parallel compression method for an array. The array is compressed into
// a sequence of independently allocated blocks, or returned as-is if not
// compressed. Currently, only LZ4 is supported.

#include "GB.h"
#include "GB_serialize.h"
#include "GB_lz4.h"

// Error-handling cleanup: free the cumulative-size array and every
// per-block buffer allocated so far.  Relies on the local variables
// Sblocks, Sblocks_size, Blocks, Blocks_size, and nblocks being in scope.
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_FREE (&Sblocks, Sblocks_size) ;                                      \
    GB_serialize_free_blocks (&Blocks, Blocks_size, nblocks, Context) ;     \
}

GrB_Info GB_serialize_array
(
    // output:
    GB_blocks **Blocks_handle,          // Blocks: array of size nblocks+1
    size_t *Blocks_size_handle,         // size of Blocks
    int64_t **Sblocks_handle,           // Sblocks: array of size nblocks+1;
                                        // cumulative compressed block sizes
    size_t *Sblocks_size_handle,        // size of Sblocks
    int32_t *nblocks_handle,            // # of blocks
    int32_t *method_used,               // method used
    size_t *compressed_size,            // size of compressed block, or upper
                                        // bound if dryrun is true
    // input:
    bool dryrun,                        // if true, just estimate the size
    GB_void *X,                         // input array of size len
    int64_t len,                        // size of X, in bytes
    int32_t method,                     // compression method requested
    bool intel,                         // if true, use Intel IPPS
                                        // (not referenced by this LZ4-only body)
    int32_t algo,                       // compression algorithm
    int32_t level,                      // compression level (used by LZ4HC only)
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;     // (not otherwise used in this method)
    ASSERT (Blocks_handle != NULL) ;
    ASSERT (Blocks_size_handle != NULL) ;
    ASSERT (Sblocks_handle != NULL) ;
    ASSERT (Sblocks_size_handle != NULL) ;
    ASSERT (nblocks_handle != NULL) ;
    ASSERT (method_used != NULL) ;
    ASSERT (compressed_size != NULL) ;
    GB_blocks *Blocks = NULL ;
    size_t Blocks_size = 0, Sblocks_size = 0 ;
    int32_t nblocks = 0 ;
    int64_t *Sblocks = NULL ;

    //--------------------------------------------------------------------------
    // check for quick return
    //--------------------------------------------------------------------------

    // Clear all outputs first so every early-return path leaves them defined.
    (*Blocks_handle) = NULL ;
    (*Blocks_size_handle) = 0 ;
    (*Sblocks_handle) = NULL ;
    (*Sblocks_size_handle) = 0 ;
    (*nblocks_handle) = 0 ;
    (*method_used) = GxB_COMPRESSION_NONE ;
    (*compressed_size) = 0 ;

    if (X == NULL || len == 0)
    {
        // input array is empty
        return (GrB_SUCCESS) ;
    }

    //--------------------------------------------------------------------------
    // check for no compression
    //--------------------------------------------------------------------------

    // Arrays shorter than 256 bytes are not worth compressing.
    if (method <= GxB_COMPRESSION_NONE || len < 256)
    {
        // no compression, return result as a single block (plus the sentinel)
        if (!dryrun)
        {
            Blocks = GB_MALLOC (2, GB_blocks, &Blocks_size) ;
            Sblocks = GB_MALLOC (2, int64_t, &Sblocks_size) ;
            if (Blocks == NULL || Sblocks == NULL)
            {
                // out of memory
                GB_FREE_ALL ;
                return (GrB_OUT_OF_MEMORY) ;
            }
            Blocks [0].p = X ;                  // first block is all of the array X
            Blocks [0].p_size_allocated = 0 ;   // p is shallow
            Sblocks [0] = 0 ;                   // start of first block
            Blocks [1].p = NULL ;               // 2nd block is the final sentinel
            Blocks [1].p_size_allocated = 0 ;   // p is shallow
            Sblocks [1] = len ;                 // first block ends at len-1
            (*Blocks_handle) = Blocks ;
            (*Blocks_size_handle) = Blocks_size ;
            (*Sblocks_handle) = Sblocks ;
            (*Sblocks_size_handle) = Sblocks_size ;
        }
        (*compressed_size) = len ;
        (*nblocks_handle) = 1 ;
        return (GrB_SUCCESS) ;
    }

    (*method_used) = method ;

    //--------------------------------------------------------------------------
    // determine # of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (len, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // determine # of blocks and allocate them
    //--------------------------------------------------------------------------

    // divide the array into blocks, 4 per thread, or a single block if 1 thread
    int64_t blocksize = (nthreads == 1) ? len : GB_ICEIL (len, 4*nthreads) ;

    // ensure the blocksize does not exceed the LZ4 maximum
    ASSERT (LZ4_MAX_INPUT_SIZE < INT32_MAX) ;
    blocksize = GB_IMIN (blocksize, LZ4_MAX_INPUT_SIZE/2) ;

    // ensure the blocksize is not too small
    blocksize = GB_IMAX (blocksize, (64*1024)) ;

    // determine the final # of blocks
    nblocks = GB_ICEIL (len, blocksize) ;
    nthreads = GB_IMIN (nthreads, nblocks) ;
    (*nblocks_handle) = nblocks ;

    // allocate the output Blocks: one per block plus the sentinel block
    if (!dryrun)
    {
        Blocks = GB_CALLOC (nblocks+1, GB_blocks, &Blocks_size) ;
        Sblocks = GB_CALLOC (nblocks+1, int64_t, &Sblocks_size) ;
        if (Blocks == NULL || Sblocks == NULL)
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }
    }

    // allocate the blocks, one at a time
    int32_t blockid ;
    bool ok = true ;
    for (blockid = 0 ; blockid < nblocks && ok ; blockid++)
    {
        // allocate a single block for the compression of X [kstart:kend-1]
        int64_t kstart, kend ;
        GB_PARTITION (kstart, kend, len, blockid, nblocks) ;
        size_t uncompressed = kend - kstart ;
        ASSERT (uncompressed < INT32_MAX) ;
        ASSERT (uncompressed > 0) ;
        // LZ4_compressBound gives the worst-case compressed size for this block
        size_t s = (size_t) LZ4_compressBound ((int) uncompressed) ;
        ASSERT (s < INT32_MAX) ;
        if (dryrun)
        {
            // do not allocate the block; just sum up the upper bound sizes
            (*compressed_size) += s ;
        }
        else
        {
            // allocate the block
            size_t size_allocated = 0 ;
            GB_void *p = GB_MALLOC (s, GB_void, &size_allocated) ;
            ok = (p != NULL) ;
            Blocks [blockid].p = p ;
            Blocks [blockid].p_size_allocated = size_allocated ;
        }
    }

    if (dryrun)
    {
        // GrB_Matrix_serializeSize: no more work to do.  (*compressed_size) is
        // an upper bound of the blob_size required when the matrix is
        // compressed, and (*nblocks_handle) is the number of blocks to be used.
        // No space has been allocated.
        return (GrB_SUCCESS) ;
    }

    if (!ok)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // compress the blocks in parallel
    //--------------------------------------------------------------------------

    // Each block is compressed independently; the reduction folds any
    // per-block failure into a single flag.
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic) \
        reduction(&&:ok)
    for (blockid = 0 ; blockid < nblocks ; blockid++)
    {
        // compress X [kstart:kend-1] into Blocks [blockid].p
        int64_t kstart, kend ;
        GB_PARTITION (kstart, kend, len, blockid, nblocks) ;
        const char *src = (const char *) (X + kstart) ;     // source
        char *dst = (char *) Blocks [blockid].p ;           // destination
        int srcSize = (int) (kend - kstart) ;               // size of source
        size_t dsize = Blocks [blockid].p_size_allocated ;  // size of dest
        int dstCapacity = GB_IMIN (dsize, INT32_MAX) ;
        int s ;
        switch (algo)
        {
            default :
            case GxB_COMPRESSION_LZ4 :
                s = LZ4_compress_default (src, dst, srcSize, dstCapacity) ;
                break ;
            case GxB_COMPRESSION_LZ4HC :
                s = LZ4_compress_HC (src, dst, srcSize, dstCapacity, level) ;
                break ;
        }
        // LZ4 returns 0 on failure, and the compressed size (> 0) on success
        ok = ok && (s > 0) ;
        // compressed block is now in dst [0:s-1], of size s
        Sblocks [blockid] = (int64_t) s ;
    }

    if (!ok)
    {
        // compression failure: this can "never" occur
        GB_FREE_ALL ;
        return (GrB_INVALID_OBJECT) ;
    }

    //--------------------------------------------------------------------------
    // compute cumulative sum of the compressed blocks
    //--------------------------------------------------------------------------

    // Sblocks [blockid] becomes the offset of block blockid in the blob, and
    // Sblocks [nblocks] the total compressed size.
    GB_cumsum (Sblocks, nblocks, NULL, 1, Context) ;

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    (*Blocks_handle) = Blocks ;
    (*Blocks_size_handle) = Blocks_size ;
    (*Sblocks_handle) = Sblocks ;
    (*Sblocks_size_handle) = Sblocks_size ;
    (*compressed_size) = Sblocks [nblocks] ;    // actual size of the blob
    return (GrB_SUCCESS) ;
}
t_factorize_root_parallel.c
/* ========================================================================== */
/* === GPU/t_factorize_root_parallel.c ====================================== */
/* ========================================================================== */

/* -----------------------------------------------------------------------------
 * CHOLMOD/GPU Module.  Copyright (C) 2005-2012, Timothy A. Davis
 * The CHOLMOD/GPU Module is licensed under Version 2.0 of the GNU
 * General Public License.  See gpl.txt for a text of the license.
 * CHOLMOD is also available under other licenses; contact authors for details.
 * http://www.suitesparse.com
 * -------------------------------------------------------------------------- */

/*
 * File:
 *   t_factorize_root
 *
 * Description:
 *   Contains functions for factorization of the root algorithm.
 *   (The function below actually returns Common->status; see its header.)
 *
 */

/* includes */
#include <string.h>
#include <time.h>
#ifdef MKLROOT
#include "mkl.h"
#endif
//#include "nvToolsExt.h"
#include <pthread.h>

/* undef macros */
#undef L_ENTRY
#undef L_CLEAR
#undef L_ASSIGN
#undef L_MULTADD
#undef L_ASSEMBLE
#undef L_ASSEMBLESUB

/* macros: one scalar-access macro set per numeric type.  L_ENTRY is the
 * number of doubles per logical entry (1 real, 2 complex). */
#ifdef REAL

/* A, F, and L are all real */
#define L_ENTRY 1
#define L_CLEAR(Lx,p)                   Lx [p] = 0
#define L_ASSIGN(Lx,q, Ax,Az,p)         Lx [q] = Ax [p]
#define L_MULTADD(Lx,q, Ax,Az,p, f)     Lx [q] += Ax [p] * f [0]
#define L_ASSEMBLE(Lx,q,b)              Lx [q] += b [0]
#define L_ASSEMBLESUB(Lx,q,C,p)         Lx [q] -= C [p]

#else

/* A and F are complex or zomplex, L and C are complex */
#define L_ENTRY 2
#define L_CLEAR(Lx,p)                   Lx [2*(p)] = 0 ; Lx [2*(p)+1] = 0
#define L_ASSEMBLE(Lx,q,b)              Lx [2*(q)] += b [0] ;
#define L_ASSEMBLESUB(Lx,q,C,p)         Lx [2*(q)  ] -= C [2*(p)  ] ; \
                                        Lx [2*(q)+1] -= C [2*(p)+1] ;

#ifdef COMPLEX

/* A, F, L, and C are all complex */
#define L_ASSIGN(Lx,q, Ax,Az,p)         Lx [2*(q)  ] = Ax [2*(p)  ] ; \
                                        Lx [2*(q)+1] = Ax [2*(p)+1]
#define L_MULTADD(Lx,q, Ax,Az,p, f)     Lx [2*(q)  ] += Ax [2*(p)  ] * f [0] - Ax [2*(p)+1] * f [1] ; \
                                        Lx [2*(q)+1] += Ax [2*(p)+1] * f [0] + Ax [2*(p)  ] * f [1]

#else

/* A and F are zomplex, L and C is complex */
#define L_ASSIGN(Lx,q, Ax,Az,p)         Lx [2*(q)  ] = Ax [p] ; \
                                        Lx [2*(q)+1] = Az [p] ;
#define L_MULTADD(Lx,q, Ax,Az,p, f)     Lx [2*(q)  ] += Ax [p] * f [0] - Az [p] * f [1] ; \
                                        Lx [2*(q)+1] += Az [p] * f [0] + Ax [p] * f [1]

#endif
#endif

/*
 * Function:
 *   gpu_factorize_root_parallel
 *
 * Description:
 *   Factorizes top-of-tree of elimination tree, where
 *   the subtree does not fit the GPU.  Utilizes a hybrid algorithm
 *   presented at GTC14.
 *   Returns Common->status (the body itself only runs when
 *   SUITESPARSE_CUDA is defined).
 *
 */
int TEMPLATE2 (CHOLMOD (gpu_factorize_root_parallel))
(
    cholmod_common *Common,
    cholmod_factor *L,
    cholmod_gpu_pointers *gpu_p,
    cholmod_cpu_pointers *cpu_p,
    cholmod_tree_pointers *tree_p,
    Int subtree
)
{
#ifdef SUITESPARSE_CUDA

    /* local variables */
    size_t devBuffSize;
    int gpuid, numThreads, numThreads1;
    Int start_global, end_global, node, Apacked, Fpacked, stype, n;
    Int *Ls, *Lpi, *Lpx, *Lpos, *Fp, *Fi, *Fnz, *Ap, *Ai, *Anz, *Super, *h_Map,
        *SuperMap, *Head, *Next, *Next_save, *Previous, *Lpos_save,
        *supernode_levels, *supernode_levels_ptrs, *supernode_levels_subtree_ptrs,
        *supernode_num_levels;
    double *Lx, *Ax, *Az, *Fx, *Fz, *h_C, *beta;
    double one[2] = {1.0, 0.0}, zero[2] = {0.0, 0.0};

    /* NOTE(review): omp_set_nested is deprecated as of OpenMP 5.0 in favor of
     * omp_set_max_active_levels — confirm against the toolchain in use. */
    omp_set_nested(1);

    /*
     * Set variables & pointers
     */
    /* set host variables */
    n = L->n;
    numThreads = Common->ompNumThreads;
    /* threads per GPU, rounded up */
    numThreads1 = (Common->ompNumThreads + Common->numGPU - 1)/Common->numGPU;
    /* device buffer size in doubles, not bytes */
    devBuffSize = ((size_t)(Common->devBuffSize))/sizeof(double);
    Apacked = cpu_p->Apacked;
    Fpacked = cpu_p->Fpacked;
    stype = cpu_p->stype;
    beta = cpu_p->beta;

    /* set host pointers */
    Ls = cpu_p->Ls;
    Lpi = cpu_p->Lpi;
    Lpx = L->px;
    Lpos = cpu_p->Lpos;
    Fp = cpu_p->Fp;
    Fi = cpu_p->Fi;
    Fnz = cpu_p->Fnz;
    Ap = cpu_p->Ap;
    Ai = cpu_p->Ai;
    Anz = cpu_p->Anz;
    Super = cpu_p->Super;
    h_Map = cpu_p->Map;
    SuperMap = cpu_p->SuperMap;
    Head = cpu_p->Head;
    Next = cpu_p->Next;
    Next_save = cpu_p->Next_save;
    Lpos_save = cpu_p->Lpos_save;
    Previous = cpu_p->Previous;
    Lx = cpu_p->Lx;
    Ax = cpu_p->Ax;
    Az = cpu_p->Az;
    Fx = cpu_p->Fx;
    Fz = cpu_p->Fz;
    h_C = cpu_p->C;

    /* set tree pointers */
    supernode_levels = tree_p->supernode_levels;
    supernode_levels_ptrs = tree_p->supernode_levels_ptrs;
    supernode_levels_subtree_ptrs = tree_p->supernode_levels_subtree_ptrs;
    supernode_num_levels = tree_p->supernode_num_levels;

    /* initialize GPU */
    for(gpuid = 0; gpuid < Common->numGPU; gpuid++)
    {
        TEMPLATE2 (CHOLMOD (gpu_init_root))(Common, gpu_p, L, Lpi, L->nsuper, n, gpuid);
    }

    /*
     * loop over levels in subtree
     * Previously this looped over levels, synchronizing between levels.
     * Now this loops over all supernodes, ordered by levels, but with no synchronization
     * between levels.  Instead, supernodes from different levels can proceed in parallel,
     * with appropriate flags and spin-waits to ensure descendant supernodes are complete.
     * This provided a major performance increase when using multiple GPUs.
     */
    {
        /* [start_global, end_global) is this subtree's slice of the
         * level-ordered supernode list */
        start_global = supernode_levels_ptrs[supernode_levels_subtree_ptrs[subtree]];
        end_global = supernode_levels_ptrs[supernode_levels_subtree_ptrs[subtree]+supernode_num_levels[subtree]];

        Int *Next_local = cpu_p->Next_local;
        Int *Previous_local = cpu_p->Previous_local;
        Int *Lpos_local = cpu_p->Lpos_local;

        /* create two vectors - one with the supernode id and one with a counter to synchronize supernodes */
        Int event_len = end_global - start_global;
        Int *pending = (Int *) malloc (event_len*sizeof(Int));
        Int *leaves = (Int *) malloc (event_len*sizeof(Int));
        Int nleaves;
        omp_lock_t *node_locks = (omp_lock_t *) malloc (event_len*sizeof(omp_lock_t));

        for ( node = start_global; node < end_global; node++ )
        {
            pending[node-start_global] = 0;
        }

        /* pending[i] counts how many children of supernode i (within this
         * slice) must finish before i may be factorized */
        for ( node = start_global; node < end_global; node++ )
        {
            Int s, psi, nscol, sparent;
            Int inode;
            s = supernode_levels[node];
            psi = Lpi[s];
            nscol = Super[s+1] - Super[s];
            sparent = SuperMap [Ls [psi + nscol]];
            if (sparent > s && sparent < L->nsuper)
            {
                for (inode = node; inode < end_global; inode++)
                {
                    if (sparent == supernode_levels[inode])
                    {
                        pending[inode-start_global]++;
                        break;
                    }
                }
            }
        }

        /* supernodes with no pending children are the starting leaves */
        nleaves = 0;
        for ( node = start_global; node < end_global; node++ )
        {
            Int s;
            s = supernode_levels[node];
            if (pending[node-start_global] == 0) leaves[nleaves++] = s;
        }

        /* one lock per supernode slot so only one thread factorizes it.
         * NOTE(review): no omp_unset_lock appears in this file; the locks are
         * only tested (omp_test_lock) and destroyed — verify against upstream
         * that a once-only acquisition is the intended protocol. */
        for ( node=0; node<event_len; node++ )
        {
            omp_init_lock(&node_locks[node]);
        }

        /* loop over supernodes */
        {
            Int leaf_idx;
            /* NOTE(review): debug print in a hot path; also %ld is used for
             * Common->numGPU while the companion fields use %d — confirm the
             * field types and specifiers match. */
            printf ("threads = %ld, GPUs = %d, threads per GPU = %d\n",
                Common->numGPU, Common->numGPU_physical, Common->numGPU_parallel);

            /* one OpenMP thread per (virtual) GPU; each thread walks a chain
             * from its leaf toward the root */
            #pragma omp parallel for schedule(static) num_threads(Common->numGPU) private ( leaf_idx, node, gpuid )
            for(leaf_idx = 0; leaf_idx < nleaves; leaf_idx++)
            {
                /* local variables */
                Int i, j, k;
                Int px, pk, pf, p, q, d, s, ss, ndrow, ndrow1, ndrow2, ndrow3, ndcol, nsrow, nsrow2, nscol,
                    nscol2, nscol3, kd1, kd2, k1, k2, psx, psi, pdx, pdx1, pdi, pdi1, pdi2, pdend, psend,
                    pfend, pend, dancestor, sparent, imap, idescendant, ndescendants, dlarge, iHostBuff,
                    iDevBuff, iDevCBuff, dsmall, tail, info = 0, GPUavailable, mapCreatedOnGpu, supernodeUsedGPU;
                Int repeat_supernode;
                cudaError_t cuErrHost;
#ifdef QUERY_LX_EVENTS
                cudaError_t cuErrDev;
#endif
                Int desc_count;
                Int syrk_count;
                Int gemm_count;
                Int counter;
                /* batched CPU descendant work (one slot per thread) */
                struct cholmod_desc_t desc[Common->ompNumThreads];
                struct cholmod_syrk_t syrk[Common->ompNumThreads];
                struct cholmod_gemm_t gemm[Common->ompNumThreads];

                /* set device id, pointers */
                gpuid = omp_get_thread_num();                   /* get gpuid */
                Int *Map = &h_Map[gpuid*n];                     /* set map */
                double *C1 = &h_C[gpuid*devBuffSize];           /* set Cbuff */
                const int nthreads = 1;
                cudaSetDevice(gpuid / Common->numGPU_parallel); /* set device */

                repeat_supernode = FALSE;
                nscol3 = 0;

                /* get supernode dimensions */
                s = leaves[leaf_idx];
                node = start_global;
                /* walk the chain leaf -> parent -> ... until EMPTY; spin-wait
                 * (via the pending counter and per-node lock) until s is ready */
                while (s != EMPTY)
                {
                    while (node < end_global && supernode_levels[node] != s) node++;
                    if (node < end_global && pending[node-start_global] <= 0 && omp_test_lock(&node_locks[node-start_global]))
                    {
                        k1 = Super [s] ;            /* s contains columns k1 to k2-1 of L */
                        k2 = Super [s+1] ;
                        nscol = k2 - k1 ;           /* # of columns in all of s */
                        psi = Lpi [s] ;             /* pointer to first row of s in Ls */
                        psx = Lpx [s] ;             /* pointer to first row of s in Lx */
                        psend = Lpi [s+1] ;         /* pointer just past last row of s in Ls */
                        nsrow = psend - psi ;       /* # of rows in all of s */
                        pend = psx + nsrow * nscol ;    /* s is nsrow-by-nscol */
                        pk = psx ;

                        /*
                         * Initialize Supernode
                         *
                         * Initializes the supernode with the following steps:
                         *
                         * 1. clear supernode (Lx) on device
                         * 2. create Map for supernode
                         *
                         */
                        TEMPLATE2 ( CHOLMOD (gpu_initialize_supernode_root))( Common, gpu_p, nscol, nsrow, psi, psx, gpuid );

                        /* construct the scattered Map for supernode s */
                        #pragma omp parallel for num_threads(numThreads) if ( nsrow > 128 )
                        for (k = 0 ; k < nsrow ; k++)
                        {
                            Map [Ls [psi + k]] = k ;
                        }

                        //#pragma omp critical (head_next)
                        {
                            /* reorder descendants in supernode by decreasing size */
                            TEMPLATE2 (CHOLMOD (gpu_reorder_descendants_root))(Common, gpu_p, k1, k2, Ls, Lpi, Lpos,
                                Super, Head, &tail, Next, Previous, &ndescendants, &mapCreatedOnGpu, s, gpuid );

                            /* snapshot the descendant list so this thread can
                             * traverse it while other threads relink Head/Next */
                            for ( d=Head[s]; d!=EMPTY; d=Next[d] ){
                                Next_local[d] = Next[d];
                                Previous_local[d] = Previous[d];
                                Lpos_local[d] = Lpos[d];
                            }

                            for ( d = Head[s]; d != EMPTY; d = Next_local[d] )
                            {
                                p = Lpos [d] ;      /* offset of 1st row of d affecting s */
                                pdi = Lpi [d] ;     /* pointer to first row of d in Ls */
                                pdi1 = pdi + p ;    /* ptr to 1st row of d affecting s in Ls */
                                pdend = Lpi [d+1] ; /* pointer just past last row of d in Ls */
                                for (pdi2 = pdi1 ; pdi2 < pdend && Ls [pdi2] < k2 ; pdi2++) ;
                                ndrow = pdend - pdi ;   /* # rows in all of d */
                                Lpos [d] = pdi2 - pdi ;
                                if (Lpos [d] < ndrow)
                                {
                                    /* d still affects an ancestor; move it to
                                     * that ancestor's link list */
                                    dancestor = SuperMap [Ls [pdi2]] ;
                                    #pragma omp critical (head_next)
                                    {
                                        Next [d] = Head [dancestor] ;
                                        Head [dancestor] = d ;
                                    }
                                }
                            }

                            /* prepare next supernode */
                            /* Lpos [s] is offset of first row of s affecting its parent */
                            if ( nsrow - nscol > 0 )
                            {
                                Lpos [s] = nscol ;
                                sparent = SuperMap [Ls [psi + nscol]] ;
                                /* place s in link list of its parent */
                                #pragma omp critical (head_next)
                                {
                                    Next [s] = Head [sparent] ;
                                    Head [sparent] = s ;
                                }
                                //Head[s] = EMPTY;
                            }
                        } /* end pragma omp critical */

                        /* copy matrix into supernode s (lower triangular part only) */
                        #pragma omp parallel for private ( p, pend, pfend, pf, i, j, imap, q ) num_threads(numThreads) if ( k2-k1 > 64 )
                        for (k = k1 ; k < k2 ; k++)
                        {
                            /* copy the kth column of A into the supernode */
                            if (stype != 0)
                            {
                                p = Ap [k] ;
                                pend = (Apacked) ? (Ap [k+1]) : (p + Anz [k]) ;
                                for ( ; p < pend ; p++)
                                {
                                    i = Ai [p] ;
                                    if (i >= k)
                                    {
                                        imap = Map [i] ;    /* row i of L is located in row Map [i] of s */
                                        if (imap >= 0 && imap < nsrow)
                                        {
                                            L_ASSIGN (Lx,(imap+(psx+(k-k1)*nsrow)), Ax,Az,p) ;  /* Lx [Map [i] + pk] = Ax [p] ; */
                                        }
                                    }
                                }
                            }
                            /* copy the kth column of A*F into the supernode */
                            else
                            {
                                double fjk[2];
                                pf = Fp [k] ;
                                /* NOTE(review): the unpacked case reads 'p'
                                 * before it is assigned in this branch;
                                 * upstream CHOLMOD uses pf + Fnz[k] here —
                                 * verify. */
                                pfend = (Fpacked) ? (Fp [k+1]) : (p + Fnz [k]) ;
                                for ( ; pf < pfend ; pf++)
                                {
                                    j = Fi [pf] ;
                                    L_ASSIGN (fjk,0, Fx,Fz,pf) ;    /* fjk = Fx [pf] ; */
                                    p = Ap [j] ;
                                    pend = (Apacked) ? (Ap [j+1]) : (p + Anz [j]) ;
                                    for ( ; p < pend ; p++)
                                    {
                                        i = Ai [p] ;
                                        if (i >= k)
                                        {
                                            imap = Map [i] ;
                                            if (imap >= 0 && imap < nsrow)
                                            {
                                                L_MULTADD (Lx,(imap+(psx+(k-k1)*nsrow)),Ax,Az,p, fjk) ; /* Lx [Map [i] + pk] += Ax [p] * fjk ; */
                                            }
                                        }
                                    }
                                }
                            }
                        }

                        /* add beta (only real part) to the diagonal of the supernode, if nonzero */
                        if (beta [0] != 0.0)
                        {
                            pk = psx ;
                            for (k = k1 ; k < k2 ; k++)
                            {
                                L_ASSEMBLE (Lx,pk, beta) ;  /* Lx [pk] += beta [0] ; */
                                pk += nsrow + 1 ;           /* advance to the next diagonal entry */
                            }
                        }

                        /* save/restore the list of supernodes */
                        if (!repeat_supernode)
                        {
                            for (d = Head [s] ; d != EMPTY ; d = Next_local [d])
                            {
                                Lpos_save [d] = Lpos_local [d] ;
                                Next_save [d] = Next_local [d] ;
                            }
                        }
                        else
                        {
                            for (d = Head [s] ; d != EMPTY ; d = Next_local [d])
                            {
                                Lpos_local [d] = Lpos_save [d] ;
                                Next_local [d] = Next_save [d] ;
                            }
                        }

                        /* initialize the buffer counter */
                        Common->ibuffer[gpuid] = 0;
                        supernodeUsedGPU = 0;
                        idescendant = 0;
                        dlarge = Head[s];   /* large descendants consumed from the front (GPU) */
                        dsmall = tail;      /* small descendants consumed from the back (CPU) */
                        GPUavailable = 1;

#ifdef MKLROOT
                        mkl_set_num_threads_local(numThreads1);
#else
                        openblas_set_num_threads(numThreads1);
#endif

                        cuErrHost = cudaSuccess;
#ifdef QUERY_LX_EVENTS
                        cuErrDev = cudaSuccess;
#endif

                        /* loop over descendants d of supernode s */
                        while( (idescendant < ndescendants) )
                        {
                            /* rotate through the pinned host / device buffers */
                            iHostBuff = (Common->ibuffer[gpuid]) % CHOLMOD_HOST_SUPERNODE_BUFFERS;
                            iDevBuff = (Common->ibuffer[gpuid]) % CHOLMOD_DEVICE_LX_BUFFERS;
                            iDevCBuff = (Common->ibuffer[gpuid]) % CHOLMOD_DEVICE_C_BUFFERS;
                            Common->ibuffer[gpuid]++;
                            Common->ibuffer[gpuid] = Common->ibuffer[gpuid]%(CHOLMOD_HOST_SUPERNODE_BUFFERS*CHOLMOD_DEVICE_LX_BUFFERS*CHOLMOD_DEVICE_C_BUFFERS*CHOLMOD_DEVICE_STREAMS);

                            /* get next descendant */
                            if ( idescendant > 0 )
                            {
                                /* non-blocking query: is the GPU buffer free? */
                                cuErrHost = cudaEventQuery ( Common->updateCBuffersFree[gpuid][iHostBuff] );
                                //cuErrHost = cudaEventSynchronize ( Common->updateCBuffersFree[gpuid][iHostBuff] );
#ifdef QUERY_LX_EVENTS
                                cuErrDev = cudaEventQuery ( Common->updateCDevBuffersFree[gpuid][iDevBuff] );
                                //cuErrDev = cudaEventSynchronize ( Common->updateCDevBuffersFree[gpuid][iDevBuff] );
#endif
#if 0
                                while ( (cuErrHost != cudaSuccess
#ifdef QUERY_LX_EVENTS
                                         || cuErrDev != cudaSuccess
#endif
                                        ) && (ndescendants - idescendant < CHOLMOD_HOST_SUPERNODE_BUFFERS) )
                                {
                                    iHostBuff = (Common->ibuffer[gpuid]) % CHOLMOD_HOST_SUPERNODE_BUFFERS;
                                    iDevBuff = (Common->ibuffer[gpuid]) % CHOLMOD_DEVICE_LX_BUFFERS;
                                    iDevCBuff = (Common->ibuffer[gpuid]) % CHOLMOD_DEVICE_C_BUFFERS;
                                    Common->ibuffer[gpuid]++;
                                    Common->ibuffer[gpuid] = Common->ibuffer[gpuid]%(CHOLMOD_HOST_SUPERNODE_BUFFERS*CHOLMOD_DEVICE_LX_BUFFERS*CHOLMOD_DEVICE_C_BUFFERS*CHOLMOD_DEVICE_STREAMS);
                                    cuErrHost = cudaEventQuery ( Common->updateCBuffersFree[gpuid][iHostBuff] );
#ifdef QUERY_LX_EVENTS
                                    cuErrDev = cudaEventQuery ( Common->updateCDevBuffersFree[gpuid][iDevBuff] );
#endif
                                }
#endif
                            }

                            /* GPU free: take a large descendant from the front;
                             * otherwise assemble a small one on the CPU */
                            if ( cuErrHost == cudaSuccess
#ifdef QUERY_LX_EVENTS
                                 && cuErrDev == cudaSuccess
#endif
                               )
                            {
                                d = dlarge;
                                dlarge = Next_local[dlarge];
                                GPUavailable = 1;
                            }
                            else
                            {
                                d = dsmall;
                                dsmall = Previous_local[dsmall];
                                GPUavailable = 0;
                            }

                            /* get the size of supernode d */
                            kd1 = Super [d] ;       /* d contains cols kd1 to kd2-1 of L */
                            kd2 = Super [d+1] ;
                            ndcol = kd2 - kd1 ;     /* # of columns in all of d */
                            pdi = Lpi [d] ;         /* pointer to first row of d in Ls */
                            pdx = Lpx [d] ;         /* pointer to first row of d in Lx */
                            pdend = Lpi [d+1] ;     /* pointer just past last row of d in Ls */
                            ndrow = pdend - pdi ;   /* # rows in all of d */

                            /* find the range of rows of d that affect rows k1 to k2-1 of s */
                            p = Lpos_local [d] ;    /* offset of 1st row of d affecting s */
                            pdi1 = pdi + p ;        /* ptr to 1st row of d affecting s in Ls */
                            pdx1 = pdx + p ;        /* ptr to 1st row of d affecting s in Lx */
                            for (pdi2 = pdi1 ; pdi2 < pdend && Ls [pdi2] < k2 ; (pdi2)++) ;
                            ndrow1 = pdi2 - pdi1 ;  /* # rows in first part of d */
                            ndrow2 = pdend - pdi1 ; /* # rows in remaining d */

                            /* construct the update matrix C for this supernode d */
                            ndrow3 = ndrow2 - ndrow1 ;  /* number of rows of C2 */

                            /*
                             * Supernode Assembly
                             *
                             * Assemble the supernode with the following steps:
                             *
                             * 1. perform dsyrk
                             * 2. perform dgemm
                             * 3. perform addUpdate
                             *
                             */
                            if ( GPUavailable == 1 )
                            {
                                TEMPLATE2 (CHOLMOD (gpu_updateC_root)) (Common, gpu_p, Lx, ndrow1, ndrow2, ndrow, ndcol,
                                    nsrow, pdx1, pdi1, iHostBuff, iDevBuff, iDevCBuff, gpuid);
                                supernodeUsedGPU = 1;   /* GPU was used for this supernode*/
                                idescendant++;
                            }
                            else if ( GPUavailable == 0 )
                            {
                                Int tid;
                                //nvtxRangeId_t id2 = nvtxRangeStartA("CPU portion");
                                desc_count = 0;
                                syrk_count = 0;
                                gemm_count = 0;
                                counter = 0;

                                /* loop over descendants: gather a batch of small
                                 * descendants (nthreads == 1 here, so at most one)
                                 * into the desc/syrk/gemm work lists */
                                for(tid = 0; tid < nthreads; tid++)
                                {
                                    /* ensure there are remaining descendants to assemble */
                                    if(idescendant < ndescendants)
                                    {
                                        if(tid > 0)
                                        {
                                            d = dsmall;
                                            dsmall = Previous_local[dsmall];
                                        }

                                        /* get descendant dimensions */
                                        kd1 = Super [d] ;
                                        kd2 = Super [d+1] ;
                                        ndcol = kd2 - kd1 ;
                                        pdi = Lpi [d] ;
                                        pdx = Lpx [d] ;
                                        pdend = Lpi [d+1] ;
                                        ndrow = pdend - pdi ;
                                        p = Lpos_local [d] ;
                                        pdi1 = pdi + p ;
                                        pdx1 = pdx + p ;
                                        for (pdi2 = pdi1 ; pdi2 < pdend && Ls [pdi2] < k2 ; (pdi2)++);
                                        ndrow1 = pdi2 - pdi1 ;
                                        ndrow2 = pdend - pdi1 ;
                                        ndrow3 = ndrow2 - ndrow1 ;

                                        /* ensure there is sufficient C buffer space to hold Schur complement update */
                                        if ( sizeof(double) * L_ENTRY * (counter + ndrow1*ndrow2) <= Common->devBuffSize )
                                        {
                                            idescendant++;

                                            Int m = ndrow2-ndrow1;
                                            Int n = ndrow1;
                                            Int k = ndcol;
                                            Int lda = ndrow;
                                            Int ldb = ndrow;
                                            Int ldc = ndrow2;

                                            /* store descendant dimensions */
                                            desc[desc_count].pdi1 = pdi1;
                                            desc[desc_count].ndrow1 = ndrow1;
                                            desc[desc_count].ndrow2 = ndrow2;
                                            desc[desc_count].C = (double *)&C1[counter];
                                            desc_count++;

                                            /* store syrk dimensions & pointers */
                                            syrk[syrk_count].n = n;
                                            syrk[syrk_count].k = k;
                                            syrk[syrk_count].lda = lda;
                                            syrk[syrk_count].ldc = ldc;
                                            syrk[syrk_count].A = (double *)(Lx + L_ENTRY*pdx1);
                                            syrk[syrk_count].C = (double *)&C1[counter];
                                            syrk_count++;

                                            if (m > 0)
                                            {
                                                /* store gemm dimensions & pointers */
                                                gemm[gemm_count].m = m;
                                                gemm[gemm_count].n = n;
                                                gemm[gemm_count].k = k;
                                                gemm[gemm_count].lda = lda;
                                                gemm[gemm_count].ldb = ldb;
                                                gemm[gemm_count].ldc = ldc;
                                                gemm[gemm_count].A = (double *)(Lx + L_ENTRY*(pdx1 + n));
                                                gemm[gemm_count].B = (double *)(Lx + L_ENTRY*pdx1);
                                                gemm[gemm_count].C = (double *)(&C1[counter] + L_ENTRY*n);
                                                gemm_count++;
                                            }

                                            /* increment pointer to C buff */
                                            counter += L_ENTRY*n*ldc;
                                        }
                                        else
                                        {
                                            dsmall = d;
                                            tid = nthreads;     // ends the loop
                                        }
                                    }
                                    else
                                    {
                                        dsmall = d;
                                        tid = nthreads;         // ends the loop
                                    }
                                } /* end loop over parallel descendants (threads) */

                                {
                                    Int i;
                                    /*
                                     * DSYRK
                                     *
                                     * Perform dsyrk on batch of descendants
                                     *
                                     */
                                    /* loop over syrk's */
                                    #pragma omp parallel for num_threads(nthreads)
                                    for(i = 0; i < syrk_count; i++)
                                    {
                                        /* get syrk dimensions */
                                        Int n = syrk[i].n;
                                        Int k = syrk[i].k;
                                        Int lda = syrk[i].lda;
                                        Int ldc = syrk[i].ldc;
                                        double *A = (double *)syrk[i].A;
                                        double *C = (double *)syrk[i].C;
                                        double one[2] = {1.0, 0.0};
                                        double zero[2] = {0.0, 0.0};
#ifdef REAL
                                        BLAS_dsyrk ("L", "N", n, k, one, A, lda, zero, C, ldc) ;
#else
                                        BLAS_zherk ("L", "N", n, k, one, A, lda, zero, C, ldc) ;
#endif
                                    } /* end loop over syrk's */

                                    /*
                                     * DGEMM
                                     *
                                     * Perform dgemm on batch of descendants
                                     *
                                     */
                                    /* loop over gemm's */
                                    #pragma omp parallel for num_threads(nthreads)
                                    for(i = 0; i < gemm_count; i++)
                                    {
                                        /* get gemm dimensions */
                                        Int m = gemm[i].m;
                                        Int n = gemm[i].n;
                                        Int k = gemm[i].k;
                                        Int lda = gemm[i].lda;
                                        Int ldb = gemm[i].ldb;
                                        Int ldc = gemm[i].ldc;
                                        double *A = (double *)gemm[i].A;
                                        double *B = (double *)gemm[i].B;
                                        double *C = (double *)gemm[i].C;
                                        double one[2] = {1.0, 0.0};
                                        double zero[2] = {0.0, 0.0};
                                        if (m > 0)
                                        {
#ifdef REAL
                                            BLAS_dgemm ("N","T", m, n, k, one, A, lda, B, ldb, zero, C, ldc) ;
#else
                                            BLAS_zgemm ("N", "C", m, n, k, one, A, lda, B, ldb, zero, C, ldc) ;
#endif
                                        }
                                    } /* end loop over gemm's */

                                    /*
                                     * Assembly
                                     *
                                     * Assemble schur complements of a batch of descendants
                                     *
                                     */
                                    /* loop over descendants */
                                    for(i = 0; i < desc_count; i++)
                                    {
                                        Int ii, j, q, px;
                                        /* get descendant dimensions */
                                        Int pdi1 = desc[i].pdi1;
                                        Int ndrow1 = desc[i].ndrow1;
                                        Int ndrow2 = desc[i].ndrow2;
                                        double *C = (double *)desc[i].C;

                                        /* scatter-subtract C into supernode s via Map */
                                        #pragma omp parallel for private ( j, ii, px, q ) num_threads(numThreads1) if (ndrow1 > 64 )
                                        for (j = 0 ; j < ndrow1 ; j++)
                                        {
                                            px = psx + Map [Ls [pdi1 + j]]*nsrow ;
                                            for (ii = j ; ii < ndrow2 ; ii++)
                                            {
                                                q = px + Map [Ls [pdi1 + ii]] ;
                                                L_ASSEMBLESUB (Lx,q, C, ii+ndrow2*j) ;
                                            }
                                        }
                                    } /* end loop over descendants */
                                }
                                //nvtxRangeEnd(id2);
                            }
                        } /* end loop over descendants */

                        /*
                         * Final Supernode Assembly
                         *
                         * Sum CPU and GPU assemblies of supernode:
                         *
                         */
                        iHostBuff = (Common->ibuffer[gpuid])%CHOLMOD_HOST_SUPERNODE_BUFFERS;
                        iDevBuff = (Common->ibuffer[gpuid])%CHOLMOD_DEVICE_LX_BUFFERS;
                        TEMPLATE2 ( CHOLMOD (gpu_final_assembly_root ))( Common, gpu_p, Lx, psx, nscol, nsrow,
                            supernodeUsedGPU, gpuid );

                        /*
                         * Cholesky Factorization
                         *
                         * Factorize diagonal block of supernode s in LL' in the following steps:
                         * 1. perform dpotrf
                         *
                         */
                        nscol2 = (repeat_supernode) ? (nscol3) : (nscol) ;
                        /* try the GPU potrf first; fall back to LAPACK on the host */
                        if ( !(supernodeUsedGPU) || !TEMPLATE2 (CHOLMOD (gpu_lower_potrf_root))(Common, gpu_p, Lx, &info, nscol2, nsrow, psx, gpuid))
                        {
                            supernodeUsedGPU = 0;
#ifdef REAL
                            LAPACK_dpotrf ("L",
                                nscol2,                         /* N: nscol2 */
                                Lx + L_ENTRY*psx, nsrow,        /* A, LDA: S1, nsrow */
                                info) ;                         /* INFO */
#else
                            LAPACK_zpotrf ("L",
                                nscol2,                         /* N: nscol2 */
                                Lx + L_ENTRY*psx, nsrow,        /* A, LDA: S1, nsrow */
                                info) ;                         /* INFO */
#endif
                        }

                        /* check if the matrix is not positive definite */
                        if (repeat_supernode)
                        {
                            /* the leading part has been refactorized; it must have succeeded */
                            info = 0 ;

                            /* zero out the rest of this supernode */
                            p = psx + nsrow * nscol3 ;
                            pend = psx + nsrow * nscol ;    /* s is nsrow-by-nscol */
                            for ( ; p < pend ; p++)
                            {
                                L_CLEAR (Lx,p) ;            /* Lx [p] = 0 ; */
                            }
                        }

                        /* info is set to one in LAPACK_*potrf if blas_ok is FALSE.  It is
                         * set to zero in dpotrf/zpotrf if the factorization was successful. */
                        if (CHECK_BLAS_INT && !Common->blas_ok)
                        {
                            ERROR (CHOLMOD_TOO_LARGE, "problem too large for the BLAS") ;
                        }

                        /* check if the matrix is not positive definite */
                        if (info != 0)
                        {
                            /* Matrix is not positive definite.  dpotrf/zpotrf do NOT report an
                             * error if the diagonal of L has NaN's, only if it has a zero. */
                            if (Common->status == CHOLMOD_OK)
                            {
                                ERROR (CHOLMOD_NOT_POSDEF, "matrix not positive definite") ;
                            }

                            /* L->minor is the column of L that contains a zero or negative
                             * diagonal term. */
                            L->minor = k1 + info - 1 ;

                            /* clear the link lists of all subsequent supernodes */
                            for (ss = s+1 ; ss < L->nsuper ; ss++)
                            {
                                Head [ss] = EMPTY ;
                            }

                            /* zero this supernode, and all remaining supernodes */
                            pend = L->xsize ;
                            for (p = psx ; p < pend ; p++)
                            {
                                /* Lx [p] = 0. ; */
                                L_CLEAR (Lx,p) ;
                            }

                            /* If L is indefinite, it still contains useful information.
                             * Supernodes 0 to s-1 are valid, similar to MATLAB [R,p]=chol(A),
                             * where the 1-based p is identical to the 0-based L->minor.  Since
                             * L->minor is in the current supernode s, it and any columns to the
                             * left of it in supernode s are also all zero.  This differs from
                             * [R,p]=chol(A), which contains nonzero rows 1 to p-1.  Fix this
                             * by setting repeat_supernode to TRUE, and repeating supernode s.
                             *
                             * If Common->quick_return_if_not_posdef is true, then the entire
                             * supernode s is not factorized; it is left as all zero. */
                            if (info == 1 || Common->quick_return_if_not_posdef)
                            {
                                /* If the first column of supernode s contains a zero or
                                 * negative diagonal entry, then it is already properly set to
                                 * zero.  Also, info will be 1 if integer overflow occurred in
                                 * the BLAS. */
                                Head [s] = EMPTY ;
                                /* finalize GPU */
                                CHOLMOD (gpu_end) (Common);
                                /*return Common->status;*/
                            }
                            else
                            {
                                /* Repeat supernode s, but only factorize it up to but not
                                 * including the column containing the problematic diagonal
                                 * entry. */
                                repeat_supernode = TRUE ;
                                //s-- ;
                                nscol3 = info - 1 ;
                            }
                        } /* end if info */

                        if (!repeat_supernode)
                        {
                            /*
                             * Triangular Solve
                             *
                             * Compute the subdiagonal block in the following steps:
                             * 1. perform dtrsm
                             * 2. copy result back into factor Lx
                             * 3. prepare next supernode
                             *
                             */
                            nsrow2 = nsrow - nscol2 ;
                            if (nsrow2 > 0)
                            {
                                /* The current supernode is columns k1 to k2-1 of L.  Let L1 be the
                                 * diagonal block (factorized by dpotrf/zpotrf above; rows/cols
                                 * k1:k2-1), and L2 be rows k2:n-1 and columns k1:k2-1 of L.  The
                                 * triangular system to solve is L2*L1' = S2, where S2 is
                                 * overwritten with L2.  More precisely, L2 = S2 / L1' in MATLAB
                                 * notation. */
                                if ( !(supernodeUsedGPU) || !TEMPLATE2 (CHOLMOD(gpu_triangular_solve_root)) (Common, gpu_p, Lx, nsrow2, nscol2, nsrow, psx ,gpuid) )
                                {
#ifdef REAL
                                    BLAS_dtrsm ("R", "L", "T", "N",
                                        nsrow2, nscol2,                     /* M, N */
                                        one,                                /* ALPHA: 1 */
                                        Lx + L_ENTRY*psx, nsrow,            /* A, LDA: L1, nsrow */
                                        Lx + L_ENTRY*(psx + nscol2),        /* B, LDB, L2, nsrow */
                                        nsrow) ;
#else
                                    BLAS_ztrsm ("R", "L", "C", "N",
                                        nsrow2, nscol2,                     /* M, N */
                                        one,                                /* ALPHA: 1 */
                                        Lx + L_ENTRY*psx, nsrow,            /* A, LDA: L1, nsrow */
                                        Lx + L_ENTRY*(psx + nscol2),        /* B, LDB, L2, nsrow */
                                        nsrow) ;
#endif
                                }
                                if (CHECK_BLAS_INT && !Common->blas_ok)
                                {
                                    ERROR (CHOLMOD_TOO_LARGE, "problem too large for the BLAS") ;
                                }
                            }
                            /* copy supernode back to factor Lx anyways */
                            else
                            {
                                TEMPLATE2 ( CHOLMOD (gpu_copy_supernode_root) )( Common, gpu_p, Lx, psx, nscol, nscol2,
                                    nsrow, supernodeUsedGPU, gpuid);
                            }

                            Head [s] = EMPTY ;  /* link list for supernode s no longer needed */

                            if (repeat_supernode)
                            {
                                /* matrix is not positive definite; finished clean-up for supernode
                                 * containing negative diagonal */
                                /*return Common->status;*/
                            }

                            /* Mark the supernode complete: decrement the parent's
                             * pending count and walk up to it */
                            sparent = SuperMap [Ls [psi + nscol]];
                            if (sparent > s && sparent < L->nsuper)
                            {
                                Int inode;
                                inode = node;
                                while (inode < end_global && supernode_levels[inode] != sparent) inode++;
                                if (inode < end_global)
                                {
                                    #pragma omp atomic
                                    pending[inode-start_global]--;
                                    s = sparent;
                                }
                                else s = EMPTY;
                            }
                        }
                        else
                        {
                            s = EMPTY;
                        }
                    }
                } /* end loop over supernodes */
            }
            free ( pending );
            free ( leaves );
            for ( node=0; node<event_len; node++ )
            {
                omp_destroy_lock(&node_locks[node]);
            }
            free ( node_locks );
        } /* end loop over levels */
#endif

    /* return ok */
    return Common->status;/*(Common->status >= CHOLMOD_OK) ;*/
}

/*
#undef REAL
#undef COMPLEX
#undef ZOMPLEX
*/
THSTensorMath.c
#ifndef THS_GENERIC_FILE
#define THS_GENERIC_FILE "generic/THSTensorMath.c"
#else

/* Pointer to the start of row r / column c of a 2-D strided dense tensor. */
#define ROW_PTR2(t, r) (THTensor_(data)(t) + (r) * (t)->stride[0])
#define COL_PTR2(t, c) (THTensor_(data)(t) + (c) * (t)->stride[1])

/* Reset a sparse tensor to the empty state: drop the stored indices and
 * values and set nnz to 0.  The logical sizes of the tensor are untouched. */
void THSTensor_(zero)(THSTensor *self) {
  if (self->indices->nDimension) {
    THLongTensor_resizeNd(self->indices, 0, NULL, NULL);
  }
  if (self->values->nDimension) {
    THTensor_(resizeNd)(self->values, 0, NULL, NULL);
  }
  self->nnz = 0;
}

/* r_ = sparse tensor of all zeros with the given size. */
void THSTensor_(zeros)(THSTensor *r_, THLongStorage *size)
{
  THSTensor_(resize)(r_, size);
  THSTensor_(zero)(r_);
}

/* r_ = sparse tensor of all zeros shaped like input. */
void THSTensor_(zerosLike)(THSTensor *r_, THSTensor *input)
{
  THSTensor_(resizeAs)(r_, input);
  THSTensor_(zero)(r_);
}

/* r_ = t * value (scalar multiply).  Only the values are scaled; the
 * index structure (and hence the coalesced flag) is preserved. */
void THSTensor_(mul)(THSTensor *r_, THSTensor *t, real value) {
  if (r_ == t) {
    /* In-place: scale the values buffer directly. */
    THTensor *r_values_ = THSTensor_(newValues)(r_);
    THTensor_(mul)(r_values_, r_values_, value);
    THTensor_(free)(r_values_);
  } else {
    THSTensor_(resizeAs)(r_, t);
    THLongTensor *r_indices_ = THSTensor_(newIndices)(r_);
    THTensor *r_values_ = THSTensor_(newValues)(r_);
    THLongTensor *t_indices_ = THSTensor_(newIndices)(t);
    THTensor *t_values_ = THSTensor_(newValues)(t);
    THLongTensor_resizeAs(r_indices_, t_indices_);
    THLongTensor_copy(r_indices_, t_indices_);
    THTensor_(mul)(r_values_, t_values_, value);
    r_->nnz = t->nnz;
    r_->coalesced = t->coalesced;
    THLongTensor_free(r_indices_);
    THTensor_(free)(r_values_);
    THLongTensor_free(t_indices_);
    THTensor_(free)(t_values_);
  }
}

/* floating point only, because that is what TH supports */
/* TODO: add in-place support */
#if defined(THS_REAL_IS_FLOAT) || defined(THS_REAL_IS_DOUBLE)
/* r_ = t_ ^ value (elementwise power on the stored values).  value == 0 is
 * rejected because x^0 == 1 would densify the tensor. */
void THSTensor_(pow)(THSTensor *r_, THSTensor *t_, real value) {
  if (value == 0) {
    THError("cannot raise to zeroth power on sparse tensor");
  }
  /* Coalesce first so duplicate indices are summed before the (non-linear)
   * power is applied. */
  THSTensor* t = THSTensor_(newCoalesce)(t_);

  THSTensor_(resizeAs)(r_, t);
  THLongTensor *r_indices_ = THSTensor_(newIndices)(r_);
  THTensor *r_values_ = THSTensor_(newValues)(r_);
  THLongTensor *t_indices_ = THSTensor_(newIndices)(t);
  THTensor *t_values_ = THSTensor_(newValues)(t);
  THLongTensor_resizeAs(r_indices_, t_indices_);
  THLongTensor_copy(r_indices_, t_indices_);
  THTensor_(pow)(r_values_, t_values_, value);
  r_->nnz = t->nnz;
  r_->coalesced = t->coalesced;
  THLongTensor_free(r_indices_);
  THTensor_(free)(r_values_);
  THLongTensor_free(t_indices_);
  THTensor_(free)(t_values_);
  THSTensor_(free)(t);
}
#endif

/* r_ = t / value (scalar divide); mirrors THSTensor_(mul). */
void THSTensor_(div)(THSTensor *r_, THSTensor *t, real value) {
  if (r_ == t) {
    THTensor *r_values_ = THSTensor_(newValues)(r_);
    THTensor_(div)(r_values_, r_values_, value);
    THTensor_(free)(r_values_);
  } else {
    THSTensor_(resizeAs)(r_, t);
    THLongTensor *r_indices_ = THSTensor_(newIndices)(r_);
    THTensor *r_values_ = THSTensor_(newValues)(r_);
    THLongTensor *t_indices_ = THSTensor_(newIndices)(t);
    THTensor *t_values_ = THSTensor_(newValues)(t);
    THLongTensor_resizeAs(r_indices_, t_indices_);
    THLongTensor_copy(r_indices_, t_indices_);
    THTensor_(div)(r_values_, t_values_, value);
    r_->nnz = t->nnz;
    r_->coalesced = t->coalesced;
    THLongTensor_free(r_indices_);
    THTensor_(free)(r_values_);
    THLongTensor_free(t_indices_);
    THTensor_(free)(t_values_);
  }
}

/* r_ = t + value * src (sparse + scalar * sparse), implemented as a merge
 * of the two (index-sorted) entry lists.  Matching indices are combined in
 * a single output entry via two axpy calls. */
void THSTensor_(cadd)(THSTensor *r_, THSTensor *t, real value, THSTensor *src) {
  if(!THSTensor_(isSameSizeAs)(t, src)) {
    THError("cadd operands have incompatible sizes or dimension types");
  }
  if (src->nnz == 0) {
    THSTensor_(copy)(r_, t);
    return;
  }
  if (t->nnz == 0) {
    THSTensor_(mul)(r_, src, value);
    return;
  }

  // saving those because they can be overwritten when doing in-place operations
  ptrdiff_t t_nnz = t->nnz, s_nnz = src->nnz, max_nnz = t_nnz + s_nnz;
  int t_coalesced = t->coalesced, s_coalesced = src->coalesced;
  int64_t nDimI = THSTensor_(nDimensionI)(src);
  THLongTensor *t_indices_ = THSTensor_(newIndices)(t);
  THTensor *t_values_ = THSTensor_(newValues)(t);
  THLongTensor *src_indices_ = THSTensor_(newIndices)(src);
  THTensor *s_values_ = THSTensor_(newValues)(src);
  THLongTensor *r_indices_ = THLongTensor_newWithSize2d(nDimI, max_nnz);
  THTensor *r_values_ = THSTensor_(newValuesWithSizeOf)(s_values_, max_nnz);
  THTensor_(zero)(r_values_);
  THSTensor_(resizeAs)(r_, src);
  THSTensor_(_move)(r_, r_indices_, r_values_);

  /* Number of scalars per sparse entry (the dense "value block"). */
  int64_t blockSize = r_values_->stride[0];
  int64_t cmp, d;
  int64_t r_i = 0, t_i = 0, s_i = 0;
  while (t_i < t_nnz || s_i < s_nnz) {
    /* cmp > 0: take from t; cmp < 0: take from src; cmp == 0: combine. */
    if (t_i >= t_nnz) {
      cmp = -1;
    } else if (s_i >= s_nnz) {
      cmp = 1;
    } else {
      cmp = 0;
      for (d = 0; d < nDimI; d++) {
        if (THTensor_fastGet2d(t_indices_, d, t_i) < THTensor_fastGet2d(src_indices_, d, s_i)) {
          cmp = 1;
          break;
        }
        if (THTensor_fastGet2d(t_indices_, d, t_i) > THTensor_fastGet2d(src_indices_, d, s_i)) {
          cmp = -1;
          break;
        }
      }
    }
    if (cmp >= 0) {
      for (d = 0; d < nDimI; d++) {
        THTensor_fastSet2d(r_indices_, d, r_i, THTensor_fastGet2d(t_indices_, d, t_i));
      }
      THBlas_(axpy)(blockSize, 1,
        THTensor_(data)(t_values_) + t_i * blockSize, 1,
        THTensor_(data)(r_values_) + r_i * blockSize, 1);
      t_i++;
    }
    if (cmp <= 0) {
      for (d = 0; d < nDimI; d++) {
        THTensor_fastSet2d(r_indices_, d, r_i, THTensor_fastGet2d(src_indices_, d, s_i));
      }
      THBlas_(axpy)(blockSize, value,
        THTensor_(data)(s_values_) + s_i * blockSize, 1,
        THTensor_(data)(r_values_) + r_i * blockSize, 1);
      s_i++;
    }
    r_i++;
  }

  r_->nnz = r_i;
  // TODO: I think it may be possible to track inside the loop and
  // detect when we are uncoalesced (e.g., by observing that an
  // index goes backwards) which may be more precise than using the
  // coalesced flag here.  But this is easy.
  r_->coalesced = t_coalesced && s_coalesced;
  THLongTensor_free(t_indices_);
  THTensor_(free)(t_values_);
  THLongTensor_free(src_indices_);
  THTensor_(free)(s_values_);
}

/* r_ = t - value * src, expressed via cadd with a negated scalar. */
void THSTensor_(csub)(THSTensor *r_, THSTensor *t, real value, THSTensor *src) {
  THSTensor_(cadd)(r_, t, -value, src);
}

/* r_ = t_ * src_ (elementwise).  Both operands are coalesced first, then
 * their sorted entry lists are intersected; only matching indices survive. */
void THSTensor_(cmul)(THSTensor *r_, THSTensor *t_, THSTensor *src_) {
  if(!THSTensor_(isSameSizeAs)(t_, src_)) {
    THError("cmul operands have incompatible sizes or dimension types");
  }
  if (src_->nnz == 0 || t_->nnz == 0) {
    THSTensor_(zero)(r_);
    return;
  }
  THSTensor *t = THSTensor_(newCoalesce)(t_);
  THSTensor *src = THSTensor_(newCoalesce)(src_);

  // saving those because they can be overwritten when doing in-place operations
  ptrdiff_t t_nnz = t->nnz, s_nnz = src->nnz;
  /* Intersection can have at most min(t_nnz, s_nnz) entries. */
  ptrdiff_t max_nnz = t_nnz < s_nnz ? t_nnz : s_nnz;
  int64_t nDimI = THSTensor_(nDimensionI)(src);
  THLongTensor *t_indices_ = THSTensor_(newIndices)(t);
  THTensor *t_values_ = THSTensor_(newValues)(t);
  THLongTensor *src_indices_ = THSTensor_(newIndices)(src);
  THTensor *s_values_ = THSTensor_(newValues)(src);
  THLongTensor *r_indices_ = THLongTensor_newWithSize2d(nDimI, max_nnz);
  THTensor *r_values_ = THSTensor_(newValuesWithSizeOf)(s_values_, max_nnz);
  THTensor_(zero)(r_values_);
  THSTensor_(resizeAs)(r_, src);
  THSTensor_(_move)(r_, r_indices_, r_values_);

  THTensor *src1Buffer = THTensor_(new)();
  THTensor *src2Buffer = THTensor_(new)();
  THTensor *dstBuffer = THTensor_(new)();
  int64_t match, d;
  int64_t r_i = 0, t_i = 0, s_i = 0;
  while (t_i < t_nnz && s_i < s_nnz) {
    match = 1;
    for (d = 0; d < nDimI; d++) {
      if (THTensor_fastGet2d(t_indices_, d, t_i) < THTensor_fastGet2d(src_indices_, d, s_i)) {
        t_i++;
        match = 0;
        break;
      }
      if (THTensor_fastGet2d(t_indices_, d, t_i) > THTensor_fastGet2d(src_indices_, d, s_i)) {
        s_i++;
        match = 0;
        break;
      }
    }
    if (!match) continue;
    for (d = 0; d < nDimI; d++) {
      THTensor_fastSet2d(r_indices_, d, r_i, THTensor_fastGet2d(t_indices_, d, t_i));
    }
    /* Multiply the two value blocks into the output block. */
    THSTensor_(mulSlice)(dstBuffer, src1Buffer, src2Buffer,
        r_values_, t_values_, s_values_, 0, r_i, t_i, s_i);
    r_i++;
    t_i++;
    s_i++;
  }

  r_->nnz = r_i;
  /* Result merges two coalesced sorted lists, so it stays coalesced. */
  r_->coalesced = 1;
  THLongTensor_free(t_indices_);
  THTensor_(free)(t_values_);
  THLongTensor_free(src_indices_);
  THTensor_(free)(s_values_);
  THTensor_(free)(src1Buffer);
  THTensor_(free)(src2Buffer);
  THTensor_(free)(dstBuffer);
  THSTensor_(free)(t);
  THSTensor_(free)(src);
}

/* r_ = t + value * (src1 * src2): dense output accumulated from the
 * elementwise product of two sparse tensors. */
void THTensor_(spaddcmul)(THTensor *r_, THTensor *t, real value, THSTensor *src1, THSTensor *src2) {
  THSTensor *intermediate = THSTensor_(new)();
  THSTensor_(cmul)(intermediate, src1, src2);
  THSTensor_(spcadd)(r_, t, value, intermediate);
  THSTensor_(free)(intermediate);
}

/* Build a CSR row-pointer array (length dim + 1) from the row component of
 * a coalesced COO index list.  `indices` points at the flat row indices. */
THLongTensor *THSTensor_(toCSR)(int64_t const *indices, int64_t dim, int64_t nnz) {
  int64_t h, i, hp0, hp1;
  THLongTensor *csr = THLongTensor_newWithSize1d(dim + 1);
  THLongTensor_zero(csr);

  // Convert the sparse matrix to CSR format
#pragma omp parallel for private(i, h, hp0, hp1) schedule(static) if (nnz > 10000)
  for (i=0; i<nnz; i++) {
    hp0 = indices[i];
    hp1 = (i+1 == nnz) ?  dim : indices[i+1];
    if (hp0 != hp1) for (h = hp0; h < hp1; h++) {
      THTensor_fastSet1d(csr, h+1, i+1);
    }
  }
  return csr;
}

/* Dense r_ = beta * t + alpha * (sparse_ @ dense); sparse_ is a 2-D sparse
 * matrix with scalar values, multiplied row-by-row through a CSR view. */
void THSTensor_(spaddmm)(THTensor *r_,
    real beta, THTensor *t,
    real alpha, THSTensor *sparse_, THTensor *dense) {
  int64_t h, i;
  int64_t dim_i, dim_j, dim_k; // ixj * jxk = ixk
  int64_t nnz;
  THLongTensor *csr, *indices;
  THTensor *values;

  THArgCheck(sparse_->nDimensionI == 2, 2,
      "matrices expected, got %dD tensor", sparse_->nDimensionI);
  THArgCheck(sparse_->nDimensionV == 0, 2,
      "scalar values expected, got %dD values", sparse_->nDimensionV);
  THArgCheck(dense->nDimension == 2, 2,
      "matrices expected, got %dD tensor", dense->nDimension);

  THSTensor *sparse = THSTensor_(newCoalesce)(sparse_);

  dim_i = THSTensor_(size)(sparse, 0);
  dim_j = THSTensor_(size)(sparse, 1);
  dim_k = THTensor_(size)(dense, 1);

  THTensor_(resize2d)(r_, dim_i, dim_k);
  THArgCheck(THTensor_(size)(dense, 0) == dim_j, 3,
      "Expected dim 0 size %d, got %d", dim_j, THTensor_(size)(dense, 0));
  THArgCheck(THTensor_(size)(t, 0) == dim_i, 1,
      "Expected dim 0 size %d, got %d", dim_i, THTensor_(size)(t, 0));
  THArgCheck(THTensor_(size)(t, 1) == dim_k, 1,
      "Expected dim 1 size %d, got %d", dim_k, THTensor_(size)(t, 1));

  nnz     = THSTensor_(nnz)(sparse);
  indices = THSTensor_(newIndices)(sparse);
  values  = THSTensor_(newValues)(sparse);

  csr = THSTensor_(toCSR)(THLongTensor_data(indices), dim_i, nnz);

  // r_ = alpha * sparse * dense
  if (beta == 0) {
    THTensor_(zero)(r_);
  } else if (beta == 1) {
    if (r_ != t) {
      THTensor_(copy)(r_, t);
    }
  } else {
    THTensor_(mul)(r_, t, beta);
  }
#pragma omp parallel for private(h, i) schedule(static) if (nnz > 10000)
  for (h = 0; h < dim_i; h++) {
    int64_t i_start = THTensor_fastGet1d(csr, h);
    int64_t i_end = THTensor_fastGet1d(csr, h+1);
    for (i = i_start; i < i_end; i++) {
      real val = THTensor_fastGet1d(values, i);
      int64_t col = THTensor_fastGet2d(indices, 1, i);
      if (col >= 0 && col < dim_j) {
        /* r_[h, :] += alpha * val * dense[col, :] */
        THBlas_(axpy)(dim_k,
            alpha * val,
            ROW_PTR2(dense, col), dense->stride[1],
            ROW_PTR2(r_, h), r_->stride[1]);
      } else {
        THError("index out of bound. spmm: %d not between 1 and %d",
            col, dim_j);
      }
    }
  }

  THLongTensor_free(csr);
  THLongTensor_free(indices);
  THTensor_(free)(values);
  THSTensor_(free)(sparse);
}

/* Sparse r_ = beta * t + alpha * (sparse_ @ dense), with the result kept
 * sparse: one dense row of the product is emitted per non-empty sparse row. */
void THSTensor_(sspaddmm)(THSTensor *r_,
    real beta, THSTensor *t,
    real alpha, THSTensor *sparse_, THTensor *dense) {
  int64_t h, i, p;
  int64_t dim_i, dim_j, dim_k; // ixj * jxk = ixk
  int64_t nnz, r_nnz, t_nnz;
  THLongTensor *csr, *indices, *newi, *narrowi;
  THTensor  *values, *newv, *narrowv;

  THArgCheck(sparse_->nDimensionI == 2, 2,
      "matrices expected, got %dD tensor", sparse_->nDimensionI);
  THArgCheck(sparse_->nDimensionV == 0, 2,
      "scalar values expected, got %dD values", sparse_->nDimensionV);
  THArgCheck(dense->nDimension == 2, 2,
      "matrices expected, got %dD tensor", dense->nDimension);

  THSTensor *sparse = THSTensor_(newCoalesce)(sparse_);

  dim_i = THSTensor_(size)(sparse, 0);
  dim_j = THSTensor_(size)(sparse, 1);
  dim_k = THTensor_(size)(dense, 1);

  THSTensor_(resize2d)(r_, dim_i, dim_k);
  THArgCheck(THTensor_(size)(dense, 0) == dim_j, 3,
      "Expected dim 0 size %d, got %d", dim_j, THTensor_(size)(dense, 0));
  THArgCheck(THSTensor_(size)(t, 0) == dim_i, 1,
      "Expected dim 0 size %d, got %d", dim_i, THSTensor_(size)(t, 0));
  THArgCheck(THSTensor_(size)(t, 1) == dim_k, 1,
      "Expected dim 1 size %d, got %d", dim_k, THSTensor_(size)(t, 1));

  nnz     = THSTensor_(nnz)(sparse);
  indices = THSTensor_(newIndices)(sparse);
  values  = THSTensor_(newValues)(sparse);

  csr = THSTensor_(toCSR)(THLongTensor_data(indices), dim_i, nnz);

  t_nnz = THSTensor_(nnz)(t);
  /* Worst case: every sparse row is non-empty and contributes dim_k entries. */
  r_nnz = nnz * dim_k + t_nnz;
  newi = THLongTensor_newWithSize2d(2, r_nnz);
  newv = THTensor_(newWithSize1d)(r_nnz);
  THTensor_(zero)(newv);

  if (t_nnz != 0) {
    /* Seed the first t_nnz output entries with beta * t. */
    THLongTensor *t_indices_ = THSTensor_(newIndices)(t);
    THTensor *t_values_ = THSTensor_(newValues)(t);
    narrowi = THLongTensor_newNarrow(newi, 1, 0, t_nnz);
    narrowv = THTensor_(newNarrow)(newv, 0, 0, t_nnz);
    THLongTensor_copy(narrowi, t_indices_);
    THTensor_(copy)(narrowv, t_values_);
    /* Only the first t_nnz entries of newv are non-zero, so scaling the
     * whole tensor by beta only scales the part copied from t. */
    THTensor_(mul)(newv, newv, beta);
    THLongTensor_free(narrowi);
    THTensor_(free)(narrowv);
    /* BUGFIX: newIndices()/newValues() return retained tensors; previously
     * they were passed to the copies inline and leaked. */
    THLongTensor_free(t_indices_);
    THTensor_(free)(t_values_);
  }

  // sparse = sparse * dense
  p = t_nnz;
  for (h = 0; h < dim_i; h++) {
    int64_t i_start = THTensor_fastGet1d(csr, h);
    int64_t i_end = THTensor_fastGet1d(csr, h+1);
    for (i = i_start; i < i_end; i++) {
      real val = THTensor_fastGet1d(values, i);
      int64_t col = THTensor_fastGet2d(indices, 1, i);
      if (col >= 0 && col < dim_j) {
        THBlas_(axpy)(dim_k,
            alpha * val,
            ROW_PTR2(dense, col), dense->stride[1],
            ROW_PTR2(newv, p), 1);
      } else {
        THError("index out of bound. sspmm: %d not between 1 and %d",
            col, dim_j);
      }
    }
    // Fill up the indices with the right values
    if (i_start != i_end) {
      for (i = 0; i < dim_k; i++) {
        THTensor_fastSet2d(newi, 0, p + i, h);
        THTensor_fastSet2d(newi, 1, p + i, i);
      }
      p += dim_k;
    }
  }

  // to avoid a clone
  /* BUGFIX: install the freshly built tensors via _move (as every other
   * function in this file does) so the index/value tensors previously owned
   * by r_ are released; raw member assignment leaked them. */
  THSTensor_(_move)(r_, newi, newv);
  r_->nnz = p;

  THLongTensor_free(csr);
  THLongTensor_free(indices);
  THTensor_(free)(values);
  THSTensor_(free)(sparse);
}

/* Hybrid product r_ = alpha * (sparse_ @ dense): the result has sparse row
 * indices and dense value rows (one dense row per distinct sparse row). */
void THSTensor_(hspmm)(THSTensor *r_, real alpha, THSTensor *sparse_, THTensor *dense) {
  THArgCheck(sparse_->nDimensionI == 2, 2,
      "matrices expected, got %dD tensor", sparse_->nDimensionI);
  THArgCheck(sparse_->nDimensionV == 0, 2,
      "scalar values expected, got %dD values", sparse_->nDimensionV);
  THArgCheck(dense->nDimension == 2, 2,
      "matrices expected, got %dD tensor", dense->nDimension);

  int64_t m = THSTensor_(size)(sparse_, 0);
  int64_t k = THSTensor_(size)(sparse_, 1);
  int64_t n = THTensor_(size)(dense, 1);

  THArgCheck(THTensor_(size)(dense, 0) == k, 3,
      "Expected dim 0 size %d, got %d", k, THTensor_(size)(dense, 0));
  int64_t size[2] = {m, n};
  THSTensor_(rawResize)(r_, 1, 1, size);

  THSTensor *sparse = THSTensor_(newCoalesce)(sparse_);

  int64_t nnz = THSTensor_(nnz)(sparse);
  THLongTensor *indices = THLongTensor_newWithSize2d(1, nnz);

  // Initialize the sparse matrix that will be used with spaddmm to send rows
  // from the dense matrix to rows of the output's value tensor
  THSTensor *newSparse = THSTensor_(newClone)(sparse);
  THLongTensor *spIndices = THSTensor_(newIndices)(newSparse);
  THLongTensor *valueIndices = THLongTensor_new();
  THLongTensor_select(valueIndices, spIndices, 0, 0);

  // Compute output indices
  /* Remap the (sorted) row indices to a dense 0..outNnz-1 range, recording
   * one output index per distinct input row. */
  int64_t i = -1, prevIdx = -1;
  for (int64_t j = 0; j < nnz; j++) {
    int64_t currIdx = THTensor_fastGet1d(valueIndices, j);
    if (currIdx != prevIdx) {
      THTensor_fastSet2d(indices, 0, ++i, currIdx);
      prevIdx = currIdx;
    }
    THTensor_fastSet1d(valueIndices, j, i);
  }
  int64_t outNnz = i + 1;
  THLongTensor_resize2d(indices, 1, outNnz);
  THTensor *values = THTensor_(newWithSize2d)(outNnz, n);
  newSparse->size[0] = outNnz;

  // Compute output values tensor with sparse * dense multiplication
  THSTensor_(spaddmm)(values, 0, values, alpha, newSparse, dense);
  THSTensor_(_move)(r_, indices, values);

  THSTensor_(free)(newSparse);
  THLongTensor_free(spIndices);
  THLongTensor_free(valueIndices);
  THSTensor_(free)(sparse);
}

/* Dense r_ = dense + value * sparse_.  If the sparse tensor carries dense
 * value blocks (nDim > nDimI) each block is added with cadd; otherwise each
 * scalar is accumulated directly into the output storage. */
void THSTensor_(spcadd)(THTensor *r_, THTensor *dense, real value, THSTensor *sparse_) {
  THTensor_(resizeAs)(r_, dense);
  THSTensor *sparse = THSTensor_(newCoalesce)(sparse_);

  int64_t k;
  THLongTensor  *indices = THSTensor_(newIndices)(sparse);
  THTensor      *values = THSTensor_(newValues)(sparse);
  int64_t nDim = THTensor_(nDimension)(dense);
  int64_t nDimI = THSTensor_(nDimensionI)(sparse);

  if (r_ != dense) THTensor_(copy)(r_, dense);

  if (nDim > nDimI) {
    THTensor *srcBuffer = THTensor_(new)();
    THTensor *dstBuffer = THTensor_(new)();
    for (k = 0; k < sparse->nnz; k++) {
      /* Select the destination slice addressed by the k-th sparse index. */
      THTensor_(set)(dstBuffer, r_);
      for (int64_t d = 0; d < sparse->nDimensionI; d++) {
        THTensor_(select)(dstBuffer, dstBuffer, 0, THTensor_fastGet2d(indices, d, k));
      }
      THTensor_(select)(srcBuffer, values, 0, k);
      THTensor_(cadd)(dstBuffer, dstBuffer, value, srcBuffer);
    }
    THTensor_(free)(srcBuffer);
    THTensor_(free)(dstBuffer);
  } else {
    /* Coalesced input => indices are distinct, so iterations touch disjoint
     * elements and the parallel accumulation is race-free. */
    #pragma omp parallel for private(k)
    for (k = 0; k < sparse->nnz; k++) {
      int64_t index = r_->storageOffset;
      for (int64_t d = 0; d < sparse->nDimensionI; d++) {
        index += r_->stride[d] * THTensor_fastGet2d(indices, d, k);
      }
      r_->storage->data[index] += value * THTensor_fastGet1d(values, k);
    }
  }

  THLongTensor_free(indices);
  THTensor_(free)(values);
  THSTensor_(free)(sparse);
}

#undef ROW_PTR2
#undef COL_PTR2

#endif
/* ========================= File: OpenMPClause.h ========================== */
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // /// \file /// This file defines OpenMP AST classes for clauses. /// There are clauses for executable directives, clauses for declarative /// directives and clauses which can be used in both kinds of directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H #define LLVM_CLANG_AST_OPENMPCLAUSE_H #include "clang/AST/ASTFwd.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/TrailingObjects.h" #include <cassert> #include <cstddef> #include <iterator> #include <utility> namespace clang { class ASTContext; //===----------------------------------------------------------------------===// // AST classes for clauses. //===----------------------------------------------------------------------===// /// This is a basic class for representing single OpenMP clause. class OMPClause { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Ending location of the clause. 
SourceLocation EndLoc; /// Kind of the clause. OpenMPClauseKind Kind; protected: OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc) : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {} public: /// Returns the starting location of the clause. SourceLocation getBeginLoc() const { return StartLoc; } /// Returns the ending location of the clause. SourceLocation getEndLoc() const { return EndLoc; } /// Sets the starting location of the clause. void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// Sets the ending location of the clause. void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// Returns kind of OpenMP clause (private, shared, reduction, etc.). OpenMPClauseKind getClauseKind() const { return Kind; } bool isImplicit() const { return StartLoc.isInvalid(); } using child_iterator = StmtIterator; using const_child_iterator = ConstStmtIterator; using child_range = llvm::iterator_range<child_iterator>; using const_child_range = llvm::iterator_range<const_child_iterator>; child_range children(); const_child_range children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } /// Get the iterator range for the expressions used in the clauses. Used /// expressions include only the children that must be evaluated at the /// runtime before entering the construct. child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *) { return true; } }; /// Class that handles pre-initialization statement for some clauses, like /// 'shedule', 'firstprivate' etc. class OMPClauseWithPreInit { friend class OMPClauseReader; /// Pre-initialization statement for the clause. Stmt *PreInit = nullptr; /// Region that captures the associated stmt. 
OpenMPDirectiveKind CaptureRegion = llvm::omp::OMPD_unknown; protected: OMPClauseWithPreInit(const OMPClause *This) { assert(get(This) && "get is not tuned for pre-init."); } /// Set pre-initialization statement for the clause. void setPreInitStmt(Stmt *S, OpenMPDirectiveKind ThisRegion = llvm::omp::OMPD_unknown) { PreInit = S; CaptureRegion = ThisRegion; } public: /// Get pre-initialization statement for the clause. const Stmt *getPreInitStmt() const { return PreInit; } /// Get pre-initialization statement for the clause. Stmt *getPreInitStmt() { return PreInit; } /// Get capture region for the stmt in the clause. OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; } static OMPClauseWithPreInit *get(OMPClause *C); static const OMPClauseWithPreInit *get(const OMPClause *C); }; /// Class that handles post-update expression for some clauses, like /// 'lastprivate', 'reduction' etc. class OMPClauseWithPostUpdate : public OMPClauseWithPreInit { friend class OMPClauseReader; /// Post-update expression for the clause. Expr *PostUpdate = nullptr; protected: OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) { assert(get(This) && "get is not tuned for post-update."); } /// Set pre-initialization statement for the clause. void setPostUpdateExpr(Expr *S) { PostUpdate = S; } public: /// Get post-update expression for the clause. const Expr *getPostUpdateExpr() const { return PostUpdate; } /// Get post-update expression for the clause. Expr *getPostUpdateExpr() { return PostUpdate; } static OMPClauseWithPostUpdate *get(OMPClause *C); static const OMPClauseWithPostUpdate *get(const OMPClause *C); }; /// This structure contains most locations needed for by an OMPVarListClause. struct OMPVarListLocTy { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Location of '('. SourceLocation LParenLoc; /// Ending location of the clause. 
SourceLocation EndLoc; OMPVarListLocTy() = default; OMPVarListLocTy(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : StartLoc(StartLoc), LParenLoc(LParenLoc), EndLoc(EndLoc) {} }; /// This represents clauses with the list of variables like 'private', /// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the /// '#pragma omp ...' directives. template <class T> class OMPVarListClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Number of variables in the list. unsigned NumVars; protected: /// Build a clause with \a N variables /// /// \param K Kind of the clause. /// \param StartLoc Starting location of the clause (the clause keyword). /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {} /// Fetches list of variables associated with this clause. MutableArrayRef<Expr *> getVarRefs() { return MutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } /// Sets the list of variables for this clause. 
void setVarRefs(ArrayRef<Expr *> VL) { assert(VL.size() == NumVars && "Number of variables is not the same as the preallocated buffer"); std::copy(VL.begin(), VL.end(), static_cast<T *>(this)->template getTrailingObjects<Expr *>()); } public: using varlist_iterator = MutableArrayRef<Expr *>::iterator; using varlist_const_iterator = ArrayRef<const Expr *>::iterator; using varlist_range = llvm::iterator_range<varlist_iterator>; using varlist_const_range = llvm::iterator_range<varlist_const_iterator>; unsigned varlist_size() const { return NumVars; } bool varlist_empty() const { return NumVars == 0; } varlist_range varlists() { return varlist_range(varlist_begin(), varlist_end()); } varlist_const_range varlists() const { return varlist_const_range(varlist_begin(), varlist_end()); } varlist_iterator varlist_begin() { return getVarRefs().begin(); } varlist_iterator varlist_end() { return getVarRefs().end(); } varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); } varlist_const_iterator varlist_end() const { return getVarRefs().end(); } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Fetches list of all variables in the clause. ArrayRef<const Expr *> getVarRefs() const { return llvm::makeArrayRef( static_cast<const T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } }; /// This represents 'allocator' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp allocate(a) allocator(omp_default_mem_alloc) /// \endcode /// In this example directive '#pragma omp allocate' has simple 'allocator' /// clause with the allocator 'omp_default_mem_alloc'. class OMPAllocatorClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Expression with the allocator. Stmt *Allocator = nullptr; /// Set allocator. 
void setAllocator(Expr *A) { Allocator = A; } public: /// Build 'allocator' clause with the given allocator. /// /// \param A Allocator. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_allocator, StartLoc, EndLoc), LParenLoc(LParenLoc), Allocator(A) {} /// Build an empty clause. OMPAllocatorClause() : OMPClause(llvm::omp::OMPC_allocator, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns allocator. Expr *getAllocator() const { return cast_or_null<Expr>(Allocator); } child_range children() { return child_range(&Allocator, &Allocator + 1); } const_child_range children() const { return const_child_range(&Allocator, &Allocator + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_allocator; } }; /// This represents clause 'allocate' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel private(a) allocate(omp_default_mem_alloc :a) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// and clause 'allocate' for the variable 'a'. class OMPAllocateClause final : public OMPVarListClause<OMPAllocateClause>, private llvm::TrailingObjects<OMPAllocateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Allocator specified in the clause, or 'nullptr' if the default one is /// used. 
Expr *Allocator = nullptr; /// Position of the ':' delimiter in the clause; SourceLocation ColonLoc; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Allocator Allocator expression. /// \param ColonLoc Location of ':' delimiter. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, StartLoc, LParenLoc, EndLoc, N), Allocator(Allocator), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPAllocateClause(unsigned N) : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } void setAllocator(Expr *A) { Allocator = A; } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Allocator Allocator expression. /// \param ColonLoc Location of ':' delimiter. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPAllocateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Returns the allocator expression or nullptr, if no allocator is specified. Expr *getAllocator() const { return Allocator; } /// Returns the location of the ':' delimiter. SourceLocation getColonLoc() const { return ColonLoc; } /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. 
/// \param N The number of variables. static OMPAllocateClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPAllocateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_allocate; } }; /// This represents 'if' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel if(parallel:a > 5) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'if' clause with /// condition 'a > 5' and directive name modifier 'parallel'. class OMPIfClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Location of ':' (if any). SourceLocation ColonLoc; /// Directive name modifier for the clause. OpenMPDirectiveKind NameModifier = llvm::omp::OMPD_unknown; /// Name modifier location. SourceLocation NameModifierLoc; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } /// Set directive name modifier for the clause. void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; } /// Set location of directive name modifier for the clause. void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; } /// Set location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Build 'if' clause with condition \a Cond. /// /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause. /// \param Cond Condition of the clause. 
/// \param HelperCond Helper condition for the clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param NameModifierLoc Location of directive name modifier. /// \param ColonLoc [OpenMP 4.1] Location of ':'. /// \param EndLoc Ending location of the clause. OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc), NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPIfClause() : OMPClause(llvm::omp::OMPC_if, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } /// Return directive name modifier associated with the clause. OpenMPDirectiveKind getNameModifier() const { return NameModifier; } /// Return the location of directive name modifier. 
  SourceLocation getNameModifierLoc() const { return NameModifierLoc; }

  child_range children() { return child_range(&Condition, &Condition + 1); }

  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPIfClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_if;
  }
};

/// This represents 'final' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task final(a > 5)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'final'
/// clause with condition 'a > 5'.
class OMPFinalClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'final' clause.
  Stmt *Condition = nullptr;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

public:
  /// Build 'final' clause with condition \a Cond.
  ///
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPFinalClause(Expr *Cond, Stmt *HelperCond,
                 OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                 SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_final, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) {
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// Build an empty clause.
  OMPFinalClause()
      : OMPClause(llvm::omp::OMPC_final, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPFinalClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_final; } }; /// This represents 'num_threads' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel num_threads(6) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'num_threads' /// clause with number of threads '6'. class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'num_threads' clause. Stmt *NumThreads = nullptr; /// Set condition. void setNumThreads(Expr *NThreads) { NumThreads = NThreads; } public: /// Build 'num_threads' clause with condition \a NumThreads. /// /// \param NumThreads Number of threads for the construct. /// \param HelperNumThreads Helper Number of threads for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_num_threads, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumThreads(NumThreads) { setPreInitStmt(HelperNumThreads, CaptureRegion); } /// Build an empty clause. OMPNumThreadsClause() : OMPClause(llvm::omp::OMPC_num_threads, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns number of threads. Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); } child_range children() { return child_range(&NumThreads, &NumThreads + 1); } const_child_range children() const { return const_child_range(&NumThreads, &NumThreads + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_num_threads; } }; /// This represents 'safelen' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd safelen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'safelen' /// with single expression '4'. /// If the safelen clause is used then no two iterations executed /// concurrently with SIMD instructions can have a greater distance /// in the logical iteration space than its value. The parameter of /// the safelen clause must be a constant positive integer expression. class OMPSafelenClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Safe iteration space distance. Stmt *Safelen = nullptr; /// Set safelen. 
  void setSafelen(Expr *Len) { Safelen = Len; }

public:
  /// Build 'safelen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_safelen, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Safelen(Len) {}

  /// Build an empty clause.
  explicit OMPSafelenClause()
      : OMPClause(llvm::omp::OMPC_safelen, SourceLocation(), SourceLocation()) {
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return safe iteration space distance.
  Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }

  child_range children() { return child_range(&Safelen, &Safelen + 1); }

  const_child_range children() const {
    return const_child_range(&Safelen, &Safelen + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_safelen;
  }
};

/// This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
class OMPSimdlenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Preferred number of iterations to be executed concurrently.
  Stmt *Simdlen = nullptr;

  /// Set simdlen.
  void setSimdlen(Expr *Len) { Simdlen = Len; }

public:
  /// Build 'simdlen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_simdlen, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Simdlen(Len) {}

  /// Build an empty clause.
  explicit OMPSimdlenClause()
      : OMPClause(llvm::omp::OMPC_simdlen, SourceLocation(), SourceLocation()) {
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the preferred number of iterations to be executed concurrently.
  Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); }

  child_range children() { return child_range(&Simdlen, &Simdlen + 1); }

  const_child_range children() const {
    return const_child_range(&Simdlen, &Simdlen + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_simdlen;
  }
};

/// This represents the 'sizes' clause in the '#pragma omp tile' directive.
///
/// \code
/// #pragma omp tile sizes(5,5)
/// for (int i = 0; i < 64; ++i)
///   for (int j = 0; j < 64; ++j)
/// \endcode
class OMPSizesClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPSizesClause, Expr *> {
  friend class OMPClauseReader;
  friend class llvm::TrailingObjects<OMPSizesClause, Expr *>;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of tile sizes in the clause.
  unsigned NumSizes;

  /// Build an empty clause.
explicit OMPSizesClause(int NumSizes) : OMPClause(llvm::omp::OMPC_sizes, SourceLocation(), SourceLocation()), NumSizes(NumSizes) {} public: /// Build a 'sizes' AST node. /// /// \param C Context of the AST. /// \param StartLoc Location of the 'sizes' identifier. /// \param LParenLoc Location of '('. /// \param EndLoc Location of ')'. /// \param Sizes Content of the clause. static OMPSizesClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> Sizes); /// Build an empty 'sizes' AST node for deserialization. /// /// \param C Context of the AST. /// \param NumSizes Number of items in the clause. static OMPSizesClause *CreateEmpty(const ASTContext &C, unsigned NumSizes); /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns the number of list items. unsigned getNumSizes() const { return NumSizes; } /// Returns the tile size expressions. MutableArrayRef<Expr *> getSizesRefs() { return MutableArrayRef<Expr *>(static_cast<OMPSizesClause *>(this) ->template getTrailingObjects<Expr *>(), NumSizes); } ArrayRef<Expr *> getSizesRefs() const { return ArrayRef<Expr *>(static_cast<const OMPSizesClause *>(this) ->template getTrailingObjects<Expr *>(), NumSizes); } /// Sets the tile size expressions. 
void setSizesRefs(ArrayRef<Expr *> VL) { assert(VL.size() == NumSizes); std::copy(VL.begin(), VL.end(), static_cast<OMPSizesClause *>(this) ->template getTrailingObjects<Expr *>()); } child_range children() { MutableArrayRef<Expr *> Sizes = getSizesRefs(); return child_range(reinterpret_cast<Stmt **>(Sizes.begin()), reinterpret_cast<Stmt **>(Sizes.end())); } const_child_range children() const { ArrayRef<Expr *> Sizes = getSizesRefs(); return const_child_range(reinterpret_cast<Stmt *const *>(Sizes.begin()), reinterpret_cast<Stmt *const *>(Sizes.end())); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_sizes; } }; /// Representation of the 'full' clause of the '#pragma omp unroll' directive. /// /// \code /// #pragma omp unroll full /// for (int i = 0; i < 64; ++i) /// \endcode class OMPFullClause final : public OMPClause { friend class OMPClauseReader; /// Build an empty clause. explicit OMPFullClause() : OMPClause(llvm::omp::OMPC_full, {}, {}) {} public: /// Build an AST node for a 'full' clause. /// /// \param C Context of the AST. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. static OMPFullClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Build an empty 'full' AST node for deserialization. /// /// \param C Context of the AST. 
static OMPFullClause *CreateEmpty(const ASTContext &C); child_range children() { return {child_iterator(), child_iterator()}; } const_child_range children() const { return {const_child_iterator(), const_child_iterator()}; } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_full; } }; /// Representation of the 'partial' clause of the '#pragma omp unroll' /// directive. /// /// \code /// #pragma omp unroll partial(4) /// for (int i = start; i < end; ++i) /// \endcode class OMPPartialClause final : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Optional argument to the clause (unroll factor). Stmt *Factor; /// Build an empty clause. explicit OMPPartialClause() : OMPClause(llvm::omp::OMPC_partial, {}, {}) {} /// Set the unroll factor. void setFactor(Expr *E) { Factor = E; } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } public: /// Build an AST node for a 'partial' clause. /// /// \param C Context of the AST. /// \param StartLoc Location of the 'partial' identifier. /// \param LParenLoc Location of '('. /// \param EndLoc Location of ')'. /// \param Factor Clause argument. static OMPPartialClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, Expr *Factor); /// Build an empty 'partial' AST node for deserialization. /// /// \param C Context of the AST. static OMPPartialClause *CreateEmpty(const ASTContext &C); /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns the argument of the clause or nullptr if not set. 
Expr *getFactor() const { return cast_or_null<Expr>(Factor); } child_range children() { return child_range(&Factor, &Factor + 1); } const_child_range children() const { return const_child_range(&Factor, &Factor + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_partial; } }; /// This represents 'collapse' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd collapse(3) /// \endcode /// In this example directive '#pragma omp simd' has clause 'collapse' /// with single expression '3'. /// The parameter must be a constant positive integer expression, it specifies /// the number of nested loops that should be collapsed into a single iteration /// space. class OMPCollapseClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Number of for-loops. Stmt *NumForLoops = nullptr; /// Set the number of associated for-loops. void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// Build 'collapse' clause. /// /// \param Num Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPCollapseClause(Expr *Num, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num) {} /// Build an empty clause. explicit OMPCollapseClause() : OMPClause(llvm::omp::OMPC_collapse, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. 
SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the number of associated for-loops. Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } const_child_range children() const { return const_child_range(&NumForLoops, &NumForLoops + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_collapse; } }; /// This represents 'default' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel default(shared) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'default' /// clause with kind 'shared'. class OMPDefaultClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'default' clause. llvm::omp::DefaultKind Kind = llvm::omp::OMP_DEFAULT_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clauses. /// /// \param K Argument of clause. void setDefaultKind(llvm::omp::DefaultKind K) { Kind = K; } /// Set argument location. /// /// \param KLoc Argument location. void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'default' clause with argument \a A ('none' or 'shared'). /// /// \param A Argument of the clause ('none' or 'shared'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
OMPDefaultClause(llvm::omp::DefaultKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPDefaultClause() : OMPClause(llvm::omp::OMPC_default, SourceLocation(), SourceLocation()) { } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. llvm::omp::DefaultKind getDefaultKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_default; } }; /// This represents 'proc_bind' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel proc_bind(master) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'proc_bind' /// clause with kind 'master'. class OMPProcBindClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'proc_bind' clause. llvm::omp::ProcBindKind Kind = llvm::omp::OMP_PROC_BIND_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Kind of clause. void setProcBindKind(llvm::omp::ProcBindKind K) { Kind = K; } /// Set clause kind location. /// /// \param KLoc Kind location. 
void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'proc_bind' clause with argument \a A ('master', 'close' or /// 'spread'). /// /// \param A Argument of the clause ('master', 'close' or 'spread'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPProcBindClause(llvm::omp::ProcBindKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPProcBindClause() : OMPClause(llvm::omp::OMPC_proc_bind, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. llvm::omp::ProcBindKind getProcBindKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_proc_bind; } }; /// This represents 'unified_address' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires unified_address /// \endcode /// In this example directive '#pragma omp requires' has 'unified_address' /// clause. 
class OMPUnifiedAddressClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'unified_address' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_unified_address, StartLoc, EndLoc) {} /// Build an empty clause. OMPUnifiedAddressClause() : OMPClause(llvm::omp::OMPC_unified_address, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_unified_address; } }; /// This represents 'unified_shared_memory' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires unified_shared_memory /// \endcode /// In this example directive '#pragma omp requires' has 'unified_shared_memory' /// clause. class OMPUnifiedSharedMemoryClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'unified_shared_memory' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_unified_shared_memory, StartLoc, EndLoc) {} /// Build an empty clause. 
OMPUnifiedSharedMemoryClause() : OMPClause(llvm::omp::OMPC_unified_shared_memory, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_unified_shared_memory; } }; /// This represents 'reverse_offload' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires reverse_offload /// \endcode /// In this example directive '#pragma omp requires' has 'reverse_offload' /// clause. class OMPReverseOffloadClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'reverse_offload' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_reverse_offload, StartLoc, EndLoc) {} /// Build an empty clause. OMPReverseOffloadClause() : OMPClause(llvm::omp::OMPC_reverse_offload, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_reverse_offload; } }; /// This represents 'dynamic_allocators' clause in the '#pragma omp requires' /// directive. 
/// /// \code /// #pragma omp requires dynamic_allocators /// \endcode /// In this example directive '#pragma omp requires' has 'dynamic_allocators' /// clause. class OMPDynamicAllocatorsClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'dynamic_allocators' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_dynamic_allocators, StartLoc, EndLoc) {} /// Build an empty clause. OMPDynamicAllocatorsClause() : OMPClause(llvm::omp::OMPC_dynamic_allocators, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_dynamic_allocators; } }; /// This represents 'atomic_default_mem_order' clause in the '#pragma omp /// requires' directive. /// /// \code /// #pragma omp requires atomic_default_mem_order(seq_cst) /// \endcode /// In this example directive '#pragma omp requires' has simple /// atomic_default_mem_order' clause with kind 'seq_cst'. class OMPAtomicDefaultMemOrderClause final : public OMPClause { friend class OMPClauseReader; /// Location of '(' SourceLocation LParenLoc; /// A kind of the 'atomic_default_mem_order' clause. OpenMPAtomicDefaultMemOrderClauseKind Kind = OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Kind of clause. 
  void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) {
    Kind = K;
  }

  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) {
    KindKwLoc = KLoc;
  }

public:
  /// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst',
  /// 'acq_rel' or 'relaxed').
  ///
  /// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A,
                                 SourceLocation ALoc, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPAtomicDefaultMemOrderClause()
      : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const {
    return Kind;
  }

  /// Returns location of clause kind.
  SourceLocation getAtomicDefaultMemOrderKindKwLoc() const { return KindKwLoc; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_atomic_default_mem_order;
  }
};

/// This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'schedule' clause.
  OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown;

  /// Modifiers for 'schedule' clause.
  enum {FIRST, SECOND, NUM_MODIFIERS};
  OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS];

  /// Locations of modifiers.
  SourceLocation ModifiersLoc[NUM_MODIFIERS];

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }

  /// Set the first schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[FIRST] = M;
  }

  /// Set the second schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[SECOND] = M;
  }

  /// Set location of the first schedule modifier.
  void setFirstScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[FIRST] = Loc;
  }

  /// Set location of the second schedule modifier.
  void setSecondScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[SECOND] = Loc;
  }

  /// Set schedule modifier; fills the first unused modifier slot.
  /// NOTE(review): the name keeps a historical misspelling ("Modifer");
  /// renaming would break existing callers.
  ///
  /// \param M Schedule modifier.
  void setScheduleModifer(OpenMPScheduleClauseModifier M) {
    // Fill the first slot if it is still unknown, otherwise the second;
    // at most two modifiers may be set.
    if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown)
      Modifiers[FIRST] = M;
    else {
      assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown);
      Modifiers[SECOND] = M;
    }
  }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'schedule' clause with schedule kind \a Kind and chunk size
  /// expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Schedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  /// \param M1 The first modifier applied to 'schedule' clause.
  /// \param M1Loc Location of the first modifier.
  /// \param M2 The second modifier applied to 'schedule' clause.
/// \param M2Loc Location of the second modifier OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize, OpenMPScheduleClauseModifier M1, SourceLocation M1Loc, OpenMPScheduleClauseModifier M2, SourceLocation M2Loc) : OMPClause(llvm::omp::OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); Modifiers[FIRST] = M1; Modifiers[SECOND] = M2; ModifiersLoc[FIRST] = M1Loc; ModifiersLoc[SECOND] = M2Loc; } /// Build an empty clause. explicit OMPScheduleClause() : OMPClause(llvm::omp::OMPC_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) { Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown; Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown; } /// Get kind of the clause. OpenMPScheduleClauseKind getScheduleKind() const { return Kind; } /// Get the first modifier of the clause. OpenMPScheduleClauseModifier getFirstScheduleModifier() const { return Modifiers[FIRST]; } /// Get the second modifier of the clause. OpenMPScheduleClauseModifier getSecondScheduleModifier() const { return Modifiers[SECOND]; } /// Get location of '('. SourceLocation getLParenLoc() { return LParenLoc; } /// Get kind location. SourceLocation getScheduleKindLoc() { return KindLoc; } /// Get the first modifier location. SourceLocation getFirstScheduleModifierLoc() const { return ModifiersLoc[FIRST]; } /// Get the second modifier location. SourceLocation getSecondScheduleModifierLoc() const { return ModifiersLoc[SECOND]; } /// Get location of ','. SourceLocation getCommaLoc() { return CommaLoc; } /// Get chunk size. Expr *getChunkSize() { return ChunkSize; } /// Get chunk size. 
const Expr *getChunkSize() const { return ChunkSize; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&ChunkSize), reinterpret_cast<Stmt **>(&ChunkSize) + 1); } const_child_range children() const { auto Children = const_cast<OMPScheduleClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_schedule; } }; /// This represents 'ordered' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for ordered (2) /// \endcode /// In this example directive '#pragma omp for' has 'ordered' clause with /// parameter 2. class OMPOrderedClause final : public OMPClause, private llvm::TrailingObjects<OMPOrderedClause, Expr *> { friend class OMPClauseReader; friend TrailingObjects; /// Location of '('. SourceLocation LParenLoc; /// Number of for-loops. Stmt *NumForLoops = nullptr; /// Real number of loops. unsigned NumberOfLoops = 0; /// Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param NumLoops Number of loops, associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num), NumberOfLoops(NumLoops) {} /// Build an empty clause. explicit OMPOrderedClause(unsigned NumLoops) : OMPClause(llvm::omp::OMPC_ordered, SourceLocation(), SourceLocation()), NumberOfLoops(NumLoops) {} /// Set the number of associated for-loops. 
void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param NumLoops Number of loops, associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. static OMPOrderedClause *Create(const ASTContext &C, Expr *Num, unsigned NumLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Build an empty clause. static OMPOrderedClause* CreateEmpty(const ASTContext &C, unsigned NumLoops); /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the number of associated for-loops. Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } /// Set number of iterations for the specified loop. void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations); /// Get number of iterations for all the loops. ArrayRef<Expr *> getLoopNumIterations() const; /// Set loop counter for the specified loop. void setLoopCounter(unsigned NumLoop, Expr *Counter); /// Get loops counter for the specified loop. Expr *getLoopCounter(unsigned NumLoop); const Expr *getLoopCounter(unsigned NumLoop) const; child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } const_child_range children() const { return const_child_range(&NumForLoops, &NumForLoops + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_ordered; } }; /// This represents 'nowait' clause in the '#pragma omp ...' directive. 
/// /// \code /// #pragma omp for nowait /// \endcode /// In this example directive '#pragma omp for' has 'nowait' clause. class OMPNowaitClause : public OMPClause { public: /// Build 'nowait' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_nowait, StartLoc, EndLoc) {} /// Build an empty clause. OMPNowaitClause() : OMPClause(llvm::omp::OMPC_nowait, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_nowait; } }; /// This represents 'untied' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task untied /// \endcode /// In this example directive '#pragma omp task' has 'untied' clause. class OMPUntiedClause : public OMPClause { public: /// Build 'untied' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_untied, StartLoc, EndLoc) {} /// Build an empty clause. 
OMPUntiedClause() : OMPClause(llvm::omp::OMPC_untied, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_untied; } }; /// This represents 'mergeable' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task mergeable /// \endcode /// In this example directive '#pragma omp task' has 'mergeable' clause. class OMPMergeableClause : public OMPClause { public: /// Build 'mergeable' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_mergeable, StartLoc, EndLoc) {} /// Build an empty clause. OMPMergeableClause() : OMPClause(llvm::omp::OMPC_mergeable, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_mergeable; } }; /// This represents 'read' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic read /// \endcode /// In this example directive '#pragma omp atomic' has 'read' clause. 
class OMPReadClause : public OMPClause { public: /// Build 'read' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_read, StartLoc, EndLoc) {} /// Build an empty clause. OMPReadClause() : OMPClause(llvm::omp::OMPC_read, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_read; } }; /// This represents 'write' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic write /// \endcode /// In this example directive '#pragma omp atomic' has 'write' clause. class OMPWriteClause : public OMPClause { public: /// Build 'write' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_write, StartLoc, EndLoc) {} /// Build an empty clause. 
  OMPWriteClause()
      : OMPClause(llvm::omp::OMPC_write, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_write;
  }
};

/// This represents 'update' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic update
/// \endcode
/// In this example directive '#pragma omp atomic' has 'update' clause.
/// Also, this class represents 'update' clause in '#pragma omp depobj'
/// directive.
///
/// \code
/// #pragma omp depobj(a) update(in)
/// \endcode
/// In this example directive '#pragma omp depobj' has 'update' clause with 'in'
/// dependence kind.
class OMPUpdateClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPUpdateClause, SourceLocation,
                                    OpenMPDependClauseKind> {
  friend class OMPClauseReader;
  friend TrailingObjects;

  /// true if extended version of the clause for 'depobj' directive.
  bool IsExtended = false;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<SourceLocation>) const {
    // 2 locations: for '(' and argument location.
    return IsExtended ? 2 : 0;
  }

  /// Sets the location of '(' in clause for 'depobj' directive.
  void setLParenLoc(SourceLocation Loc) {
    assert(IsExtended && "Expected extended clause.");
    *getTrailingObjects<SourceLocation>() = Loc;
  }

  /// Sets the location of the argument in clause for 'depobj' directive.
void setArgumentLoc(SourceLocation Loc) { assert(IsExtended && "Expected extended clause."); *std::next(getTrailingObjects<SourceLocation>(), 1) = Loc; } /// Sets the dependence kind for the clause for 'depobj' directive. void setDependencyKind(OpenMPDependClauseKind DK) { assert(IsExtended && "Expected extended clause."); *getTrailingObjects<OpenMPDependClauseKind>() = DK; } /// Build 'update' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc, bool IsExtended) : OMPClause(llvm::omp::OMPC_update, StartLoc, EndLoc), IsExtended(IsExtended) {} /// Build an empty clause. OMPUpdateClause(bool IsExtended) : OMPClause(llvm::omp::OMPC_update, SourceLocation(), SourceLocation()), IsExtended(IsExtended) {} public: /// Creates clause for 'atomic' directive. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates clause for 'depobj' directive. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ArgumentLoc Location of the argument. /// \param DK Dependence kind. /// \param EndLoc Ending location of the clause. static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ArgumentLoc, OpenMPDependClauseKind DK, SourceLocation EndLoc); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param IsExtended true if extended clause for 'depobj' directive must be /// created. static OMPUpdateClause *CreateEmpty(const ASTContext &C, bool IsExtended); /// Checks if the clause is the extended clauses for 'depobj' directive. 
  bool isExtended() const { return IsExtended; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// Gets the location of '(' in clause for 'depobj' directive.
  SourceLocation getLParenLoc() const {
    assert(IsExtended && "Expected extended clause.");
    return *getTrailingObjects<SourceLocation>();
  }

  /// Gets the location of argument in clause for 'depobj' directive.
  SourceLocation getArgumentLoc() const {
    assert(IsExtended && "Expected extended clause.");
    return *std::next(getTrailingObjects<SourceLocation>(), 1);
  }

  /// Gets the dependence kind in clause for 'depobj' directive.
  OpenMPDependClauseKind getDependencyKind() const {
    assert(IsExtended && "Expected extended clause.");
    return *getTrailingObjects<OpenMPDependClauseKind>();
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_update;
  }
};

/// This represents 'capture' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has 'capture' clause.
class OMPCaptureClause : public OMPClause {
public:
  /// Build 'capture' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_capture, StartLoc, EndLoc) {}

  /// Build an empty clause.
OMPCaptureClause() : OMPClause(llvm::omp::OMPC_capture, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_capture; } }; /// This represents 'seq_cst' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic seq_cst /// \endcode /// In this example directive '#pragma omp atomic' has 'seq_cst' clause. class OMPSeqCstClause : public OMPClause { public: /// Build 'seq_cst' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_seq_cst, StartLoc, EndLoc) {} /// Build an empty clause. OMPSeqCstClause() : OMPClause(llvm::omp::OMPC_seq_cst, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_seq_cst; } }; /// This represents 'acq_rel' clause in the '#pragma omp atomic|flush' /// directives. /// /// \code /// #pragma omp flush acq_rel /// \endcode /// In this example directive '#pragma omp flush' has 'acq_rel' clause. 
class OMPAcqRelClause final : public OMPClause {
public:
  /// Build 'acq_rel' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_acq_rel, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPAcqRelClause()
      : OMPClause(llvm::omp::OMPC_acq_rel, SourceLocation(), SourceLocation()) {
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_acq_rel;
  }
};

/// This represents 'acquire' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acquire
/// \endcode
/// In this example directive '#pragma omp flush' has 'acquire' clause.
class OMPAcquireClause final : public OMPClause {
public:
  /// Build 'acquire' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_acquire, StartLoc, EndLoc) {}

  /// Build an empty clause.
OMPAcquireClause() : OMPClause(llvm::omp::OMPC_acquire, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_acquire; } }; /// This represents 'release' clause in the '#pragma omp atomic|flush' /// directives. /// /// \code /// #pragma omp flush release /// \endcode /// In this example directive '#pragma omp flush' has 'release' clause. class OMPReleaseClause final : public OMPClause { public: /// Build 'release' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_release, StartLoc, EndLoc) {} /// Build an empty clause. OMPReleaseClause() : OMPClause(llvm::omp::OMPC_release, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_release; } }; /// This represents 'relaxed' clause in the '#pragma omp atomic' /// directives. /// /// \code /// #pragma omp atomic relaxed /// \endcode /// In this example directive '#pragma omp atomic' has 'relaxed' clause. 
class OMPRelaxedClause final : public OMPClause { public: /// Build 'relaxed' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_relaxed, StartLoc, EndLoc) {} /// Build an empty clause. OMPRelaxedClause() : OMPClause(llvm::omp::OMPC_relaxed, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_relaxed; } }; /// This represents clause 'private' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel private(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// with the variables 'a' and 'b'. class OMPPrivateClause final : public OMPVarListClause<OMPPrivateClause>, private llvm::TrailingObjects<OMPPrivateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. 
explicit OMPPrivateClause(unsigned N) : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param PrivateVL List of references to private copies with initializers. static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N); using private_copies_iterator = MutableArrayRef<Expr *>::iterator; using private_copies_const_iterator = ArrayRef<const Expr *>::iterator; using private_copies_range = llvm::iterator_range<private_copies_iterator>; using private_copies_const_range = llvm::iterator_range<private_copies_const_iterator>; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPPrivateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_private; } }; /// This represents clause 'firstprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel firstprivate(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'firstprivate' /// with the variables 'a' and 'b'. class OMPFirstprivateClause final : public OMPVarListClause<OMPFirstprivateClause>, public OMPClauseWithPreInit, private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
/// \param N Number of the variables in the clause. OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFirstprivateClause>(llvm::omp::OMPC_firstprivate, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPreInit(this) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPFirstprivateClause(unsigned N) : OMPVarListClause<OMPFirstprivateClause>( llvm::omp::OMPC_firstprivate, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPreInit(this) {} /// Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Sets the list of references to initializer variables for new /// private variables. /// \param VL List of references. void setInits(ArrayRef<Expr *> VL); /// Gets the list of references to initializer variables for new /// private variables. MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. /// \param PrivateVL List of references to private copies with initializers. 
/// \param InitVL List of references to auto generated variables used for /// initialization of a single array element. Used if firstprivate variable is /// of array type. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. static OMPFirstprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL, ArrayRef<Expr *> InitVL, Stmt *PreInit); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N); using private_copies_iterator = MutableArrayRef<Expr *>::iterator; using private_copies_const_iterator = ArrayRef<const Expr *>::iterator; using private_copies_range = llvm::iterator_range<private_copies_iterator>; using private_copies_const_range = llvm::iterator_range<private_copies_const_iterator>; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } using inits_iterator = MutableArrayRef<Expr *>::iterator; using inits_const_iterator = ArrayRef<const Expr *>::iterator; using inits_range = llvm::iterator_range<inits_iterator>; using inits_const_range = llvm::iterator_range<inits_const_iterator>; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPFirstprivateClause *>(this)->children(); return const_child_range(Children.begin(), 
                             Children.end());
  }

  child_range used_children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range used_children() const {
    auto Children = const_cast<OMPFirstprivateClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_firstprivate;
  }
};

/// This represents clause 'lastprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
    : public OMPVarListClause<OMPLastprivateClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
  // There are 4 additional tail-allocated arrays at the end of the class:
  // 1. Contains list of pseudo variables with the default initialization for
  // each non-firstprivate variables. Used in codegen for initialization of
  // lastprivate copies.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents private variables
  // (for arrays, single array element).
  // 3. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents original variables
  // (for arrays, single array element).
  // 4. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of final assignment performed by the
  // lastprivate clause.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Optional lastprivate kind, e.g. 'conditional', if specified by user.
  OpenMPLastprivateModifier LPKind;
  /// Optional location of the lastprivate kind, if specified by user.
SourceLocation LPKindLoc; /// Optional colon location, if specified by user. SourceLocation ColonLoc; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, unsigned N) : OMPVarListClause<OMPLastprivateClause>(llvm::omp::OMPC_lastprivate, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), LPKind(LPKind), LPKindLoc(LPKindLoc), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPLastprivateClause(unsigned N) : OMPVarListClause<OMPLastprivateClause>( llvm::omp::OMPC_lastprivate, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Get the list of helper expressions for initialization of private /// copies for lastprivate variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent private variables (for arrays, single /// array element) in the final assignment statement performed by the /// lastprivate clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. 
These expressions represent original variables (for arrays, single /// array element) in the final assignment statement performed by the /// lastprivate clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign private copy of the variable to original variable. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } /// Sets lastprivate kind. void setKind(OpenMPLastprivateModifier Kind) { LPKind = Kind; } /// Sets location of the lastprivate kind. void setKindLoc(SourceLocation Loc) { LPKindLoc = Loc; } /// Sets colon symbol location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. This list represents /// private variables (for arrays, single array element). /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. 
This list represents /// original variables (for arrays, single array element). /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of final assignment performed by the /// lastprivate clause. /// \param LPKind Lastprivate kind, e.g. 'conditional'. /// \param LPKindLoc Location of the lastprivate kind. /// \param ColonLoc Location of the ':' symbol if lastprivate kind is used. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPLastprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N); /// Lastprivate kind. OpenMPLastprivateModifier getKind() const { return LPKind; } /// Returns the location of the lastprivate kind. SourceLocation getKindLoc() const { return LPKindLoc; } /// Returns the location of the ':' symbol, if any. SourceLocation getColonLoc() const { return ColonLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; /// Set list of helper expressions, required for generation of private /// copies of original lastprivate variables. 
void setPrivateCopies(ArrayRef<Expr *> PrivateCopies); helper_expr_const_range private_copies() const { return helper_expr_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_range private_copies() { return helper_expr_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPLastprivateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_lastprivate; } }; /// This represents clause 'shared' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel shared(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'shared' /// with the variables 'a' and 'b'. 
class OMPSharedClause final : public OMPVarListClause<OMPSharedClause>, private llvm::TrailingObjects<OMPSharedClause, Expr *> { friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPSharedClause(unsigned N) : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPSharedClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_shared; } }; /// This represents clause 'reduction' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'reduction' /// with operator '+' and the variables 'a' and 'b'. class OMPReductionClause final : public OMPVarListClause<OMPReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Reduction modifier. OpenMPReductionClauseModifier Modifier = OMPC_REDUCTION_unknown; /// Reduction modifier location. SourceLocation ModifierLoc; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ModifierLoc Modifier location. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. 
OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, OpenMPReductionClauseModifier Modifier, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), Modifier(Modifier), ModifierLoc(ModifierLoc), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPReductionClause(unsigned N) : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets reduction modifier. void setModifier(OpenMPReductionClauseModifier M) { Modifier = M; } /// Sets location of the modifier. void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; } /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent private copy of the reduction /// variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent LHS expression in the final /// reduction expression performed by the reduction clause. 
void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent RHS expression in the final /// reduction expression performed by the reduction clause. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } /// Set list of helper copy operations for inscan reductions. /// The form is: Temps[i] = LHS[i]; void setInscanCopyOps(ArrayRef<Expr *> Ops); /// Get the list of helper inscan copy operations. 
  MutableArrayRef<Expr *> getInscanCopyOps() {
    return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInscanCopyOps() const {
    return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
  }

  /// Set list of helper temp vars for inscan copy array operations.
  void setInscanCopyArrayTemps(ArrayRef<Expr *> CopyArrayTemps);

  /// Get the list of helper inscan copy temps.
  MutableArrayRef<Expr *> getInscanCopyArrayTemps() {
    return MutableArrayRef<Expr *>(getInscanCopyOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInscanCopyArrayTemps() const {
    return llvm::makeArrayRef(getInscanCopyOps().end(), varlist_size());
  }

  /// Set list of helper temp elements vars for inscan copy array operations.
  void setInscanCopyArrayElems(ArrayRef<Expr *> CopyArrayElems);

  /// Get the list of helper inscan copy array elements.
  MutableArrayRef<Expr *> getInscanCopyArrayElems() {
    return MutableArrayRef<Expr *>(getInscanCopyArrayTemps().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getInscanCopyArrayElems() const {
    return llvm::makeArrayRef(getInscanCopyArrayTemps().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// LHSs of the reduction expressions.
/// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. /// \param ReductionOps List of helper expressions that represents reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CutomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// \param CopyOps List of copy operations for inscan reductions: /// \code /// TempExprs = LHSExprs; /// \endcode /// \param CopyArrayTemps Temp arrays for prefix sums. /// \param CopyArrayElems Temp arrays for prefix sums. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, OpenMPReductionClauseModifier Modifier, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> CopyOps, ArrayRef<Expr *> CopyArrayTemps, ArrayRef<Expr *> CopyArrayElems, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// \param Modifier Reduction modifier. static OMPReductionClause * CreateEmpty(const ASTContext &C, unsigned N, OpenMPReductionClauseModifier Modifier); /// Returns modifier. 
OpenMPReductionClauseModifier getModifier() const { return Modifier; } /// Returns modifier location. SourceLocation getModifierLoc() const { return ModifierLoc; } /// Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_const_range copy_ops() const { return helper_expr_const_range(getInscanCopyOps().begin(), getInscanCopyOps().end()); } helper_expr_range copy_ops() { return helper_expr_range(getInscanCopyOps().begin(), getInscanCopyOps().end()); } helper_expr_const_range copy_array_temps() 
const { return helper_expr_const_range(getInscanCopyArrayTemps().begin(), getInscanCopyArrayTemps().end()); } helper_expr_range copy_array_temps() { return helper_expr_range(getInscanCopyArrayTemps().begin(), getInscanCopyArrayTemps().end()); } helper_expr_const_range copy_array_elems() const { return helper_expr_const_range(getInscanCopyArrayElems().begin(), getInscanCopyArrayElems().end()); } helper_expr_range copy_array_elems() { return helper_expr_range(getInscanCopyArrayElems().begin(), getInscanCopyArrayElems().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPReductionClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range used_children() const { auto Children = const_cast<OMPReductionClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_reduction; } }; /// This represents clause 'task_reduction' in the '#pragma omp taskgroup' /// directives. /// /// \code /// #pragma omp taskgroup task_reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp taskgroup' has clause /// 'task_reduction' with operator '+' and the variables 'a' and 'b'. class OMPTaskReductionClause final : public OMPVarListClause<OMPTaskReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. 
DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param ColonLoc Location of ':'. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPTaskReductionClause>( llvm::omp::OMPC_task_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPTaskReductionClause(unsigned N) : OMPVarListClause<OMPTaskReductionClause>( llvm::omp::OMPC_task_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent private copy of the reduction variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. 
/// These expressions represent LHS expression in the final reduction /// expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent RHS expression in the final reduction /// expression performed by the reduction clause. Also, variables in these /// expressions are used for proper initialization of reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL The variables in the clause. 
/// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// \param Privates List of helper expressions for proper generation of /// private copies. /// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// LHSs of the reduction expressions. /// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. /// \param ReductionOps List of helper expressions that represents reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CutomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPTaskReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets location of ':' symbol in clause. 
SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPTaskReductionClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), 
                                 const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_task_reduction;
  }
};

/// This represents clause 'in_reduction' in the '#pragma omp task' directives.
///
/// \code
/// #pragma omp task in_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'in_reduction' with
/// operator '+' and the variables 'a' and 'b'.
class OMPInReductionClause final
    : public OMPVarListClause<OMPInReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPInReductionClause, Expr *> {
  // Tail-allocation layout: five helper arrays of varlist_size() elements
  // each follow the variable list, in this order:
  //   { Vars[] /* in OMPVarListClause */; Privates[]; LHSExprs[];
  //     RHSExprs[]; ReductionOps[]; TaskgroupDescriptors[]; }
  // The get*() accessors below derive each slice from the end of the
  // previous one, so their chaining order must match this layout.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation ColonLoc, SourceLocation EndLoc,
                       unsigned N, NestedNameSpecifierLoc QualifierLoc,
                       const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPInReductionClause>(llvm::omp::OMPC_in_reduction,
                                               StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPInReductionClause(unsigned N)
      : OMPVarListClause<OMPInReductionClause>(
            llvm::omp::OMPC_in_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent private copy of the reduction variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction taskgroup descriptors.
  // NOTE(review): the parameter is named ReductionOps but holds taskgroup
  // descriptors — consider renaming for clarity.
  void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction taskgroup descriptors.
  MutableArrayRef<Expr *> getTaskgroupDescriptors() {
    return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getTaskgroupDescriptors() const {
    return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for in_reduction clause. This list
  /// represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for in_reduction clause. This list
  /// represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param TaskgroupDescriptors List of helper taskgroup descriptors for
  /// corresponding items in parent taskgroup task_reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPInReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> TaskgroupDescriptors,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }
  helper_expr_const_range taskgroup_descriptors() const {
    return helper_expr_const_range(getTaskgroupDescriptors().begin(),
                                   getTaskgroupDescriptors().end());
  }
  helper_expr_range taskgroup_descriptors() {
    return helper_expr_range(getTaskgroupDescriptors().begin(),
                             getTaskgroupDescriptors().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPInReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_in_reduction;
  }
};

/// This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
class OMPLinearClause final
    : public OMPVarListClause<OMPLinearClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLinearClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Modifier of 'linear' clause.
  OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val;

  /// Location of linear modifier if any.
  SourceLocation ModifierLoc;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Sets the linear step for clause.
  // Step lives in the first helper slot directly past the Finals[] array
  // (see the layout comment on getPrivates() below).
  void setStep(Expr *Step) { *(getFinals().end()) = Step; }

  /// Sets the expression to calculate linear step for clause.
  // CalcStep lives in the second helper slot past Finals[].
  void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }

  /// Build 'linear' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
                  SourceLocation ColonLoc, SourceLocation EndLoc,
                  unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear, StartLoc,
                                          LParenLoc, EndLoc, NumVars),
        OMPClauseWithPostUpdate(this), Modifier(Modifier),
        ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  explicit OMPLinearClause(unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), NumVars),
        OMPClauseWithPostUpdate(this) {}

  /// Gets the list of initial values for linear variables.
  ///
  /// There are NumVars expressions with initial values allocated after the
  /// varlist, they are followed by NumVars update expressions (used to update
  /// the linear variable's value on current iteration) and they are followed by
  /// NumVars final expressions (used to calculate the linear variable's
  /// value after the loop body). After these lists, there are 2 helper
  /// expressions - linear step and a helper to calculate it before the
  /// loop body (used when the linear step is not constant):
  ///
  /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
  /// Finals[]; Step; CalcStep; }
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Gets the list of update expressions for linear variables.
  MutableArrayRef<Expr *> getUpdates() {
    return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
  }
  ArrayRef<const Expr *> getUpdates() const {
    return llvm::makeArrayRef(getInits().end(), varlist_size());
  }

  /// Gets the list of final update expressions for linear variables.
  MutableArrayRef<Expr *> getFinals() {
    return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getFinals() const {
    return llvm::makeArrayRef(getUpdates().end(), varlist_size());
  }

  /// Gets the list of used expressions for linear variables.
  // Skips the Step and CalcStep slots (getFinals().end() + 2); the extra
  // varlist_size() + 1 elements presumably include the step expression —
  // TODO(review): confirm against setUsedExprs()/Create() in the .cpp.
  MutableArrayRef<Expr *> getUsedExprs() {
    return MutableArrayRef<Expr *>(getFinals().end() + 2, varlist_size() + 1);
  }
  ArrayRef<const Expr *> getUsedExprs() const {
    return llvm::makeArrayRef(getFinals().end() + 2, varlist_size() + 1);
  }

  /// Sets the list of the copies of original linear variables.
  /// \param PL List of expressions.
  void setPrivates(ArrayRef<Expr *> PL);

  /// Sets the list of the initial values for linear variables.
  /// \param IL List of expressions.
  void setInits(ArrayRef<Expr *> IL);

public:
  /// Creates clause with a list of variables \a VL and a linear step
  /// \a Step.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PL List of private copies of original variables.
  /// \param IL List of initial values for the variables.
  /// \param Step Linear step.
  /// \param CalcStep Calculation of the linear step.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLinearClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// Set modifier.
  void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }

  /// Return modifier.
  OpenMPLinearClauseKind getModifier() const { return Modifier; }

  /// Set modifier location.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// Return modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  /// Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns linear step.
  Expr *getStep() { return *(getFinals().end()); }

  /// Returns linear step.
  const Expr *getStep() const { return *(getFinals().end()); }

  /// Returns expression to calculate linear step.
  Expr *getCalcStep() { return *(getFinals().end() + 1); }

  /// Returns expression to calculate linear step.
  const Expr *getCalcStep() const { return *(getFinals().end() + 1); }

  /// Sets the list of update expressions for linear variables.
  /// \param UL List of expressions.
  void setUpdates(ArrayRef<Expr *> UL);

  /// Sets the list of final update expressions for linear variables.
  /// \param FL List of expressions.
  void setFinals(ArrayRef<Expr *> FL);

  /// Sets the list of used expressions for the linear clause.
  void setUsedExprs(ArrayRef<Expr *> UE);

  using privates_iterator = MutableArrayRef<Expr *>::iterator;
  using privates_const_iterator = ArrayRef<const Expr *>::iterator;
  using privates_range = llvm::iterator_range<privates_iterator>;
  using privates_const_range = llvm::iterator_range<privates_const_iterator>;

  privates_range privates() {
    return privates_range(getPrivates().begin(), getPrivates().end());
  }
  privates_const_range privates() const {
    return privates_const_range(getPrivates().begin(), getPrivates().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  using updates_iterator = MutableArrayRef<Expr *>::iterator;
  using updates_const_iterator = ArrayRef<const Expr *>::iterator;
  using updates_range = llvm::iterator_range<updates_iterator>;
  using updates_const_range = llvm::iterator_range<updates_const_iterator>;

  updates_range updates() {
    return updates_range(getUpdates().begin(), getUpdates().end());
  }
  updates_const_range updates() const {
    return updates_const_range(getUpdates().begin(), getUpdates().end());
  }

  using finals_iterator = MutableArrayRef<Expr *>::iterator;
  using finals_const_iterator = ArrayRef<const Expr *>::iterator;
  using finals_range = llvm::iterator_range<finals_iterator>;
  using finals_const_range = llvm::iterator_range<finals_const_iterator>;

  finals_range finals() {
    return finals_range(getFinals().begin(), getFinals().end());
  }
  finals_const_range finals() const {
    return finals_const_range(getFinals().begin(), getFinals().end());
  }

  using used_expressions_iterator = MutableArrayRef<Expr *>::iterator;
  using used_expressions_const_iterator = ArrayRef<const Expr *>::iterator;
  using used_expressions_range =
      llvm::iterator_range<used_expressions_iterator>;
  using used_expressions_const_range =
      llvm::iterator_range<used_expressions_const_iterator>;

  // Note: constructed via finals_range/finals_const_range, which alias the
  // used_expressions range types (same underlying iterator typedefs).
  used_expressions_range used_expressions() {
    return finals_range(getUsedExprs().begin(), getUsedExprs().end());
  }
  used_expressions_const_range used_expressions() const {
    return finals_const_range(getUsedExprs().begin(), getUsedExprs().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPLinearClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children();

  const_child_range used_children() const {
    auto Children = const_cast<OMPLinearClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_linear;
  }
};

/// This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
class OMPAlignedClause final
    : public OMPVarListClause<OMPAlignedClause>,
      private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Sets the alignment for clause.
  // The single alignment expression is tail-allocated directly past the
  // variable list.
  void setAlignment(Expr *A) { *varlist_end() = A; }

  /// Build 'aligned' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation ColonLoc, SourceLocation EndLoc,
                   unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned, StartLoc,
                                           LParenLoc, EndLoc, NumVars),
        ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  explicit OMPAlignedClause(unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned,
                                           SourceLocation(), SourceLocation(),
                                           SourceLocation(), NumVars) {}

public:
  /// Creates clause with a list of variables \a VL and alignment \a A.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param A Alignment.
  static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation ColonLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  Expr *A);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns alignment.
  // Reads the tail-allocated slot written by setAlignment().
  Expr *getAlignment() { return *varlist_end(); }

  /// Returns alignment.
  const Expr *getAlignment() const { return *varlist_end(); }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPAlignedClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_aligned;
  }
};

/// This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
class OMPCopyinClause final : public OMPVarListClause<OMPCopyinClause>,
                              private llvm::TrailingObjects<OMPCopyinClause,
                                                            Expr *> {
  // Class has 3 additional tail allocated arrays:
  // 1. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents sources.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents destinations.
  // 3. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of propagation of master's thread values of
  // threadprivate variables to local instances of that variables in other
  // implicit threads.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin, StartLoc,
                                          LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyinClause(unsigned N)
      : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyin clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  // Tail arrays are laid out in order { SrcExprs[]; DstExprs[];
  // AssignmentOps[] } after the variable list; each accessor below chains
  // off the end of the previous slice.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyin clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of propagation of master's thread values of
  /// threadprivate variables to local instances of that variables in other
  /// implicit threads.
  static OMPCopyinClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPCopyinClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_copyin;
  }
};

/// This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
class OMPCopyprivateClause final
    : public OMPVarListClause<OMPCopyprivateClause>,
      private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  // Trailing storage holds 4 consecutive arrays of varlist_size() Expr*:
  //   [variables][source exprs][destination exprs][assignment ops]
  // The private getters below slice this storage by offsetting from the
  // previous array's end().

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(llvm::omp::OMPC_copyprivate,
                                               StartLoc, LParenLoc, EndLoc, N) {
  }

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyprivateClause(unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(
            llvm::omp::OMPC_copyprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list
  /// represents sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list
  /// represents destinations.
  /// \param AssignmentOps List of helper expressions that represents
  /// assignment operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// copyprivate clause.
  static OMPCopyprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }

  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }

  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }

  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }

  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }

  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPCopyprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_copyprivate;
  }
};

/// This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive.
/// This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
class OMPFlushClause final
    : public OMPVarListClause<OMPFlushClause>,
      private llvm::TrailingObjects<OMPFlushClause, Expr *> {
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush, StartLoc,
                                         LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFlushClause(unsigned N)
      : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush,
                                         SourceLocation(), SourceLocation(),
                                         SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc, SourceLocation EndLoc,
                                ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFlushClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_flush;
  }
};

/// This represents implicit clause 'depobj' for the '#pragma omp depobj'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// depobj' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp depobj(a) destroy
/// \endcode
/// In this example directive '#pragma omp depobj' has implicit clause 'depobj'
/// with the depobj 'a'.
class OMPDepobjClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Depend object expression associated with the 'depobj' directive.
  Expr *Depobj = nullptr;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDepobjClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_depobj, StartLoc, EndLoc),
        LParenLoc(LParenLoc) {}

  /// Build an empty clause.
  explicit OMPDepobjClause()
      : OMPClause(llvm::omp::OMPC_depobj, SourceLocation(), SourceLocation()) {
  }

  /// Sets the depobj expression the clause refers to.
  void setDepobj(Expr *E) { Depobj = E; }

  /// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } public: /// Creates clause. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param Depobj depobj expression associated with the 'depobj' directive. static OMPDepobjClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, Expr *Depobj); /// Creates an empty clause. /// /// \param C AST context. static OMPDepobjClause *CreateEmpty(const ASTContext &C); /// Returns depobj expression associated with the clause. Expr *getDepobj() { return Depobj; } const Expr *getDepobj() const { return Depobj; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&Depobj), reinterpret_cast<Stmt **>(&Depobj) + 1); } const_child_range children() const { auto Children = const_cast<OMPDepobjClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_depobj; } }; /// This represents implicit clause 'depend' for the '#pragma omp task' /// directive. /// /// \code /// #pragma omp task depend(in:a,b) /// \endcode /// In this example directive '#pragma omp task' with clause 'depend' with the /// variables 'a' and 'b' with dependency 'in'. class OMPDependClause final : public OMPVarListClause<OMPDependClause>, private llvm::TrailingObjects<OMPDependClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Dependency type (one of in, out, inout). 
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; /// Dependency type location. SourceLocation DepLoc; /// Colon location. SourceLocation ColonLoc; /// Number of loops, associated with the depend clause. unsigned NumLoops = 0; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// \param NumLoops Number of loops that is associated with this depend /// clause. OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N, unsigned NumLoops) : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend, StartLoc, LParenLoc, EndLoc, N), NumLoops(NumLoops) {} /// Build an empty clause. /// /// \param N Number of variables. /// \param NumLoops Number of loops that is associated with this depend /// clause. explicit OMPDependClause(unsigned N, unsigned NumLoops) : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend, SourceLocation(), SourceLocation(), SourceLocation(), N), NumLoops(NumLoops) {} /// Set dependency kind. void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; } /// Set dependency kind and its location. void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; } /// Set colon location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Sets optional dependency modifier. void setModifier(Expr *DepModifier); public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param DepKind Dependency type. /// \param DepLoc Location of the dependency type. /// \param ColonLoc Colon location. /// \param VL List of references to the variables. /// \param NumLoops Number of loops that is associated with this depend /// clause. 
static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL, unsigned NumLoops); /// Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// \param NumLoops Number of loops that is associated with this depend /// clause. static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N, unsigned NumLoops); /// Get dependency type. OpenMPDependClauseKind getDependencyKind() const { return DepKind; } /// Return optional depend modifier. Expr *getModifier(); const Expr *getModifier() const { return const_cast<OMPDependClause *>(this)->getModifier(); } /// Get dependency type location. SourceLocation getDependencyLoc() const { return DepLoc; } /// Get colon location. SourceLocation getColonLoc() const { return ColonLoc; } /// Get number of loops associated with the clause. unsigned getNumLoops() const { return NumLoops; } /// Set the loop data for the depend clauses with 'sink|source' kind of /// dependency. void setLoopData(unsigned NumLoop, Expr *Cnt); /// Get the loop data. Expr *getLoopData(unsigned NumLoop); const Expr *getLoopData(unsigned NumLoop) const; child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPDependClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_depend; } }; /// This represents 'device' clause in the '#pragma omp ...' 
/// directive. /// /// \code /// #pragma omp target device(a) /// \endcode /// In this example directive '#pragma omp target' has clause 'device' /// with single expression 'a'. class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Device clause modifier. OpenMPDeviceClauseModifier Modifier = OMPC_DEVICE_unknown; /// Location of the modifier. SourceLocation ModifierLoc; /// Device number. Stmt *Device = nullptr; /// Set the device number. /// /// \param E Device number. void setDevice(Expr *E) { Device = E; } /// Sets modifier. void setModifier(OpenMPDeviceClauseModifier M) { Modifier = M; } /// Setst modifier location. void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; } public: /// Build 'device' clause. /// /// \param Modifier Clause modifier. /// \param E Expression associated with this clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param ModifierLoc Modifier location. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_device, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Modifier(Modifier), ModifierLoc(ModifierLoc), Device(E) { setPreInitStmt(HelperE, CaptureRegion); } /// Build an empty clause. OMPDeviceClause() : OMPClause(llvm::omp::OMPC_device, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return device number. 
Expr *getDevice() { return cast<Expr>(Device); } /// Return device number. Expr *getDevice() const { return cast<Expr>(Device); } /// Gets modifier. OpenMPDeviceClauseModifier getModifier() const { return Modifier; } /// Gets modifier location. SourceLocation getModifierLoc() const { return ModifierLoc; } child_range children() { return child_range(&Device, &Device + 1); } const_child_range children() const { return const_child_range(&Device, &Device + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_device; } }; /// This represents 'threads' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp ordered threads /// \endcode /// In this example directive '#pragma omp ordered' has simple 'threads' clause. class OMPThreadsClause : public OMPClause { public: /// Build 'threads' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_threads, StartLoc, EndLoc) {} /// Build an empty clause. OMPThreadsClause() : OMPClause(llvm::omp::OMPC_threads, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_threads; } }; /// This represents 'simd' clause in the '#pragma omp ...' directive. 
/// /// \code /// #pragma omp ordered simd /// \endcode /// In this example directive '#pragma omp ordered' has simple 'simd' clause. class OMPSIMDClause : public OMPClause { public: /// Build 'simd' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_simd, StartLoc, EndLoc) {} /// Build an empty clause. OMPSIMDClause() : OMPClause(llvm::omp::OMPC_simd, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_simd; } }; /// Struct that defines common infrastructure to handle mappable /// expressions used in OpenMP clauses. class OMPClauseMappableExprCommon { public: /// Class that represents a component of a mappable expression. E.g. /// for an expression S.a, the first component is a declaration reference /// expression associated with 'S' and the second is a member expression /// associated with the field declaration 'a'. If the expression is an array /// subscript it may not have any associated declaration. In that case the /// associated declaration is set to nullptr. class MappableComponent { /// Pair of Expression and Non-contiguous pair associated with the /// component. llvm::PointerIntPair<Expr *, 1, bool> AssociatedExpressionNonContiguousPr; /// Declaration associated with the declaration. If the component does /// not have a declaration (e.g. array subscripts or section), this is set /// to nullptr. 
ValueDecl *AssociatedDeclaration = nullptr; public: explicit MappableComponent() = default; explicit MappableComponent(Expr *AssociatedExpression, ValueDecl *AssociatedDeclaration, bool IsNonContiguous) : AssociatedExpressionNonContiguousPr(AssociatedExpression, IsNonContiguous), AssociatedDeclaration( AssociatedDeclaration ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl()) : nullptr) {} Expr *getAssociatedExpression() const { return AssociatedExpressionNonContiguousPr.getPointer(); } bool isNonContiguous() const { return AssociatedExpressionNonContiguousPr.getInt(); } ValueDecl *getAssociatedDeclaration() const { return AssociatedDeclaration; } }; // List of components of an expression. This first one is the whole // expression and the last one is the base expression. using MappableExprComponentList = SmallVector<MappableComponent, 8>; using MappableExprComponentListRef = ArrayRef<MappableComponent>; // List of all component lists associated to the same base declaration. // E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have // their component list but the same base declaration 'S'. using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>; using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>; protected: // Return the total number of elements in a list of component lists. static unsigned getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists); // Return the total number of elements in a list of declarations. All // declarations are expected to be canonical. static unsigned getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations); }; /// This structure contains all sizes needed for by an /// OMPMappableExprListClause. struct OMPMappableExprListSizeTy { /// Number of expressions listed. unsigned NumVars; /// Number of unique base declarations. unsigned NumUniqueDeclarations; /// Number of component lists. 
unsigned NumComponentLists; /// Total number of expression components. unsigned NumComponents; OMPMappableExprListSizeTy() = default; OMPMappableExprListSizeTy(unsigned NumVars, unsigned NumUniqueDeclarations, unsigned NumComponentLists, unsigned NumComponents) : NumVars(NumVars), NumUniqueDeclarations(NumUniqueDeclarations), NumComponentLists(NumComponentLists), NumComponents(NumComponents) {} }; /// This represents clauses with a list of expressions that are mappable. /// Examples of these clauses are 'map' in /// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from /// in '#pragma omp target update...' directives. template <class T> class OMPMappableExprListClause : public OMPVarListClause<T>, public OMPClauseMappableExprCommon { friend class OMPClauseReader; /// Number of unique declarations in this clause. unsigned NumUniqueDeclarations; /// Number of component lists in this clause. unsigned NumComponentLists; /// Total number of components in this clause. unsigned NumComponents; /// Whether this clause is possible to have user-defined mappers associated. /// It should be true for map, to, and from clauses, and false for /// use_device_ptr and is_device_ptr. const bool SupportsMapper; /// C++ nested name specifier for the associated user-defined mapper. NestedNameSpecifierLoc MapperQualifierLoc; /// The associated user-defined mapper identifier information. DeclarationNameInfo MapperIdInfo; protected: /// Build a clause for \a NumUniqueDeclarations declarations, \a /// NumComponentLists total component lists, and \a NumComponents total /// components. /// /// \param K Kind of the clause. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. 
It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. /// \param SupportsMapper Indicates whether this clause is possible to have /// user-defined mappers associated. /// \param MapperQualifierLocPtr C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperIdInfoPtr The identifier of associated user-defined mapper. OMPMappableExprListClause( OpenMPClauseKind K, const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes, bool SupportsMapper = false, NestedNameSpecifierLoc *MapperQualifierLocPtr = nullptr, DeclarationNameInfo *MapperIdInfoPtr = nullptr) : OMPVarListClause<T>(K, Locs.StartLoc, Locs.LParenLoc, Locs.EndLoc, Sizes.NumVars), NumUniqueDeclarations(Sizes.NumUniqueDeclarations), NumComponentLists(Sizes.NumComponentLists), NumComponents(Sizes.NumComponents), SupportsMapper(SupportsMapper) { if (MapperQualifierLocPtr) MapperQualifierLoc = *MapperQualifierLocPtr; if (MapperIdInfoPtr) MapperIdInfo = *MapperIdInfoPtr; } /// Get the unique declarations that are in the trailing objects of the /// class. MutableArrayRef<ValueDecl *> getUniqueDeclsRef() { return MutableArrayRef<ValueDecl *>( static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(), NumUniqueDeclarations); } /// Get the unique declarations that are in the trailing objects of the /// class. ArrayRef<ValueDecl *> getUniqueDeclsRef() const { return ArrayRef<ValueDecl *>( static_cast<const T *>(this) ->template getTrailingObjects<ValueDecl *>(), NumUniqueDeclarations); } /// Set the unique declarations that are in the trailing objects of the /// class. 
void setUniqueDecls(ArrayRef<ValueDecl *> UDs) { assert(UDs.size() == NumUniqueDeclarations && "Unexpected amount of unique declarations."); std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin()); } /// Get the number of lists per declaration that are in the trailing /// objects of the class. MutableArrayRef<unsigned> getDeclNumListsRef() { return MutableArrayRef<unsigned>( static_cast<T *>(this)->template getTrailingObjects<unsigned>(), NumUniqueDeclarations); } /// Get the number of lists per declaration that are in the trailing /// objects of the class. ArrayRef<unsigned> getDeclNumListsRef() const { return ArrayRef<unsigned>( static_cast<const T *>(this)->template getTrailingObjects<unsigned>(), NumUniqueDeclarations); } /// Set the number of lists per declaration that are in the trailing /// objects of the class. void setDeclNumLists(ArrayRef<unsigned> DNLs) { assert(DNLs.size() == NumUniqueDeclarations && "Unexpected amount of list numbers."); std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin()); } /// Get the cumulative component lists sizes that are in the trailing /// objects of the class. They are appended after the number of lists. MutableArrayRef<unsigned> getComponentListSizesRef() { return MutableArrayRef<unsigned>( static_cast<T *>(this)->template getTrailingObjects<unsigned>() + NumUniqueDeclarations, NumComponentLists); } /// Get the cumulative component lists sizes that are in the trailing /// objects of the class. They are appended after the number of lists. ArrayRef<unsigned> getComponentListSizesRef() const { return ArrayRef<unsigned>( static_cast<const T *>(this)->template getTrailingObjects<unsigned>() + NumUniqueDeclarations, NumComponentLists); } /// Set the cumulative component lists sizes that are in the trailing /// objects of the class. 
  void setComponentListSizes(ArrayRef<unsigned> CLSs) {
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of component lists.");
    std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin());
  }

  /// Get the components that are in the trailing objects of the class.
  MutableArrayRef<MappableComponent> getComponentsRef() {
    return MutableArrayRef<MappableComponent>(
        static_cast<T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// Get the components that are in the trailing objects of the class.
  ArrayRef<MappableComponent> getComponentsRef() const {
    return ArrayRef<MappableComponent>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// Set the components that are in the trailing objects of the class.
  /// This requires the list sizes so that it can also fill the original
  /// expressions, which are the first component of each list.
  void setComponents(ArrayRef<MappableComponent> Components,
                     ArrayRef<unsigned> CLSs) {
    assert(Components.size() == NumComponents &&
           "Unexpected amount of component lists.");
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of list sizes.");
    std::copy(Components.begin(), Components.end(), getComponentsRef().begin());
  }

  /// Fill the clause information from the list of declarations and
  /// associated component lists. Groups the component lists by (canonical)
  /// base declaration and serializes them into the trailing storage arrays.
  void setClauseInfo(ArrayRef<ValueDecl *> Declarations,
                     MappableExprComponentListsRef ComponentLists) {
    // Perform some checks to make sure the data sizes are consistent with the
    // information available when the clause was created.
    assert(getUniqueDeclarationsTotalNumber(Declarations) ==
               NumUniqueDeclarations &&
           "Unexpected number of mappable expression info entries!");
    assert(getComponentsTotalNumber(ComponentLists) == NumComponents &&
           "Unexpected total number of components!");
    assert(Declarations.size() == ComponentLists.size() &&
           "Declaration and component lists size is not consistent!");
    assert(Declarations.size() == NumComponentLists &&
           "Unexpected declaration and component lists size!");

    // Organize the components by declaration and retrieve the original
    // expression. Original expressions are always the first component of the
    // mappable component list.
    llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>>
        ComponentListMap;
    {
      auto CI = ComponentLists.begin();
      for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE;
           ++DI, ++CI) {
        assert(!CI->empty() && "Invalid component list!");
        ComponentListMap[*DI].push_back(*CI);
      }
    }

    // Iterators of the target storage.
    auto UniqueDeclarations = getUniqueDeclsRef();
    auto UDI = UniqueDeclarations.begin();

    auto DeclNumLists = getDeclNumListsRef();
    auto DNLI = DeclNumLists.begin();

    auto ComponentListSizes = getComponentListSizesRef();
    auto CLSI = ComponentListSizes.begin();

    auto Components = getComponentsRef();
    auto CI = Components.begin();

    // Variable to compute the accumulation of the number of components.
    unsigned PrevSize = 0u;

    // Scan all the declarations and associated component lists.
    for (auto &M : ComponentListMap) {
      // The declaration.
      auto *D = M.first;
      // The component lists.
      auto CL = M.second;

      // Initialize the entry.
      *UDI = D;
      ++UDI;

      *DNLI = CL.size();
      ++DNLI;

      // Obtain the cumulative sizes and concatenate all the components in the
      // reserved storage.
      for (auto C : CL) {
        // Accumulate with the previous size.
        PrevSize += C.size();

        // Save the size.
        *CLSI = PrevSize;
        ++CLSI;

        // Append components after the current components iterator.
        CI = std::copy(C.begin(), C.end(), CI);
      }
    }
  }

  /// Set the nested name specifier of associated user-defined mapper.
  void setMapperQualifierLoc(NestedNameSpecifierLoc NNSL) {
    MapperQualifierLoc = NNSL;
  }

  /// Set the name of associated user-defined mapper.
  void setMapperIdInfo(DeclarationNameInfo MapperId) {
    MapperIdInfo = MapperId;
  }

  /// Get the user-defined mapper references that are in the trailing objects
  /// of the class. They are stored after the varlist_size() variable
  /// expressions, one mapper reference per listed variable.
  MutableArrayRef<Expr *> getUDMapperRefs() {
    assert(SupportsMapper &&
           "Must be a clause that is possible to have user-defined mappers");
    return llvm::makeMutableArrayRef<Expr *>(
        static_cast<T *>(this)->template getTrailingObjects<Expr *>() +
            OMPVarListClause<T>::varlist_size(),
        OMPVarListClause<T>::varlist_size());
  }

  /// Get the user-defined mappers references that are in the trailing objects
  /// of the class.
  ArrayRef<Expr *> getUDMapperRefs() const {
    assert(SupportsMapper &&
           "Must be a clause that is possible to have user-defined mappers");
    return llvm::makeArrayRef<Expr *>(
        static_cast<const T *>(this)->template getTrailingObjects<Expr *>() +
            OMPVarListClause<T>::varlist_size(),
        OMPVarListClause<T>::varlist_size());
  }

  /// Set the user-defined mappers that are in the trailing objects of the
  /// class.
  void setUDMapperRefs(ArrayRef<Expr *> DMDs) {
    assert(DMDs.size() == OMPVarListClause<T>::varlist_size() &&
           "Unexpected number of user-defined mappers.");
    assert(SupportsMapper &&
           "Must be a clause that is possible to have user-defined mappers");
    std::copy(DMDs.begin(), DMDs.end(), getUDMapperRefs().begin());
  }

public:
  /// Return the number of unique base declarations in this clause.
  unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }

  /// Return the number of lists derived from the clause expressions.
  unsigned getTotalComponentListNum() const { return NumComponentLists; }

  /// Return the total number of components in all lists derived from the
  /// clause.
  unsigned getTotalComponentsNum() const { return NumComponents; }

  /// Gets the nested name specifier for associated user-defined mapper.
  NestedNameSpecifierLoc getMapperQualifierLoc() const {
    return MapperQualifierLoc;
  }

  /// Gets the name info for associated user-defined mapper.
  const DeclarationNameInfo &getMapperIdInfo() const { return MapperIdInfo; }

  /// Iterator that browse the components by lists. It also allows
  /// browsing components of a single declaration.
  ///
  /// Dereferencing yields (declaration, component list, mapper declaration)
  /// tuples; the component-list boundaries are reconstructed from the
  /// cumulative list-size array stored in the clause.
  class const_component_lists_iterator
      : public llvm::iterator_adaptor_base<
            const_component_lists_iterator,
            MappableExprComponentListRef::const_iterator,
            std::forward_iterator_tag, MappableComponent, ptrdiff_t,
            MappableComponent, MappableComponent> {
    // The declaration the iterator currently refers to.
    ArrayRef<ValueDecl *>::iterator DeclCur;

    // The list number associated with the current declaration.
    ArrayRef<unsigned>::iterator NumListsCur;

    // Whether this clause is possible to have user-defined mappers associated.
    const bool SupportsMapper;

    // The user-defined mapper associated with the current declaration.
    // Only valid when SupportsMapper is true.
    ArrayRef<Expr *>::iterator MapperCur;

    // Remaining lists for the current declaration.
    unsigned RemainingLists = 0;

    // The cumulative size of the previous list, or zero if there is no previous
    // list.
    unsigned PrevListSize = 0;

    // The cumulative sizes of the current list - it will delimit the remaining
    // range of interest.
    ArrayRef<unsigned>::const_iterator ListSizeCur;
    ArrayRef<unsigned>::const_iterator ListSizeEnd;

    // Iterator to the end of the components storage.
    MappableExprComponentListRef::const_iterator End;

  public:
    /// Construct an iterator that scans all lists.
    explicit const_component_lists_iterator(
        ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
        ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components, bool SupportsMapper,
        ArrayRef<Expr *> Mappers)
        : const_component_lists_iterator::iterator_adaptor_base(
              Components.begin()),
          DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
          SupportsMapper(SupportsMapper),
          ListSizeCur(CumulativeListSizes.begin()),
          ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
      assert(UniqueDecls.size() == DeclsListNum.size() &&
             "Inconsistent number of declarations and list sizes!");
      if (!DeclsListNum.empty())
        RemainingLists = *NumListsCur;
      if (SupportsMapper)
        MapperCur = Mappers.begin();
    }

    /// Construct an iterator that scan lists for a given declaration \a
    /// Declaration.
    explicit const_component_lists_iterator(
        const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
        ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components, bool SupportsMapper,
        ArrayRef<Expr *> Mappers)
        : const_component_lists_iterator(UniqueDecls, DeclsListNum,
                                         CumulativeListSizes, Components,
                                         SupportsMapper, Mappers) {
      // Look for the desired declaration. While we are looking for it, we
      // update the state so that we know the component where a given list
      // starts.
      for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
        if (*DeclCur == Declaration)
          break;

        assert(*NumListsCur > 0 && "No lists associated with declaration??");

        // Skip the lists associated with the current declaration, but save the
        // last list size that was skipped.
        std::advance(ListSizeCur, *NumListsCur - 1);
        PrevListSize = *ListSizeCur;
        ++ListSizeCur;
        if (SupportsMapper)
          ++MapperCur;
      }

      // If we didn't find any declaration, advance the iterator to after the
      // last component and set remaining lists to zero.
      if (ListSizeCur == CumulativeListSizes.end()) {
        this->I = End;
        RemainingLists = 0u;
        return;
      }

      // Set the remaining lists with the total number of lists of the current
      // declaration.
      RemainingLists = *NumListsCur;

      // Adjust the list size end iterator to the end of the relevant range.
      ListSizeEnd = ListSizeCur;
      std::advance(ListSizeEnd, RemainingLists);

      // Given that the list sizes are cumulative, the index of the component
      // that start the list is the size of the previous list.
      std::advance(this->I, PrevListSize);
    }

    // Return the array with the current list. The sizes are cumulative, so the
    // array size is the difference between the current size and previous one.
    std::tuple<const ValueDecl *, MappableExprComponentListRef,
               const ValueDecl *>
    operator*() const {
      assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
      const ValueDecl *Mapper = nullptr;
      if (SupportsMapper && *MapperCur)
        Mapper = cast<ValueDecl>(cast<DeclRefExpr>(*MapperCur)->getDecl());
      return std::make_tuple(
          *DeclCur,
          MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize),
          Mapper);
    }
    // NOTE: returns the tuple by value (mirrors operator*); there is no
    // persistent element to point at.
    std::tuple<const ValueDecl *, MappableExprComponentListRef,
               const ValueDecl *>
    operator->() const {
      return **this;
    }

    // Skip the components of the current list.
    const_component_lists_iterator &operator++() {
      assert(ListSizeCur != ListSizeEnd && RemainingLists &&
             "Invalid iterator!");

      // If we don't have more lists just skip all the components. Otherwise,
      // advance the iterator by the number of components in the current list.
      if (std::next(ListSizeCur) == ListSizeEnd) {
        this->I = End;
        RemainingLists = 0;
      } else {
        std::advance(this->I, *ListSizeCur - PrevListSize);
        PrevListSize = *ListSizeCur;

        // We are done with a declaration, move to the next one.
        if (!(--RemainingLists)) {
          ++DeclCur;
          ++NumListsCur;
          RemainingLists = *NumListsCur;
          assert(RemainingLists && "No lists in the following declaration??");
        }
      }

      ++ListSizeCur;
      if (SupportsMapper)
        ++MapperCur;

      return *this;
    }
  };

  using const_component_lists_range =
      llvm::iterator_range<const_component_lists_iterator>;

  /// Iterators for all component lists.
  const_component_lists_iterator component_lists_begin() const {
    return const_component_lists_iterator(
        getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
        getComponentsRef(), SupportsMapper,
        SupportsMapper ? getUDMapperRefs() : llvm::None);
  }
  const_component_lists_iterator component_lists_end() const {
    return const_component_lists_iterator(
        ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
        MappableExprComponentListRef(getComponentsRef().end(),
                                     getComponentsRef().end()),
        SupportsMapper, llvm::None);
  }
  const_component_lists_range component_lists() const {
    return {component_lists_begin(), component_lists_end()};
  }

  /// Iterators for component lists associated with the provided
  /// declaration.
  const_component_lists_iterator
  decl_component_lists_begin(const ValueDecl *VD) const {
    return const_component_lists_iterator(
        VD, getUniqueDeclsRef(), getDeclNumListsRef(),
        getComponentListSizesRef(), getComponentsRef(), SupportsMapper,
        SupportsMapper ? getUDMapperRefs() : llvm::None);
  }
  const_component_lists_iterator decl_component_lists_end() const {
    return component_lists_end();
  }
  const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
    return {decl_component_lists_begin(VD), decl_component_lists_end()};
  }

  /// Iterators to access all the declarations, number of lists, list sizes, and
  /// components.
  using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
  using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>;

  /// Range over the unique base declarations stored in the clause.
  const_all_decls_range all_decls() const {
    auto A = getUniqueDeclsRef();
    return const_all_decls_range(A.begin(), A.end());
  }

  using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
  using const_all_num_lists_range =
      llvm::iterator_range<const_all_num_lists_iterator>;

  /// Range over the per-declaration component-list counts.
  const_all_num_lists_range all_num_lists() const {
    auto A = getDeclNumListsRef();
    return const_all_num_lists_range(A.begin(), A.end());
  }

  using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
  using const_all_lists_sizes_range =
      llvm::iterator_range<const_all_lists_sizes_iterator>;

  /// Range over the cumulative component-list sizes.
  const_all_lists_sizes_range all_lists_sizes() const {
    auto A = getComponentListSizesRef();
    return const_all_lists_sizes_range(A.begin(), A.end());
  }

  using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
  using const_all_components_range =
      llvm::iterator_range<const_all_components_iterator>;

  /// Range over all expression components of all lists.
  const_all_components_range all_components() const {
    auto A = getComponentsRef();
    return const_all_components_range(A.begin(), A.end());
  }

  using mapperlist_iterator = MutableArrayRef<Expr *>::iterator;
  using mapperlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using mapperlist_range = llvm::iterator_range<mapperlist_iterator>;
  using mapperlist_const_range =
      llvm::iterator_range<mapperlist_const_iterator>;

  // Iteration over the user-defined mapper references (parallel to the
  // variable list; see getUDMapperRefs()).
  mapperlist_iterator mapperlist_begin() { return getUDMapperRefs().begin(); }
  mapperlist_iterator mapperlist_end() { return getUDMapperRefs().end(); }
  mapperlist_const_iterator mapperlist_begin() const {
    return getUDMapperRefs().begin();
  }
  mapperlist_const_iterator mapperlist_end() const {
    return getUDMapperRefs().end();
  }
  mapperlist_range mapperlists() {
    return mapperlist_range(mapperlist_begin(), mapperlist_end());
  }
  mapperlist_const_range mapperlists() const {
    return mapperlist_const_range(mapperlist_begin(), mapperlist_end());
  }
};

/// This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
                           private llvm::TrailingObjects<
                               OMPMapClause, Expr *, ValueDecl *, unsigned,
                               OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    // Per-declaration list counts followed by cumulative list sizes.
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

private:
  /// Map-type-modifiers for the 'map' clause.
  OpenMPMapModifierKind MapTypeModifiers[NumberOfOMPMapClauseModifiers] = {
      OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
      OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown};

  /// Location of map-type-modifiers for the 'map' clause.
  SourceLocation MapTypeModifiersLoc[NumberOfOMPMapClauseModifiers];

  /// Map type for the 'map' clause.
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;

  /// Is this an implicit map type or not.
  bool MapTypeIsImplicit = false;

  /// Location of the map type.
  SourceLocation MapLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build a clause for \a NumVars listed expressions, \a
  /// NumUniqueDeclarations declarations, \a NumComponentLists total component
  /// lists, and \a NumComponents total expression components.
  ///
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Locations of map-type-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param MapType Map type.
  /// \param MapTypeIsImplicit Map type is inferred implicitly.
  /// \param MapLoc Location of the map type.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(ArrayRef<OpenMPMapModifierKind> MapModifiers,
                        ArrayRef<SourceLocation> MapModifiersLoc,
                        NestedNameSpecifierLoc MapperQualifierLoc,
                        DeclarationNameInfo MapperIdInfo,
                        OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
                        SourceLocation MapLoc, const OMPVarListLocTy &Locs,
                        const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_map, Locs, Sizes,
                                  /*SupportsMapper=*/true, &MapperQualifierLoc,
                                  &MapperIdInfo),
        MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {
    // Callers must supply exactly one entry per modifier slot.
    assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() &&
           "Unexpected number of map type modifiers.");
    llvm::copy(MapModifiers, std::begin(MapTypeModifiers));

    assert(llvm::array_lengthof(MapTypeModifiersLoc) ==
               MapModifiersLoc.size() &&
           "Unexpected number of map type modifier locations.");
    llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_map, OMPVarListLocTy(), Sizes,
                                  /*SupportsMapper=*/true) {}

  /// Set map-type-modifier for the clause.
  ///
  /// \param I index for map-type-modifier.
  /// \param T map-type-modifier for the clause.
  void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) {
    assert(I < NumberOfOMPMapClauseModifiers &&
           "Unexpected index to store map type modifier, exceeds array size.");
    MapTypeModifiers[I] = T;
  }

  /// Set location for the map-type-modifier.
  ///
  /// \param I index for map-type-modifier location.
  /// \param TLoc map-type-modifier location.
  void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfOMPMapClauseModifiers &&
           "Index to store map type modifier location exceeds array size.");
    MapTypeModifiersLoc[I] = TLoc;
  }

  /// Set type for the clause.
  ///
  /// \param T Type for the clause.
  void setMapType(OpenMPMapClauseKind T) { MapType = T; }

  /// Set type location.
  ///
  /// \param TLoc Type location.
  void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Location of map-type-modifiers.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  /// \param Type Map type.
  /// \param TypeIsImplicit Map type is inferred implicitly.
  /// \param TypeLoc Location of the map type.
  static OMPMapClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs,
         ArrayRef<OpenMPMapModifierKind> MapModifiers,
         ArrayRef<SourceLocation> MapModifiersLoc,
         NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId,
         OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc);

  /// Creates an empty clause with the place for \a NumVars original
  /// expressions, \a NumUniqueDeclarations declarations, \NumComponentLists
  /// lists, and \a NumComponents expression components.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPMapClause *CreateEmpty(const ASTContext &C,
                                   const OMPMappableExprListSizeTy &Sizes);

  /// Fetches mapping kind for the clause.
  OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }

  /// Is this an implicit map type?
  /// We have to capture 'IsMapTypeImplicit' from the parser for more
  /// informative error messages. It helps distinguish map(r) from
  /// map(tofrom: r), which is important to print more helpful error
  /// messages for some target directives.
  bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }

  /// Fetches the map-type-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for map-type-modifier.
  OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMapClauseModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MapTypeModifiers[Cnt];
  }

  /// Fetches the map-type-modifier location at 'Cnt' index of array of
  /// modifiers' locations.
  ///
  /// \param Cnt index for map-type-modifier location.
  SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMapClauseModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MapTypeModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of map-type-modifiers.
  ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiers);
  }

  /// Fetches ArrayRef of location of map-type-modifiers.
  ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiersLoc);
  }

  /// Fetches location of clause mapping kind.
  SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  // The clause's children are the listed variable expressions, stored in the
  // trailing Expr* array.
  child_range children() {
    return child_range(
        reinterpret_cast<Stmt **>(varlist_begin()),
        reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPMapClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // Only 'to' and 'tofrom' map kinds report used children here — presumably
  // because only those kinds transfer values into the region; confirm against
  // the OpenMP data-mapping rules before relying on this.
  child_range used_children() {
    if (MapType == OMPC_MAP_to || MapType == OMPC_MAP_tofrom)
      return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                         reinterpret_cast<Stmt **>(varlist_end()));
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPMapClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }


  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_map;
  }
};

/// This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// NumTeams number.
  Stmt *NumTeams = nullptr;

  /// Set the NumTeams number.
  ///
  /// \param E NumTeams number.
  void setNumTeams(Expr *E) { NumTeams = E; }

public:
  /// Build 'num_teams' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_num_teams, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTeams(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNumTeamsClause()
      : OMPClause(llvm::omp::OMPC_num_teams, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return NumTeams number.
  Expr *getNumTeams() { return cast<Expr>(NumTeams); }

  /// Return NumTeams number.
  /// NOTE(review): the const overload still returns a non-const Expr*.
  Expr *getNumTeams() const { return cast<Expr>(NumTeams); }

  child_range children() { return child_range(&NumTeams, &NumTeams + 1); }

  const_child_range children() const {
    return const_child_range(&NumTeams, &NumTeams + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_num_teams;
  }
};

/// This represents 'thread_limit' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// ThreadLimit number.
  Stmt *ThreadLimit = nullptr;

  /// Set the ThreadLimit number.
  ///
  /// \param E ThreadLimit number.
  void setThreadLimit(Expr *E) { ThreadLimit = E; }

public:
  /// Build 'thread_limit' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPThreadLimitClause(Expr *E, Stmt *HelperE,
                       OpenMPDirectiveKind CaptureRegion,
                       SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_thread_limit, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPThreadLimitClause()
      : OMPClause(llvm::omp::OMPC_thread_limit, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return ThreadLimit number.
  Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); }

  /// Return ThreadLimit number.
  /// NOTE(review): the const overload still returns a non-const Expr*.
  Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); }

  child_range children() {
    return child_range(&ThreadLimit, &ThreadLimit + 1);
  }

  const_child_range children() const {
    return const_child_range(&ThreadLimit, &ThreadLimit + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_thread_limit;
  }
};

/// This represents 'priority' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task priority(n)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'priority' with
/// single expression 'n'.
class OMPPriorityClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Priority number.
  Stmt *Priority = nullptr;

  /// Set the Priority number.
  ///
  /// \param E Priority number.
  void setPriority(Expr *E) { Priority = E; }

public:
  /// Build 'priority' clause.
  ///
  /// \param Priority Expression associated with this clause.
  /// \param HelperPriority Helper priority for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPPriorityClause(Expr *Priority, Stmt *HelperPriority,
                    OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_priority, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Priority(Priority) {
    setPreInitStmt(HelperPriority, CaptureRegion);
  }

  /// Build an empty clause.
  OMPPriorityClause()
      : OMPClause(llvm::omp::OMPC_priority, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return Priority number.
  Expr *getPriority() { return cast<Expr>(Priority); }

  /// Return Priority number.
  /// NOTE(review): the const overload still returns a non-const Expr*.
  Expr *getPriority() const { return cast<Expr>(Priority); }

  child_range children() { return child_range(&Priority, &Priority + 1); }

  const_child_range children() const {
    return const_child_range(&Priority, &Priority + 1);
  }

  // Defined out-of-line (depends on directive context).
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPPriorityClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_priority;
  }
};

/// This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Grainsize expression of the clause.
  Stmt *Grainsize = nullptr;

  /// Set the grainsize expression.
  void setGrainsize(Expr *Size) { Grainsize = Size; }

public:
  /// Build 'grainsize' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper grainsize for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPGrainsizeClause(Expr *Size, Stmt *HelperSize,
                     OpenMPDirectiveKind CaptureRegion,
                     SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_grainsize, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Grainsize(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }

  /// Build an empty clause.
  explicit OMPGrainsizeClause()
      : OMPClause(llvm::omp::OMPC_grainsize, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return safe iteration space distance. Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); } child_range children() { return child_range(&Grainsize, &Grainsize + 1); } const_child_range children() const { return const_child_range(&Grainsize, &Grainsize + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPGrainsizeClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_grainsize; } }; /// This represents 'nogroup' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp taskloop nogroup /// \endcode /// In this example directive '#pragma omp taskloop' has 'nogroup' clause. class OMPNogroupClause : public OMPClause { public: /// Build 'nogroup' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_nogroup, StartLoc, EndLoc) {} /// Build an empty clause. OMPNogroupClause() : OMPClause(llvm::omp::OMPC_nogroup, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_nogroup; } }; /// This represents 'num_tasks' clause in the '#pragma omp ...' /// directive. 
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
class OMPNumTasksClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of tasks expression of the clause.
  Stmt *NumTasks = nullptr;

  /// Set the num_tasks expression.
  void setNumTasks(Expr *Size) { NumTasks = Size; }

public:
  /// Build 'num_tasks' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper num_tasks expression for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTasksClause(Expr *Size, Stmt *HelperSize,
                    OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_num_tasks, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTasks(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }

  /// Build an empty clause.
  explicit OMPNumTasksClause()
      : OMPClause(llvm::omp::OMPC_num_tasks, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the num_tasks expression (may be null for an empty clause).
  Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }

  child_range children() { return child_range(&NumTasks, &NumTasks + 1); }

  const_child_range children() const {
    return const_child_range(&NumTasks, &NumTasks + 1);
  }

  // Defined out-of-line (depends on directive context).
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPNumTasksClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_num_tasks;
  }
};

/// This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Hint expression of the 'hint' clause.
  Stmt *Hint = nullptr;

  /// Set hint expression.
  void setHint(Expr *H) { Hint = H; }

public:
  /// Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Hint(Hint) {}

  /// Build an empty clause.
  OMPHintClause()
      : OMPClause(llvm::omp::OMPC_hint, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the hint expression (may be null for an empty clause).
Expr *getHint() const { return cast_or_null<Expr>(Hint); } child_range children() { return child_range(&Hint, &Hint + 1); } const_child_range children() const { return const_child_range(&Hint, &Hint + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_hint; } }; /// This represents 'dist_schedule' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp distribute dist_schedule(static, 3) /// \endcode /// In this example directive '#pragma omp distribute' has 'dist_schedule' /// clause with arguments 'static' and '3'. class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'schedule' clause. OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown; /// Start location of the schedule kind in source code. SourceLocation KindLoc; /// Location of ',' (if any). SourceLocation CommaLoc; /// Chunk size. Expr *ChunkSize = nullptr; /// Set schedule kind. /// /// \param K Schedule kind. void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; } /// Sets the location of '('. /// /// \param Loc Location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Set schedule kind start location. /// /// \param KLoc Schedule kind location. void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// Set location of ','. /// /// \param Loc Location of ','. void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// Set chunk size. /// /// \param E Chunk size. void setChunkSize(Expr *E) { ChunkSize = E; } public: /// Build 'dist_schedule' clause with schedule kind \a Kind and chunk /// size expression \a ChunkSize. 
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind DistSchedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation KLoc, SourceLocation CommaLoc,
                        SourceLocation EndLoc,
                        OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
                        Stmt *HelperChunkSize)
      : OMPClause(llvm::omp::OMPC_dist_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    // The helper chunk size is stashed as the pre-init statement so that
    // combined directives can emit it before the loop.
    setPreInitStmt(HelperChunkSize);
  }

  /// Build an empty clause.
  explicit OMPDistScheduleClause()
      : OMPClause(llvm::omp::OMPC_dist_schedule, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Get kind of the clause.
  OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDistScheduleKindLoc() { return KindLoc; }

  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size (const overload).
  const Expr *getChunkSize() const { return ChunkSize; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDistScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_dist_schedule;
  }
};

/// This represents 'defaultmap' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of
/// kind 'scalar' with modifier 'tofrom'.
class OMPDefaultmapClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Modifiers for 'defaultmap' clause.
  OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown;

  /// Locations of modifiers.
  SourceLocation ModifierLoc;

  /// A kind of the 'defaultmap' clause.
  OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown;

  /// Start location of the defaultmap kind in source code.
  SourceLocation KindLoc;

  /// Set defaultmap kind.
  ///
  /// \param K Defaultmap kind.
  void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }

  /// Set the defaultmap modifier.
  ///
  /// \param M Defaultmap modifier.
  void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
    Modifier = M;
  }

  /// Set location of the defaultmap modifier.
  void setDefaultmapModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set defaultmap kind start location.
  ///
  /// \param KLoc Defaultmap kind location.
  void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

public:
  /// Build 'defaultmap' clause with defaultmap kind \a Kind.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Defaultmap kind.
  /// \param M The modifier applied to 'defaultmap' clause.
  /// \param MLoc Location of the modifier.
  OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation MLoc, SourceLocation KLoc,
                      SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                      OpenMPDefaultmapClauseModifier M)
      : OMPClause(llvm::omp::OMPC_defaultmap, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Modifier(M), ModifierLoc(MLoc), Kind(Kind),
        KindLoc(KLoc) {}

  /// Build an empty clause.
  explicit OMPDefaultmapClause()
      : OMPClause(llvm::omp::OMPC_defaultmap, SourceLocation(),
                  SourceLocation()) {}

  /// Get kind of the clause.
  OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }

  /// Get the modifier of the clause.
  OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
    return Modifier;
  }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDefaultmapKindLoc() { return KindLoc; }

  /// Get the modifier location.
  SourceLocation getDefaultmapModifierLoc() const { return ModifierLoc; }

  // 'defaultmap' carries no expression children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_defaultmap;
  }
};

/// This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
                          private llvm::TrailingObjects<
                              OMPToClause, Expr *, ValueDecl *, unsigned,
                              OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Motion-modifiers for the 'to' clause.
  OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = {
      OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown};

  /// Location of motion-modifiers for the 'to' clause.
  SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers];

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param TheMotionModifiers Motion-modifiers.
  /// \param TheMotionModifiersLoc Locations of motion-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause.
  /// It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers,
                       ArrayRef<SourceLocation> TheMotionModifiersLoc,
                       NestedNameSpecifierLoc MapperQualifierLoc,
                       DeclarationNameInfo MapperIdInfo,
                       const OMPVarListLocTy &Locs,
                       const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_to, Locs, Sizes,
                                  /*SupportsMapper=*/true, &MapperQualifierLoc,
                                  &MapperIdInfo) {
    assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() &&
           "Unexpected number of motion modifiers.");
    llvm::copy(TheMotionModifiers, std::begin(MotionModifiers));

    assert(llvm::array_lengthof(MotionModifiersLoc) ==
               TheMotionModifiersLoc.size() &&
           "Unexpected number of motion modifier locations.");
    llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_to, OMPVarListLocTy(), Sizes,
                                  /*SupportsMapper=*/true) {}

  /// Set motion-modifier for the clause.
  ///
  /// \param I index for motion-modifier.
  /// \param T motion-modifier for the clause.
  void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Unexpected index to store motion modifier, exceeds array size.");
    MotionModifiers[I] = T;
  }

  /// Set location for the motion-modifier.
  ///
  /// \param I index for motion-modifier location.
  /// \param TLoc motion-modifier location.
  void setMotionModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Index to store motion modifier location exceeds array size.");
    MotionModifiersLoc[I] = TLoc;
  }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param MotionModifiers Motion-modifiers.
  /// \param MotionModifiersLoc Location of motion-modifiers.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPToClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
                             ArrayRef<Expr *> Vars,
                             ArrayRef<ValueDecl *> Declarations,
                             MappableExprComponentListsRef ComponentLists,
                             ArrayRef<Expr *> UDMapperRefs,
                             ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
                             ArrayRef<SourceLocation> MotionModifiersLoc,
                             NestedNameSpecifierLoc UDMQualifierLoc,
                             DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPToClause *CreateEmpty(const ASTContext &C,
                                  const OMPMappableExprListSizeTy &Sizes);

  /// Fetches the motion-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for motion-modifier.
  OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MotionModifiers[Cnt];
  }

  /// Fetches the motion-modifier location at 'Cnt' index of array of modifiers'
  /// locations.
  ///
  /// \param Cnt index for motion-modifier location.
  SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MotionModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of motion-modifiers.
  ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiers);
  }

  /// Fetches ArrayRef of location of motion-modifiers.
  ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiersLoc);
  }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPToClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_to;
  }
};

/// This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
    : public OMPMappableExprListClause<OMPFromClause>,
      private llvm::TrailingObjects<
          OMPFromClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Motion-modifiers for the 'from' clause.
  OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = {
      OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown};

  /// Location of motion-modifiers for the 'from' clause.
  SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers];

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param TheMotionModifiers Motion-modifiers.
  /// \param TheMotionModifiersLoc Locations of motion-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers,
                         ArrayRef<SourceLocation> TheMotionModifiersLoc,
                         NestedNameSpecifierLoc MapperQualifierLoc,
                         DeclarationNameInfo MapperIdInfo,
                         const OMPVarListLocTy &Locs,
                         const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_from, Locs, Sizes,
                                  /*SupportsMapper=*/true, &MapperQualifierLoc,
                                  &MapperIdInfo) {
    assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() &&
           "Unexpected number of motion modifiers.");
    llvm::copy(TheMotionModifiers, std::begin(MotionModifiers));

    assert(llvm::array_lengthof(MotionModifiersLoc) ==
               TheMotionModifiersLoc.size() &&
           "Unexpected number of motion modifier locations.");
    llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_from, OMPVarListLocTy(),
                                  Sizes, /*SupportsMapper=*/true) {}

  /// Set motion-modifier for the clause.
  ///
  /// \param I index for motion-modifier.
  /// \param T motion-modifier for the clause.
  void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Unexpected index to store motion modifier, exceeds array size.");
    MotionModifiers[I] = T;
  }

  /// Set location for the motion-modifier.
  ///
  /// \param I index for motion-modifier location.
  /// \param TLoc motion-modifier location.
  void setMotionModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Index to store motion modifier location exceeds array size.");
    MotionModifiersLoc[I] = TLoc;
  }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param MotionModifiers Motion-modifiers.
  /// \param MotionModifiersLoc Location of motion-modifiers.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPFromClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs,
         ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
         ArrayRef<SourceLocation> MotionModifiersLoc,
         NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPFromClause *CreateEmpty(const ASTContext &C,
                                    const OMPMappableExprListSizeTy &Sizes);

  /// Fetches the motion-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for motion-modifier.
  OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MotionModifiers[Cnt];
  }

  /// Fetches the motion-modifier location at 'Cnt' index of array of modifiers'
  /// locations.
  ///
  /// \param Cnt index for motion-modifier location.
  SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MotionModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of motion-modifiers.
  ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiers);
  }

  /// Fetches ArrayRef of location of motion-modifiers.
  ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiersLoc);
  }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFromClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_from;
  }
};

/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
    : public OMPMappableExprListClause<OMPUseDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs,
                                 const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr, Locs,
                                  Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr,
                                  OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // varlist_size() original expressions, varlist_size() private copies and
    // varlist_size() initializers (see getPrivateCopies()/getInits()).
    return 3 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// Sets the list of references to private copies with initializers for new
  /// private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for new
  /// private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new private
  /// variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new private
  /// variables.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param PrivateVars Expressions referring to private copies.
  /// \param Inits Expressions referring to private copy initializers.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars,
         ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPUseDevicePtrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }

  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPUseDevicePtrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() ==
           llvm::omp::OMPC_use_device_ptr;
  }
};

/// This represents clause 'use_device_addr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_addr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_addr' with the variables 'a' and 'b'.
class OMPUseDeviceAddrClause final
    : public OMPMappableExprListClause<OMPUseDeviceAddrClause>,
      private llvm::TrailingObjects<
          OMPUseDeviceAddrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDeviceAddrClause(const OMPVarListLocTy &Locs,
                                  const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr, Locs,
                                  Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDeviceAddrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr,
                                  OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDeviceAddrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
static OMPUseDeviceAddrClause * CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPUseDeviceAddrClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_use_device_addr; } }; /// This represents clause 'is_device_ptr' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target is_device_ptr(a,b) /// \endcode /// In this example directive '#pragma omp target' has clause /// 'is_device_ptr' with the variables 'a' and 'b'. class OMPIsDevicePtrClause final : public OMPMappableExprListClause<OMPIsDevicePtrClause>, private llvm::TrailingObjects< OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a NumVars. /// /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. 
explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr, Locs, Sizes) {} /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr, OMPVarListLocTy(), Sizes) {} /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { return varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. static OMPIsDevicePtrClause * Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. 
/// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. static OMPIsDevicePtrClause * CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPIsDevicePtrClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_is_device_ptr; } }; /// This represents clause 'nontemporal' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp simd nontemporal(a) /// \endcode /// In this example directive '#pragma omp simd' has clause 'nontemporal' for /// the variable 'a'. class OMPNontemporalClause final : public OMPVarListClause<OMPNontemporalClause>, private llvm::TrailingObjects<OMPNontemporalClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
OMPNontemporalClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPNontemporalClause>(llvm::omp::OMPC_nontemporal, StartLoc, LParenLoc, EndLoc, N) { } /// Build an empty clause. /// /// \param N Number of variables. explicit OMPNontemporalClause(unsigned N) : OMPVarListClause<OMPNontemporalClause>( llvm::omp::OMPC_nontemporal, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Get the list of privatied copies if the member expression was captured by /// one of the privatization clauses. MutableArrayRef<Expr *> getPrivateRefs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateRefs() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPNontemporalClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPNontemporalClause *CreateEmpty(const ASTContext &C, unsigned N); /// Sets the list of references to private copies created in private clauses. /// \param VL List of references. 
void setPrivateRefs(ArrayRef<Expr *> VL); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPNontemporalClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range private_refs() { return child_range(reinterpret_cast<Stmt **>(getPrivateRefs().begin()), reinterpret_cast<Stmt **>(getPrivateRefs().end())); } const_child_range private_refs() const { auto Children = const_cast<OMPNontemporalClause *>(this)->private_refs(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_nontemporal; } }; /// This represents 'order' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp simd order(concurrent) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'order' /// clause with kind 'concurrent'. class OMPOrderClause final : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'default' clause. OpenMPOrderClauseKind Kind = OMPC_ORDER_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Argument of clause. void setKind(OpenMPOrderClauseKind K) { Kind = K; } /// Set argument location. /// /// \param KLoc Argument location. void setKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'order' clause with argument \p A ('concurrent'). /// /// \param A Argument of the clause ('concurrent'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. 
/// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPOrderClause(OpenMPOrderClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_order, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPOrderClause() : OMPClause(llvm::omp::OMPC_order, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. OpenMPOrderClauseKind getKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_order; } }; /// This represents the 'init' clause in '#pragma omp ...' directives. /// /// \code /// #pragma omp interop init(target:obj) /// \endcode class OMPInitClause final : public OMPVarListClause<OMPInitClause>, private llvm::TrailingObjects<OMPInitClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of interop variable. SourceLocation VarLoc; bool IsTarget = false; bool IsTargetSync = false; void setInteropVar(Expr *E) { varlist_begin()[0] = E; } void setIsTarget(bool V) { IsTarget = V; } void setIsTargetSync(bool V) { IsTargetSync = V; } /// Sets the location of the interop variable. 
void setVarLoc(SourceLocation Loc) { VarLoc = Loc; } /// Build 'init' clause. /// /// \param IsTarget Uses the 'target' interop-type. /// \param IsTargetSync Uses the 'targetsync' interop-type. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param VarLoc Location of the interop variable. /// \param EndLoc Ending location of the clause. /// \param N Number of expressions. OMPInitClause(bool IsTarget, bool IsTargetSync, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPInitClause>(llvm::omp::OMPC_init, StartLoc, LParenLoc, EndLoc, N), VarLoc(VarLoc), IsTarget(IsTarget), IsTargetSync(IsTargetSync) {} /// Build an empty clause. OMPInitClause(unsigned N) : OMPVarListClause<OMPInitClause>(llvm::omp::OMPC_init, SourceLocation(), SourceLocation(), SourceLocation(), N) { } public: /// Creates a fully specified clause. /// /// \param C AST context. /// \param InteropVar The interop variable. /// \param PrefExprs The list of preference expressions. /// \param IsTarget Uses the 'target' interop-type. /// \param IsTargetSync Uses the 'targetsync' interop-type. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param VarLoc Location of the interop variable. /// \param EndLoc Ending location of the clause. static OMPInitClause *Create(const ASTContext &C, Expr *InteropVar, ArrayRef<Expr *> PrefExprs, bool IsTarget, bool IsTargetSync, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Creates an empty clause with \a N expressions. /// /// \param C AST context. /// \param N Number of expression items. static OMPInitClause *CreateEmpty(const ASTContext &C, unsigned N); /// Returns the location of the interop variable. SourceLocation getVarLoc() const { return VarLoc; } /// Returns the interop variable. 
Expr *getInteropVar() { return varlist_begin()[0]; } const Expr *getInteropVar() const { return varlist_begin()[0]; } /// Returns true is interop-type 'target' is used. bool getIsTarget() const { return IsTarget; } /// Returns true is interop-type 'targetsync' is used. bool getIsTargetSync() const { return IsTargetSync; } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPInitClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } using prefs_iterator = MutableArrayRef<Expr *>::iterator; using const_prefs_iterator = ArrayRef<const Expr *>::iterator; using prefs_range = llvm::iterator_range<prefs_iterator>; using const_prefs_range = llvm::iterator_range<const_prefs_iterator>; prefs_range prefs() { return prefs_range(reinterpret_cast<Expr **>(std::next(varlist_begin())), reinterpret_cast<Expr **>(varlist_end())); } const_prefs_range prefs() const { auto Prefs = const_cast<OMPInitClause *>(this)->prefs(); return const_prefs_range(Prefs.begin(), Prefs.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_init; } }; /// This represents the 'use' clause in '#pragma omp ...' directives. /// /// \code /// #pragma omp interop use(obj) /// \endcode class OMPUseClause final : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Location of interop variable. SourceLocation VarLoc; /// The interop variable. Stmt *InteropVar = nullptr; /// Set the interop variable. void setInteropVar(Expr *E) { InteropVar = E; } /// Sets the location of '('. 
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Sets the location of the interop variable. void setVarLoc(SourceLocation Loc) { VarLoc = Loc; } public: /// Build 'use' clause with and interop variable expression \a InteropVar. /// /// \param InteropVar The interop variable. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param VarLoc Location of the interop variable. /// \param EndLoc Ending location of the clause. OMPUseClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_use, StartLoc, EndLoc), LParenLoc(LParenLoc), VarLoc(VarLoc), InteropVar(InteropVar) {} /// Build an empty clause. OMPUseClause() : OMPClause(llvm::omp::OMPC_use, SourceLocation(), SourceLocation()) {} /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns the location of the interop variable. SourceLocation getVarLoc() const { return VarLoc; } /// Returns the interop variable. Expr *getInteropVar() const { return cast<Expr>(InteropVar); } child_range children() { return child_range(&InteropVar, &InteropVar + 1); } const_child_range children() const { return const_child_range(&InteropVar, &InteropVar + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_use; } }; /// This represents 'destroy' clause in the '#pragma omp depobj' /// directive or the '#pragma omp interop' directive.. /// /// \code /// #pragma omp depobj(a) destroy /// #pragma omp interop destroy(obj) /// \endcode /// In these examples directive '#pragma omp depobj' and '#pragma omp interop' /// have a 'destroy' clause. The 'interop' directive includes an object. 
class OMPDestroyClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Location of interop variable.
  SourceLocation VarLoc;

  /// The interop variable.
  Stmt *InteropVar = nullptr;

  /// Set the interop variable.
  void setInteropVar(Expr *E) { InteropVar = E; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Sets the location of the interop variable.
  void setVarLoc(SourceLocation Loc) { VarLoc = Loc; }

public:
  /// Build 'destroy' clause with an interop variable expression \a InteropVar.
  ///
  /// \param InteropVar The interop variable.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param VarLoc Location of the interop variable.
  /// \param EndLoc Ending location of the clause.
  OMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation VarLoc,
                   SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_destroy, StartLoc, EndLoc),
        LParenLoc(LParenLoc), VarLoc(VarLoc), InteropVar(InteropVar) {}

  /// Build 'destroy' clause (the 'depobj' form, with no interop variable).
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPDestroyClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_destroy, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPDestroyClause()
      : OMPClause(llvm::omp::OMPC_destroy, SourceLocation(), SourceLocation()) {
  }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the location of the interop variable.
  SourceLocation getVarLoc() const { return VarLoc; }

  /// Returns the interop variable, or null for the 'depobj' form.
  Expr *getInteropVar() const { return cast_or_null<Expr>(InteropVar); }

  child_range children() {
    if (InteropVar)
      return child_range(&InteropVar, &InteropVar + 1);
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    if (InteropVar)
      return const_child_range(&InteropVar, &InteropVar + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_destroy;
  }
};

/// This represents 'novariants' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp dispatch novariants(a > 5)
/// \endcode
/// In this example directive '#pragma omp dispatch' has simple 'novariants'
/// clause with condition 'a > 5'.
class OMPNovariantsClause final : public OMPClause,
                                  public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'novariants' clause.
  Stmt *Condition = nullptr;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
  /// Build 'novariants' clause with condition \a Cond.
  ///
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNovariantsClause(Expr *Cond, Stmt *HelperCond,
                      OpenMPDirectiveKind CaptureRegion,
                      SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_novariants, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) {
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNovariantsClause()
      : OMPClause(llvm::omp::OMPC_novariants, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  child_range children() { return child_range(&Condition, &Condition + 1); }

  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPNovariantsClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_novariants;
  }
};

/// This represents 'nocontext' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp dispatch nocontext(a > 5)
/// \endcode
/// In this example directive '#pragma omp dispatch' has simple 'nocontext'
/// clause with condition 'a > 5'.
class OMPNocontextClause final : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'nocontext' clause.
  Stmt *Condition = nullptr;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
  /// Build 'nocontext' clause with condition \a Cond.
  ///
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNocontextClause(Expr *Cond, Stmt *HelperCond,
                     OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                     SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_nocontext, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) {
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNocontextClause()
      : OMPClause(llvm::omp::OMPC_nocontext, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  child_range children() { return child_range(&Condition, &Condition + 1); }

  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPNocontextClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nocontext;
  }
};

/// This represents 'detach' clause in the '#pragma omp task' directive.
///
/// \code
/// #pragma omp task detach(evt)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'detach' clause
/// with the variable 'evt'.
class OMPDetachClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression of the 'detach' clause.
  Stmt *Evt = nullptr;

  /// Set event-handler expression.
  void setEventHandler(Expr *E) { Evt = E; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
  /// Build 'detach' clause with event-handler \a Evt.
  ///
  /// \param Evt Event handler expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_detach, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Evt(Evt) {}

  /// Build an empty clause.
  OMPDetachClause()
      : OMPClause(llvm::omp::OMPC_detach, SourceLocation(), SourceLocation()) {}

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns event-handler expression.
  Expr *getEventHandler() const { return cast_or_null<Expr>(Evt); }

  child_range children() { return child_range(&Evt, &Evt + 1); }

  const_child_range children() const {
    return const_child_range(&Evt, &Evt + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_detach;
  }
};

/// This represents clause 'inclusive' in the '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan inclusive(a,b)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'inclusive'
/// with the variables 'a' and 'b'.
class OMPInclusiveClause final
    : public OMPVarListClause<OMPInclusiveClause>,
      private llvm::TrailingObjects<OMPInclusiveClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPInclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive,
                                             StartLoc, LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPInclusiveClause(unsigned N)
      : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive,
                                             SourceLocation(), SourceLocation(),
                                             SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  static OMPInclusiveClause *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPInclusiveClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPInclusiveClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_inclusive;
  }
};

/// This represents clause 'exclusive' in the '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan exclusive(a,b)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'exclusive'
/// with the variables 'a' and 'b'.
class OMPExclusiveClause final
    : public OMPVarListClause<OMPExclusiveClause>,
      private llvm::TrailingObjects<OMPExclusiveClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPExclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive,
                                             StartLoc, LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPExclusiveClause(unsigned N)
      : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive,
                                             SourceLocation(), SourceLocation(),
                                             SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  static OMPExclusiveClause *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPExclusiveClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPExclusiveClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_exclusive;
  }
};

/// This represents clause 'uses_allocators' in the '#pragma omp target'-based
/// directives.
///
/// \code
/// #pragma omp target uses_allocators(default_allocator, my_allocator(traits))
/// \endcode
/// In this example directive '#pragma omp target' has clause 'uses_allocators'
/// with the allocators 'default_allocator' and user-defined 'my_allocator'.
class OMPUsesAllocatorsClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPUsesAllocatorsClause, Expr *,
                                    SourceLocation> {
public:
  /// Data for list of allocators.
  struct Data {
    /// Allocator.
    Expr *Allocator = nullptr;
    /// Allocator traits.
    Expr *AllocatorTraits = nullptr;
    /// Locations of '(' and ')' symbols.
    SourceLocation LParenLoc, RParenLoc;
  };

private:
  friend class OMPClauseReader;
  friend TrailingObjects;

  /// Per-allocator layout of the trailing Expr* array.
  enum class ExprOffsets {
    Allocator,
    AllocatorTraits,
    Total,
  };

  /// Per-allocator layout of the trailing SourceLocation array.
  enum class ParenLocsOffsets {
    LParen,
    RParen,
    Total,
  };

  /// Location of '('.
  SourceLocation LParenLoc;
  /// Total number of allocators in the clause.
  unsigned NumOfAllocators = 0;

  /// Build clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of allocators associated with the clause.
  OMPUsesAllocatorsClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                          SourceLocation EndLoc, unsigned N)
      : OMPClause(llvm::omp::OMPC_uses_allocators, StartLoc, EndLoc),
        LParenLoc(LParenLoc), NumOfAllocators(N) {}

  /// Build an empty clause.
  /// \param N Number of allocators associated with the clause.
  ///
  explicit OMPUsesAllocatorsClause(unsigned N)
      : OMPClause(llvm::omp::OMPC_uses_allocators, SourceLocation(),
                  SourceLocation()),
        NumOfAllocators(N) {}

  unsigned numTrailingObjects(OverloadToken<Expr *>) const {
    return NumOfAllocators * static_cast<int>(ExprOffsets::Total);
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Sets the allocators data for the clause.
  void setAllocatorsData(ArrayRef<OMPUsesAllocatorsClause::Data> Data);

public:
  /// Creates clause with a list of allocators \p Data.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Data List of allocators.
  static OMPUsesAllocatorsClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<OMPUsesAllocatorsClause::Data> Data);

  /// Creates an empty clause with the place for \p N allocators.
  ///
  /// \param C AST context.
  /// \param N The number of allocators.
  static OMPUsesAllocatorsClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns number of allocators associated with the clause.
  unsigned getNumberOfAllocators() const { return NumOfAllocators; }

  /// Returns data for the specified allocator.
  OMPUsesAllocatorsClause::Data getAllocatorData(unsigned I) const;

  // Iterators
  child_range children() {
    Stmt **Begin = reinterpret_cast<Stmt **>(getTrailingObjects<Expr *>());
    return child_range(Begin, Begin + NumOfAllocators *
                                          static_cast<int>(ExprOffsets::Total));
  }
  const_child_range children() const {
    Stmt *const *Begin =
        reinterpret_cast<Stmt *const *>(getTrailingObjects<Expr *>());
    return const_child_range(
        Begin, Begin + NumOfAllocators * static_cast<int>(ExprOffsets::Total));
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_uses_allocators;
  }
};

/// This represents clause 'affinity' in the '#pragma omp task'-based
/// directives.
///
/// \code
/// #pragma omp task affinity(iterator(i = 0:n) : ([3][n])a, b[:n], c[i])
/// \endcode
/// In this example directive '#pragma omp task' has clause 'affinity' with the
/// affinity modifier 'iterator(i = 0:n)' and locator items '([3][n])a', 'b[:n]'
/// and 'c[i]'.
class OMPAffinityClause final
    : public OMPVarListClause<OMPAffinityClause>,
      private llvm::TrailingObjects<OMPAffinityClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':' symbol.
  SourceLocation ColonLoc;

  /// Build clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of locators associated with the clause.
  OMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, StartLoc,
                                            LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  /// \param N Number of locators associated with the clause.
  ///
  explicit OMPAffinityClause(unsigned N)
      : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity,
                                            SourceLocation(), SourceLocation(),
                                            SourceLocation(), N) {}

  /// Sets the affinity modifier for the clause, if any. The modifier is
  /// stored one slot past the locator list in the trailing Expr* array.
  void setModifier(Expr *E) {
    getTrailingObjects<Expr *>()[varlist_size()] = E;
  }

  /// Sets the location of ':' symbol.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a modifier and a list of locator items.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param Modifier Affinity modifier expression, if any.
  /// \param Locators List of locator items.
  static OMPAffinityClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation ColonLoc,
                                   SourceLocation EndLoc, Expr *Modifier,
                                   ArrayRef<Expr *> Locators);

  /// Creates an empty clause with the place for \p N locator items.
  ///
  /// \param C AST context.
  /// \param N The number of locator items.
  static OMPAffinityClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets affinity modifier.
  Expr *getModifier() { return getTrailingObjects<Expr *>()[varlist_size()]; }
  Expr *getModifier() const {
    return getTrailingObjects<Expr *>()[varlist_size()];
  }

  /// Gets the location of ':' symbol.
  SourceLocation getColonLoc() const { return ColonLoc; }

  // Iterators
  child_range children() {
    // Include the trailing modifier slot as a child only when it is set.
    int Offset = getModifier() ? 1 : 0;
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end() + Offset));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPAffinityClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_affinity;
  }
};

/// This represents 'filter' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp masked filter(tid)
/// \endcode
/// In this example directive '#pragma omp masked' has 'filter' clause with
/// thread id.
class OMPFilterClause final : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression of the 'filter' clause.
  Stmt *ThreadID = nullptr;

  /// Sets the thread identifier.
  void setThreadID(Expr *TID) { ThreadID = TID; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
  /// Build 'filter' clause with thread-id \a ThreadID.
  ///
  /// \param ThreadID Thread identifier.
  /// \param HelperE Helper expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPFilterClause(Expr *ThreadID, Stmt *HelperE,
                  OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                  SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_filter, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadID(ThreadID) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
OMPFilterClause() : OMPClause(llvm::omp::OMPC_filter, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return thread identifier. Expr *getThreadID() { return cast<Expr>(ThreadID); } /// Return thread identifier. Expr *getThreadID() const { return cast<Expr>(ThreadID); } child_range children() { return child_range(&ThreadID, &ThreadID + 1); } const_child_range children() const { return const_child_range(&ThreadID, &ThreadID + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_filter; } }; /// This class implements a simple visitor for OMPClause /// subclasses. template<class ImplClass, template <typename> class Ptr, typename RetTy> class OMPClauseVisitorBase { public: #define PTR(CLASS) Ptr<CLASS> #define DISPATCH(CLASS) \ return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S)) #define GEN_CLANG_CLAUSE_CLASS #define CLAUSE_CLASS(Enum, Str, Class) \ RetTy Visit##Class(PTR(Class) S) { DISPATCH(Class); } #include "llvm/Frontend/OpenMP/OMP.inc" RetTy Visit(PTR(OMPClause) S) { // Top switch clause: visit each OMPClause. switch (S->getClauseKind()) { #define GEN_CLANG_CLAUSE_CLASS #define CLAUSE_CLASS(Enum, Str, Class) \ case llvm::omp::Clause::Enum: \ return Visit##Class(static_cast<PTR(Class)>(S)); #define CLAUSE_NO_CLASS(Enum, Str) \ case llvm::omp::Clause::Enum: \ break; #include "llvm/Frontend/OpenMP/OMP.inc" } } // Base case, ignore it. 
:) RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); } #undef PTR #undef DISPATCH }; template <typename T> using const_ptr = std::add_pointer_t<std::add_const_t<T>>; template <class ImplClass, typename RetTy = void> class OMPClauseVisitor : public OMPClauseVisitorBase<ImplClass, std::add_pointer_t, RetTy> {}; template<class ImplClass, typename RetTy = void> class ConstOMPClauseVisitor : public OMPClauseVisitorBase <ImplClass, const_ptr, RetTy> {}; class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> { raw_ostream &OS; const PrintingPolicy &Policy; /// Process clauses with list of variables. template <typename T> void VisitOMPClauseList(T *Node, char StartSym); /// Process motion clauses. template <typename T> void VisitOMPMotionClause(T *Node); public: OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy) : OS(OS), Policy(Policy) {} #define GEN_CLANG_CLAUSE_CLASS #define CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(Class *S); #include "llvm/Frontend/OpenMP/OMP.inc" }; struct OMPTraitProperty { llvm::omp::TraitProperty Kind = llvm::omp::TraitProperty::invalid; /// The raw string as we parsed it. This is needed for the `isa` trait set /// (which accepts anything) and (later) extensions. StringRef RawString; }; struct OMPTraitSelector { Expr *ScoreOrCondition = nullptr; llvm::omp::TraitSelector Kind = llvm::omp::TraitSelector::invalid; llvm::SmallVector<OMPTraitProperty, 1> Properties; }; struct OMPTraitSet { llvm::omp::TraitSet Kind = llvm::omp::TraitSet::invalid; llvm::SmallVector<OMPTraitSelector, 2> Selectors; }; /// Helper data structure representing the traits in a match clause of an /// `declare variant` or `metadirective`. The outer level is an ordered /// collection of selector sets, each with an associated kind and an ordered /// collection of selectors. A selector has a kind, an optional score/condition, /// and an ordered collection of properties. 
/// Helper data structure holding the parsed traits of a `match` clause.
class OMPTraitInfo {
  /// Private constructor accessible only by ASTContext.
  OMPTraitInfo() {}
  friend class ASTContext;

public:
  /// Reconstruct a (partial) OMPTraitInfo object from a mangled name.
  OMPTraitInfo(StringRef MangledName);

  /// The outermost level of selector sets.
  llvm::SmallVector<OMPTraitSet, 2> Sets;

  /// Invoke \p Cond on every score/condition expression of every selector in
  /// every set; returns true as soon as \p Cond does (short-circuits via
  /// llvm::any_of). The bool passed to \p Cond distinguishes a score from a
  /// user condition: only `user_condition` selectors carry a condition.
  bool anyScoreOrCondition(
      llvm::function_ref<bool(Expr *&, bool /* IsScore */)> Cond) {
    return llvm::any_of(Sets, [&](OMPTraitSet &Set) {
      return llvm::any_of(
          Set.Selectors, [&](OMPTraitSelector &Selector) {
            return Cond(Selector.ScoreOrCondition,
                        /* IsScore */ Selector.Kind !=
                            llvm::omp::TraitSelector::user_condition);
          });
    });
  }

  /// Create a variant match info object from this trait info object. While the
  /// former is a flat representation the actual main difference is that the
  /// latter uses clang::Expr to store the score/condition while the former is
  /// independent of clang. Thus, expressions and conditions are evaluated in
  /// this method.
  void getAsVariantMatchInfo(ASTContext &ASTCtx,
                             llvm::omp::VariantMatchInfo &VMI) const;

  /// Return a string representation identifying this context selector.
  std::string getMangledName() const;

  /// Check the extension trait \p TP is active, i.e., whether any
  /// `implementation` set carries an `implementation_extension` selector
  /// with a property of kind \p TP.
  bool isExtensionActive(llvm::omp::TraitProperty TP) {
    for (const OMPTraitSet &Set : Sets) {
      if (Set.Kind != llvm::omp::TraitSet::implementation)
        continue;
      for (const OMPTraitSelector &Selector : Set.Selectors) {
        if (Selector.Kind !=
            llvm::omp::TraitSelector::implementation_extension)
          continue;
        for (const OMPTraitProperty &Property : Selector.Properties) {
          if (Property.Kind == TP)
            return true;
        }
      }
    }
    return false;
  }

  /// Print a human readable representation into \p OS.
  void print(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const;
};
llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo &TI);
llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo *TI);

/// Clang specific specialization of the OMPContext to lookup target features.
struct TargetOMPContext final : public llvm::omp::OMPContext {
  TargetOMPContext(ASTContext &ASTCtx,
                   std::function<void(StringRef)> &&DiagUnknownTrait,
                   const FunctionDecl *CurrentFunctionDecl);
  virtual ~TargetOMPContext() = default;

  /// See llvm::omp::OMPContext::matchesISATrait
  bool matchesISATrait(StringRef RawString) const override;

private:
  std::function<bool(StringRef)> FeatureValidityCheck;
  std::function<void(StringRef)> DiagUnknownTrait;
  llvm::StringMap<bool> FeatureMap;
};

/// Contains data for OpenMP directives: clauses, children
/// expressions/statements (helpers for codegen) and associated statement, if
/// any.
///
/// Trailing storage layout (established by the TrailingObjects base and the
/// accessors below): NumClauses OMPClause pointers, then NumChildren child
/// Stmt pointers, then — when HasAssociatedStmt — one extra Stmt pointer at
/// index NumChildren holding the associated statement.
class OMPChildren final
    : private llvm::TrailingObjects<OMPChildren, OMPClause *, Stmt *> {
  friend TrailingObjects;
  friend class OMPClauseReader;
  friend class OMPExecutableDirective;
  template <typename T> friend class OMPDeclarativeDirective;

  /// Numbers of clauses.
  unsigned NumClauses = 0;
  /// Number of child expressions/stmts.
  unsigned NumChildren = 0;
  /// true if the directive has associated statement.
  bool HasAssociatedStmt = false;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<OMPClause *>) const {
    return NumClauses;
  }

  // Instances are only created via the factory functions below, placed into
  // caller-provided storage sized by size().
  OMPChildren() = delete;

  OMPChildren(unsigned NumClauses, unsigned NumChildren, bool HasAssociatedStmt)
      : NumClauses(NumClauses), NumChildren(NumChildren),
        HasAssociatedStmt(HasAssociatedStmt) {}

  /// Returns the number of bytes of trailing storage required for the given
  /// configuration.
  static size_t size(unsigned NumClauses, bool HasAssociatedStmt,
                     unsigned NumChildren);

  static OMPChildren *Create(void *Mem, ArrayRef<OMPClause *> Clauses);
  static OMPChildren *Create(void *Mem, ArrayRef<OMPClause *> Clauses, Stmt *S,
                             unsigned NumChildren = 0);
  static OMPChildren *CreateEmpty(void *Mem, unsigned NumClauses,
                                  bool HasAssociatedStmt = false,
                                  unsigned NumChildren = 0);

public:
  unsigned getNumClauses() const { return NumClauses; }
  unsigned getNumChildren() const { return NumChildren; }
  bool hasAssociatedStmt() const { return HasAssociatedStmt; }

  /// Set associated statement.
  void setAssociatedStmt(Stmt *S) {
    // Stored one past the child statements (slot NumChildren).
    getTrailingObjects<Stmt *>()[NumChildren] = S;
  }

  void setChildren(ArrayRef<Stmt *> Children);

  /// Sets the list of clauses for the directive.
  ///
  /// \param Clauses The list of clauses for the directive.
  ///
  void setClauses(ArrayRef<OMPClause *> Clauses);

  /// Returns statement associated with the directive.
  const Stmt *getAssociatedStmt() const {
    return const_cast<OMPChildren *>(this)->getAssociatedStmt();
  }
  Stmt *getAssociatedStmt() {
    assert(HasAssociatedStmt &&
           "Expected directive with the associated statement.");
    return getTrailingObjects<Stmt *>()[NumChildren];
  }

  /// Get the clauses storage.
  MutableArrayRef<OMPClause *> getClauses() {
    return llvm::makeMutableArrayRef(getTrailingObjects<OMPClause *>(),
                                     NumClauses);
  }
  ArrayRef<OMPClause *> getClauses() const {
    return const_cast<OMPChildren *>(this)->getClauses();
  }

  /// Returns the captured statement associated with the
  /// component region within the (combined) directive.
  ///
  /// \param RegionKind Component region kind.
  const CapturedStmt *
  getCapturedStmt(OpenMPDirectiveKind RegionKind,
                  ArrayRef<OpenMPDirectiveKind> CaptureRegions) const {
    assert(llvm::any_of(
               CaptureRegions,
               [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) &&
           "RegionKind not found in OpenMP CaptureRegions.");
    // Walk the chain of nested CapturedStmts, one level per capture region,
    // until the requested region kind is reached.
    auto *CS = cast<CapturedStmt>(getAssociatedStmt());
    for (auto ThisCaptureRegion : CaptureRegions) {
      if (ThisCaptureRegion == RegionKind)
        return CS;
      CS = cast<CapturedStmt>(CS->getCapturedStmt());
    }
    llvm_unreachable("Incorrect RegionKind specified for directive.");
  }

  /// Get innermost captured statement for the construct.
  CapturedStmt *
  getInnermostCapturedStmt(ArrayRef<OpenMPDirectiveKind> CaptureRegions) {
    assert(hasAssociatedStmt() && "Must have associated captured statement.");
    assert(!CaptureRegions.empty() &&
           "At least one captured statement must be provided.");
    // Descend CaptureRegions.size() - 1 levels to reach the innermost capture.
    auto *CS = cast<CapturedStmt>(getAssociatedStmt());
    for (unsigned Level = CaptureRegions.size(); Level > 1; --Level)
      CS = cast<CapturedStmt>(CS->getCapturedStmt());
    return CS;
  }
  const CapturedStmt *
  getInnermostCapturedStmt(ArrayRef<OpenMPDirectiveKind> CaptureRegions) const {
    return const_cast<OMPChildren *>(this)->getInnermostCapturedStmt(
        CaptureRegions);
  }

  MutableArrayRef<Stmt *> getChildren();
  ArrayRef<Stmt *> getChildren() const {
    return const_cast<OMPChildren *>(this)->getChildren();
  }

  /// Returns the associated statement with all enclosing CapturedStmt
  /// wrappers stripped off, or the associated statement itself when it is
  /// not a CapturedStmt.
  Stmt *getRawStmt() {
    assert(HasAssociatedStmt &&
           "Expected directive with the associated statement.");
    if (auto *CS = dyn_cast<CapturedStmt>(getAssociatedStmt())) {
      Stmt *S = nullptr;
      do {
        S = CS->getCapturedStmt();
        CS = dyn_cast<CapturedStmt>(S);
      } while (CS);
      return S;
    }
    return getAssociatedStmt();
  }
  const Stmt *getRawStmt() const {
    return const_cast<OMPChildren *>(this)->getRawStmt();
  }

  /// Returns a single-element range over the associated-statement slot, or an
  /// empty range when there is no associated statement.
  Stmt::child_range getAssociatedStmtAsRange() {
    if (!HasAssociatedStmt)
      return Stmt::child_range(Stmt::child_iterator(), Stmt::child_iterator());
    return Stmt::child_range(&getTrailingObjects<Stmt *>()[NumChildren],
                             &getTrailingObjects<Stmt *>()[NumChildren + 1]);
  }
};

} // namespace clang

#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
GB_unaryop__identity_uint32_int32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// Changes belong in the generator template, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint32_int32
// op(A') function:  GB_tran__identity_uint32_int32

// C type:   uint32_t
// A type:   int32_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: the cast itself does all the work)
#define GB_OP(z, x)   \
    z = x ;

// casting
#define GB_CASTING(z, x)   \
    uint32_t z = (uint32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */          \
    GB_GETA (aij, Ax, pA) ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;        \
    GB_OP (GB_CX (pC), x) ;      \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over all anz entries; iterations are independent, so the loop
// is statically scheduled across nthreads OpenMP threads.
GrB_Info GB_unop__identity_uint32_int32
(
    uint32_t *restrict Cx,          // output array, anz entries
    const int32_t *restrict Ax,     // input array, anz entries
    int64_t anz,                    // number of entries
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The implementation is textually included from GB_unaryop_transpose.c, which
// expands the GB_* macros defined above for this type combination.
GrB_Info GB_tran__identity_uint32_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 32; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
ast-dump-openmp-taskloop-simd.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp taskloop simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp taskloop simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp taskloop simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp taskloop simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp taskloop simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-taskloop-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:4:1, col:26> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | 
`-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 
'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:4:1) *const restrict' // CHECK-NEXT: | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:10:1, col:26> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:12:5, line:13:7> 
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 
'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:10:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:17:1, col:38> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:27, col:37> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:36> 'int' // CHECK-NEXT: | | |-value: Int 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:36> 'int' 1 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | 
`-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 
'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:17:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:24:1, col:38> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:27, col:37> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:36> 'int' // CHECK-NEXT: | | |-value: Int 2 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:36> 'int' 2 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | 
|-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 
'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:24:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPTaskLoopSimdDirective {{.*}} <line:31:1, col:38> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:27, col:37> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:36> 'int' // CHECK-NEXT: | |-value: Int 2 // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:36> 'int' 2 // CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 
'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-<<<NULL>>> // CHECK-NEXT: | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-<<<NULL>>> // CHECK-NEXT: | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | `-VarDecl {{.*}} <col:12, col:20> 
col:16 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | |-<<<NULL>>> // CHECK-NEXT: | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 
'void *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:31:1) *const restrict' // CHECK-NEXT: |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: `-IntegerLiteral {{.*}} <col:20> 'int' 0
compare.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP AAA RRRR EEEEE % % C O O MM MM P P A A R R E % % C O O M M M PPPP AAAAA RRRR EEE % % C O O M M P A A R R E % % CCCC OOO M M P A A R R EEEEE % % % % % % MagickCore Image Comparison Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/compare.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/statistic.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p a r e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompareImages() compares one or more pixel channels of an image to a % reconstructed image and returns the difference image. % % The format of the CompareImages method is: % % Image *CompareImages(const Image *image,const Image *reconstruct_image, % const MetricType metric,double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. 
% */

/*
  GetImageChannels() returns the number of pixel channels of `image` whose
  traits include UpdatePixelTrait, i.e. the channels the compare metrics
  actually accumulate into.  Returns at least 1 so callers (e.g. the
  distortion routines that divide by this value) never divide by zero.
*/
static size_t GetImageChannels(const Image *image)
{
  ssize_t
    i;

  size_t
    channels;

  channels=0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits & UpdatePixelTrait) != 0)
      channels++;
  }
  /* never report 0 channels: used as a divisor by the distortion metrics */
  return(channels == 0 ? (size_t) 1 : channels);
}

/*
  CompareImages() computes the distortion between `image` and
  `reconstruct_image` with GetImageDistortion(), then renders a difference
  image: matching pixels are painted with the lowlight color, differing
  pixels with the highlight color, and masked pixels with the masklight
  color; the result is composited over an extent-padded clone of `image`.
  Returns the difference image, or NULL on failure; `*distortion` receives
  the computed metric value.
*/
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *highlight_view,
    *image_view,
    *reconstruct_view;

  const char
    *artifact;

  double
    fuzz;

  Image
    *clone_image,
    *difference_image,
    *highlight_image;

  MagickBooleanType
    status;

  PixelInfo
    highlight,
    lowlight,
    masklight;

  RectangleInfo
    geometry;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* compute the requested metric first; bail out before any image is built */
  status=GetImageDistortion(image,reconstruct_image,metric,distortion,
    exception);
  if (status == MagickFalse)
    return((Image *) NULL);
  /* size the result to cover the larger of the two images */
  columns=MagickMax(image->columns,reconstruct_image->columns);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  SetGeometry(image,&geometry);
  geometry.width=columns;
  geometry.height=rows;
  /* clone without the read mask so ExtentImage sees every pixel */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
  difference_image=ExtentImage(clone_image,&geometry,exception);
  clone_image=DestroyImage(clone_image);
  if (difference_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
  highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (highlight_image == (Image *) NULL)
    {
      difference_image=DestroyImage(difference_image);
      return((Image *) NULL);
    }
  status=SetImageStorageClass(highlight_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      difference_image=DestroyImage(difference_image);
      highlight_image=DestroyImage(highlight_image);
      return((Image *) NULL);
    }
  (void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
  (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
  /*
    Annotation colors: built-in defaults, each overridable through an image
    artifact ("compare:highlight-color" etc.).
  */
  (void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
  artifact=GetImageArtifact(image,"compare:highlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
  (void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
  artifact=GetImageArtifact(image,"compare:lowlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
  (void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
  artifact=GetImageArtifact(image,"compare:masklight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
  /*
    Generate difference image.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,highlight_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    Quantum
      *magick_restrict r;

    ssize_t
      x;

    /* cooperative early-out: cannot `break` inside an OpenMP loop */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickStatusType
        difference;

      ssize_t
        i;

      /* pixels excluded by either read mask are painted with masklight */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          SetPixelViaPixelInfo(highlight_image,&masklight,r);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          r+=GetPixelChannels(highlight_image);
          continue;
        }
      difference=MagickFalse;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance,
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* alpha is compared directly; color channels are alpha-weighted */
        if (channel == AlphaPixelChannel)
          pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        distance=pixel*pixel;
        if (distance >= fuzz)
          {
            difference=MagickTrue;
            break;
          }
      }
      if (difference == MagickFalse)
        SetPixelViaPixelInfo(highlight_image,&lowlight,r);
      else
        SetPixelViaPixelInfo(highlight_image,&highlight,r);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
      r+=GetPixelChannels(highlight_image);
    }
    sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  highlight_view=DestroyCacheView(highlight_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  (void) CompositeImage(difference_image,highlight_image,image->compose,
    MagickTrue,0,0,exception);
  highlight_image=DestroyImage(highlight_image);
  if (status == MagickFalse)
    difference_image=DestroyImage(difference_image);
  return(difference_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e D i s t o r t i o n                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDistortion() compares one or more pixel channels of an image to a
%  reconstructed image and returns the specified distortion metric.
%
%  The format of the GetImageDistortion method is:
%
%      MagickBooleanType GetImageDistortion(const Image *image,
%        const Image *reconstruct_image,const MetricType metric,
%        double *distortion,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o reconstruct_image: the reconstruct image.
%
%    o metric: the metric.
%
%    o distortion: the computed distortion between the images.
%
%    o exception: return any errors or warnings in this structure.
% */ static MagickBooleanType GetAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double fuzz; MagickBooleanType status; size_t columns, rows; ssize_t y; /* Compute the absolute difference in pixels between two images. */ status=MagickTrue; fuzz=GetFuzzyColorDistance(image,reconstruct_image); rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; const Quantum *magick_restrict p, *magick_restrict q; ssize_t j, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; MagickBooleanType difference; ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } difference=MagickFalse; Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance, pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == 
UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q); else pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q); distance=pixel*pixel; if (distance >= fuzz) { channel_distortion[i]++; difference=MagickTrue; } } if (difference != MagickFalse) channel_distortion[CompositePixelChannel]++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetAbsoluteDistortion) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static MagickBooleanType GetFuzzDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; const Quantum *magick_restrict p, *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } (void) 
/*
  GetMeanAbsoluteDistortion() computes the per-channel mean absolute error
  (MAE) between image and reconstruct_image.  Absolute, QuantumScale-
  normalized differences are accumulated per channel and divided by the
  number of unmasked pixels; the composite slot is additionally averaged
  over the channel count.

    o image: the image.
    o reconstruct_image: the reconstruct image.
    o distortion: per-channel accumulators (MaxPixelChannels+1 slots).
    o exception: return any errors or warnings in this structure.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /* Compare over the union of both geometries. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels excluded by either image's read mask. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is compared directly; color channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) p[i]-
            GetPixelChannel(reconstruct_image,channel,q));
        else
          distance=QuantumScale*fabs(Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q));
        channel_distortion[i]+=distance;
        channel_distortion[CompositePixelChannel]+=distance;
      }
      area++;  /* count of unmasked pixels actually compared */
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    /* Merge this row's partial sums into the shared accumulators. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* PerceptibleReciprocal() guards the area == 0 (fully masked) case. */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
channel_distortion[CompositePixelChannel]+=distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanAbsoluteError) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=(double) GetImageChannels(image); return(status); } static MagickBooleanType GetMeanErrorPerPixel(Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; double area, maximum_error, mean_error; size_t columns, rows; ssize_t y; status=MagickTrue; area=0.0; maximum_error=0.0; mean_error=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { const Quantum *magick_restrict p, *magick_restrict q; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); 
/*
  GetMeanSquaredDistortion() computes the per-channel mean squared error
  (MSE) between image and reconstruct_image.  Squared, QuantumScale-
  normalized differences are accumulated per channel and divided by the
  number of unmasked pixels; the composite slot is additionally averaged
  over the channel count.

    o image: the image.
    o reconstruct_image: the reconstruct image.
    o distortion: per-channel accumulators (MaxPixelChannels+1 slots).
    o exception: return any errors or warnings in this structure.
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /* Compare over the union of both geometries. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels excluded by either image's read mask. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is compared directly; color channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;  /* count of unmasked pixels actually compared */
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    /* Merge this row's partial sums into the shared accumulators. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* PerceptibleReciprocal() guards the area == 0 (fully masked) case. */
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=GetImageChannels(image);
  return(status);
}
/*
  GetNormalizedCrossCorrelationDistortion() computes the normalized cross
  correlation (NCC) between image and reconstruct_image.  Channel means are
  subtracted and the result is scaled by the product of the channel standard
  deviations, so the metric is insensitive to uniform changes in lighting
  and exposure.  Two passes are made: the first counts the unmasked pixels
  (the normalization area), the second accumulates the mean-removed
  cross products.

    o image: the image (its progress monitor, if any, is invoked per row).
    o reconstruct_image: the reconstruct image.
    o distortion: per-channel accumulators (MaxPixelChannels+1 slots).
    o exception: return any errors or warnings in this structure.
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,double *distortion,
  ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  double
    area;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure condition.
  */
  image_statistics=GetImageStatistics(image,exception);
  reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      /* Release whichever statistics buffer was successfully acquired. */
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= MaxPixelChannels; i++)
    distortion[i]=0.0;
  /* Compare over the union of both geometries. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  /*
    Pass 1: count the unmasked pixels that will contribute to the metric.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  /* Guard against area == 0 (fully masked) before normalizing. */
  area=PerceptibleReciprocal(area);
  /*
    Pass 2: accumulate the mean-removed, alpha-weighted cross products.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          {
            distortion[i]+=area*QuantumScale*(p[i]-
              image_statistics[channel].mean)*(GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
        else
          {
            distortion[i]+=area*QuantumScale*(Sa*p[i]-
              image_statistics[channel].mean)*(Da*GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): the atomic pragma is a no-op here — this loop is
           not parallel; harmless either way. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,rows);
        if (proceed == MagickFalse)
          {
            status=MagickFalse;
            break;
          }
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  distortion[CompositePixelChannel]=0.0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      gamma;

    PixelChannel channel = GetPixelChannelChannel(image,i);
    gamma=image_statistics[channel].standard_deviation*
      reconstruct_statistics[channel].standard_deviation;
    gamma=PerceptibleReciprocal(gamma);
    distortion[i]=QuantumRange*gamma*distortion[i];
    distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
  }
  /* Composite is the RMS of the per-channel correlations. */
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
    GetImageChannels(image));
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}
/*
  GetPeakAbsoluteDistortion() computes the peak (maximum) absolute,
  QuantumScale-normalized error over all unmasked pixels, per channel and
  overall.  Unlike the averaging metrics this is a max-reduction, so no
  area count is kept.

    o image: the image.
    o reconstruct_image: the reconstruct image.
    o distortion: per-channel maxima (MaxPixelChannels+1 slots).
    o exception: return any errors or warnings in this structure.
*/
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /* Compare over the union of both geometries. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels excluded by either image's read mask. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is compared directly; color channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) p[i]-
            GetPixelChannel(reconstruct_image,channel,q));
        else
          distance=QuantumScale*fabs(Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q));
        if (distance > channel_distortion[i])
          channel_distortion[i]=distance;
        if (distance > channel_distortion[CompositePixelChannel])
          channel_distortion[CompositePixelChannel]=distance;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    /* Merge this row's maxima into the shared maxima. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      if (channel_distortion[j] > distortion[j])
        distortion[j]=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
(QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=QuantumScale*fabs((double) p[i]- GetPixelChannel(reconstruct_image,channel,q)); else distance=QuantumScale*fabs(Sa*p[i]-Da* GetPixelChannel(reconstruct_image,channel,q)); if (distance > channel_distortion[i]) channel_distortion[i]=distance; if (distance > channel_distortion[CompositePixelChannel]) channel_distortion[CompositePixelChannel]=distance; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPeakAbsoluteError) #endif for (j=0; j <= MaxPixelChannels; j++) if (channel_distortion[j] > distortion[j]) distortion[j]=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; ssize_t i; status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception); for (i=0; i <= MaxPixelChannels; i++) if (fabs(distortion[i]) < MagickEpsilon) 
/*
  GetPerceptualHashDistortion() compares the perceptual hashes (image-moment
  phashes over multiple colorspaces) of the two images and accumulates the
  per-channel differences into distortion[].

    o image: the image (the "phash:normalize" artifact selects the
      normalized difference form).
    o reconstruct_image: the reconstruct image.
    o distortion: per-channel accumulators (MaxPixelChannels+1 slots).
    o exception: return any errors or warnings in this structure.
*/
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *channel_phash,
    *reconstruct_phash;

  const char
    *artifact;

  MagickBooleanType
    normalize;

  ssize_t
    channel;

  /*
    Compute perceptual hash in the sRGB colorspace.
  */
  channel_phash=GetImagePerceptualHash(image,exception);
  if (channel_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        channel_phash);
      return(MagickFalse);
    }
  artifact=GetImageArtifact(image,"phash:normalize");
  normalize=(artifact == (const char *) NULL) ||
    (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
  for (channel=0; channel < MaxPixelChannels; channel++)
  {
    double
      difference;

    ssize_t
      i;

    difference=0.0;
    for (i=0; i < MaximumNumberOfImageMoments; i++)
    {
      double
        alpha,
        beta;

      ssize_t
        j;

      for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++)
      {
        alpha=channel_phash[channel].phash[j][i];
        beta=reconstruct_phash[channel].phash[j][i];
        if (normalize == MagickFalse)
          difference+=(beta-alpha)*(beta-alpha);
        else
          /* NOTE(review): assignment (not +=) — only the last moment /
             colorspace survives in the normalized form; confirm intended. */
          difference=sqrt((beta-alpha)*(beta-alpha)/
            channel_phash[0].number_channels);
      }
    }
    /* distortion[channel] is private per iteration; the composite slot is
       shared across threads, hence the critical section. */
    distortion[channel]+=difference;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
    distortion[CompositePixelChannel]+=difference;
  }
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
  return(MagickTrue);
}
*/ reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory( reconstruct_phash); channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash); return(MagickTrue); } static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; ssize_t i; status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception); for (i=0; i <= MaxPixelChannels; i++) distortion[i]=sqrt(distortion[i]); return(status); } static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { #define SSIMRadius 5.0 #define SSIMSigma 1.5 #define SSIMBlocksize 8 #define SSIMK1 0.01 #define SSIMK2 0.03 #define SSIML 1.0 CacheView *image_view, *reconstruct_view; char geometry[MagickPathExtent]; const char *artifact; double c1, c2, radius, sigma; KernelInfo *kernel_info; MagickBooleanType status; ssize_t i; size_t columns, rows; ssize_t y; /* Compute structural similarity index @ https://en.wikipedia.org/wiki/Structural_similarity. 
*/ radius=SSIMRadius; artifact=GetImageArtifact(image,"compare:ssim-radius"); if (artifact != (const char *) NULL) radius=StringToDouble(artifact,(char **) NULL); sigma=SSIMSigma; artifact=GetImageArtifact(image,"compare:ssim-sigma"); if (artifact != (const char *) NULL) sigma=StringToDouble(artifact,(char **) NULL); (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g", radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); c1=pow(SSIMK1*SSIML,2.0); artifact=GetImageArtifact(image,"compare:ssim-k1"); if (artifact != (const char *) NULL) c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0); c2=pow(SSIMK2*SSIML,2.0); artifact=GetImageArtifact(image,"compare:ssim-k2"); if (artifact != (const char *) NULL) c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0); status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,reconstruct_image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; const Quantum *magick_restrict p, *magick_restrict q; ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y- ((ssize_t) kernel_info->height/2L),columns+kernel_info->width, kernel_info->height,exception); q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/ 2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width, kernel_info->height,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) 
memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double x_pixel_mu[MaxPixelChannels+1], x_pixel_sigma_squared[MaxPixelChannels+1], xy_sigma[MaxPixelChannels+1], y_pixel_mu[MaxPixelChannels+1], y_pixel_sigma_squared[MaxPixelChannels+1]; const Quantum *magick_restrict reference, *magick_restrict target; MagickRealType *k; ssize_t v; (void) memset(x_pixel_mu,0,sizeof(x_pixel_mu)); (void) memset(x_pixel_sigma_squared,0,sizeof(x_pixel_sigma_squared)); (void) memset(xy_sigma,0,sizeof(xy_sigma)); (void) memset(x_pixel_sigma_squared,0,sizeof(y_pixel_sigma_squared)); (void) memset(y_pixel_mu,0,sizeof(y_pixel_mu)); (void) memset(y_pixel_sigma_squared,0,sizeof(y_pixel_sigma_squared)); k=kernel_info->values; reference=p; target=q; for (v=0; v < (ssize_t) kernel_info->height; v++) { ssize_t u; for (u=0; u < (ssize_t) kernel_info->width; u++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double x_pixel, y_pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits( reconstruct_image,channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; x_pixel=QuantumScale*reference[i]; x_pixel_mu[i]+=(*k)*x_pixel; x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel; y_pixel=QuantumScale* GetPixelChannel(reconstruct_image,channel,target); y_pixel_mu[i]+=(*k)*y_pixel; y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel; xy_sigma[i]+=(*k)*x_pixel*y_pixel; } k++; reference+=GetPixelChannels(image); target+=GetPixelChannels(reconstruct_image); } reference+=GetPixelChannels(image)*columns; target+=GetPixelChannels(reconstruct_image)*columns; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double ssim, x_pixel_mu_squared, x_pixel_sigmas_squared, xy_mu, xy_sigmas, y_pixel_mu_squared, y_pixel_sigmas_squared; PixelChannel channel = 
GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits( reconstruct_image,channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i]; y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i]; xy_mu=x_pixel_mu[i]*y_pixel_mu[i]; xy_sigmas=xy_sigma[i]-xy_mu; x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared; y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared; ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/ ((x_pixel_mu_squared+y_pixel_mu_squared+c1)* (x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2)); channel_distortion[i]+=ssim; channel_distortion[CompositePixelChannel]+=ssim; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion) #endif for (i=0; i <= MaxPixelChannels; i++) distortion[i]+=channel_distortion[i]; } image_view=DestroyCacheView(image_view); reconstruct_view=DestroyCacheView(reconstruct_view); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0)) continue; distortion[i]/=((double) columns*rows); } distortion[CompositePixelChannel]/=((double) columns*rows); distortion[CompositePixelChannel]/=(double) GetImageChannels(image); kernel_info=DestroyKernelInfo(kernel_info); return(status); } static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; ssize_t i; status=GetStructuralSimilarityDistortion(image,reconstruct_image, distortion,exception); for (i=0; i <= MaxPixelChannels; 
/*
  GetImageDistortion() compares one or more pixel channels of an image to a
  reconstructed image and returns the specified distortion metric's
  composite value in *distortion.  The per-metric work is dispatched to the
  static helpers above; the composite result is also recorded as the image
  property "distortion".

    o image: the image.
    o reconstruct_image: the reconstruct image.
    o metric: the metric (NormalizedCrossCorrelation is the default).
    o distortion: receives the composite distortion.
    o exception: return any errors or warnings in this structure.
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  /* Every switch arm assigns status; the default case shares the NCC arm. */
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  *distortion=channel_distortion[CompositePixelChannel];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  /* Record the composite value as an image property. */
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
% */ MagickExport double *GetImageDistortions(Image *image, const Image *reconstruct_image,const MetricType metric, ExceptionInfo *exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Get image distortion. */ length=MaxPixelChannels+1UL; channel_distortion=(double *) AcquireQuantumMemory(length, sizeof(*channel_distortion)); if (channel_distortion == (double *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(channel_distortion,0,length* sizeof(*channel_distortion)); status=MagickTrue; switch (metric) { case AbsoluteErrorMetric: { status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion, exception); break; } case FuzzErrorMetric: { status=GetFuzzDistortion(image,reconstruct_image,channel_distortion, exception); break; } case MeanAbsoluteErrorMetric: { status=GetMeanAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case MeanErrorPerPixelErrorMetric: { status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion, exception); break; } case MeanSquaredErrorMetric: { status=GetMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case NormalizedCrossCorrelationErrorMetric: default: { status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakAbsoluteErrorMetric: { status=GetPeakAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakSignalToNoiseRatioErrorMetric: { 
status=GetPeakSignalToNoiseRatio(image,reconstruct_image, channel_distortion,exception); break; } case PerceptualHashErrorMetric: { status=GetRootMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case RootMeanSquaredErrorMetric: { status=GetRootMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralSimilarityErrorMetric: { status=GetStructuralSimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralDissimilarityErrorMetric: { status=GetStructuralDisimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } } if (status == MagickFalse) { channel_distortion=(double *) RelinquishMagickMemory(channel_distortion); return((double *) NULL); } return(channel_distortion); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e s E q u a l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImagesEqual() compare the pixels of two images and returns immediately % if any pixel is not identical. % % The format of the IsImagesEqual method is: % % MagickBooleanType IsImagesEqual(const Image *image, % const Image *reconstruct_image,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o exception: return any errors or warnings in this structure. 
/*
  IsImagesEqual() compares the pixels of two images and returns MagickTrue
  only if every comparable channel of every pixel matches to within
  MagickEpsilon; it returns immediately (MagickFalse) on the first
  difference or on a pixel-cache failure.

    o image: the image.
    o reconstruct_image: the reconstruct image.
    o exception: return any errors or warnings in this structure.
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  /* Compare over the union of both geometries. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    /* NOTE(review): the q check casts to (Quantum *), not (const Quantum *)
       as elsewhere — harmless, but inconsistent. */
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        if (distance >= MagickEpsilon)
          break;  /* first mismatching channel */
      }
      if (i < (ssize_t) GetPixelChannels(image))
        break;  /* channel loop exited early: pixels differ */
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (x < (ssize_t) columns)
      break;  /* row loop exited early: images differ */
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Completing every row means no difference was found. */
  return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}
% */
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        /*
          Only channels defined on both sides and flagged for update in the
          reconstruct image contribute to the error metrics.
        */
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Guard against a zero comparison area (degenerate geometry, failed pixel
    fetch on the first row, or no updatable channels); dividing by zero here
    would poison image->error with NaN/Inf.
  */
  if (area != 0.0)
    {
      image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
      image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
        mean_error/area);
    }
  else
    {
      image->error.mean_error_per_pixel=0.0;
      image->error.normalized_mean_error=0.0;
    }
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S i m i l a r i t y I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SimilarityImage() compares the reference image of the image and returns the
%  best match offset.  In addition, it returns a similarity image such that an
%  exact match location is completely white and if none of the pixels match,
%  black, otherwise some gray level in-between.
%
%  The format of the SimilarityImageImage method is:
%
%      Image *SimilarityImage(const Image *image,const Image *reference,
%        const MetricType metric,const double similarity_threshold,
%        RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o reference: find an area of the image that closely resembles this image.
%
%    o metric: the metric.
%
%    o similarity_threshold: minimum distortion for (sub)image match.
%
%    o offset: the best match offset of the reference image within the image.
%
%    o similarity: the computed similarity between the images.
%
%    o exception: return any errors or warnings in this structure.
% */

static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  /*
    Crop the candidate window at (x_offset,y_offset) out of the image and
    measure its distortion against the reference tile.  Failures (crop or
    distortion) report 0.0, the "perfect match" value for error metrics.
  */
  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  similarity_image=CropImage(image,&geometry,exception);
  if (similarity_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  status=GetImageDistortion(similarity_image,reference,metric,&distortion,
    exception);
  similarity_image=DestroyImage(similarity_image);
  if (status == MagickFalse)
    return(0.0);
  return(distortion);
}

MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reference != (const Image *) NULL);
  assert(reference->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  /*
    A reference larger than the image would make the search extent
    (image->columns-reference->columns+1) wrap around as an unsigned size_t;
    reject that combination up front.
  */
  if ((image->columns < reference->columns) || (image->rows < reference->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      return((Image *) NULL);
    }
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(similarity_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
    exception);
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    shared(progress,status,similarity_metric) \
    magick_number_threads(image,image,image->rows-reference->rows+1,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    if (*similarity_metric <= similarity_threshold)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
      /*
        NCC-style metrics report correlation (1.0 == identical); convert to a
        distance before comparing.  This touches only the thread-private
        similarity value, so it needs no synchronization.
      */
      if ((metric == NormalizedCrossCorrelationErrorMetric) ||
          (metric == UndefinedErrorMetric))
        similarity=1.0-similarity;
      /*
        The read-compare-update of the shared best match must be a single
        critical section.  The original pragma bound only to the first `if`
        statement, leaving the *similarity_metric/offset update racy.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      {
        if (similarity < *similarity_metric)
          {
            offset->x=x;
            offset->y=y;
            *similarity_metric=similarity;
          }
      }
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (similarity_traits == UndefinedPixelTrait) ||
            ((similarity_traits & UpdatePixelTrait) == 0))
          continue;
        /*
          Encode the match quality into the similarity image: white for an
          exact match, darker for worse matches.
        */
        SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
          QuantumRange*similarity),q);
      }
      q+=GetPixelChannels(similarity_image);
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
spmodel.h
//-------------------------------------------------------------------------------- // Copyright (c) 2017-2020, sanko-shoko. All rights reserved. //-------------------------------------------------------------------------------- #ifndef __SP_MODEL_H__ #define __SP_MODEL_H__ #include "spcore/spcore.h" #include "spapp/spalgo/spkdtree.h" #include "spapp/spimg/sprender.h" namespace sp{ //-------------------------------------------------------------------------------- // model edge //-------------------------------------------------------------------------------- struct Edge { Vec3 pos, drc, nrm[2]; }; //-------------------------------------------------------------------------------- // model util //-------------------------------------------------------------------------------- SP_CPUFUNC Vec3 getModelCent(const Mem1<Mesh3> &model){ Vec3 sum = getVec3(0.0, 0.0, 0.0); for (int i = 0; i < model.size(); i++){ sum += getMeshCent(model[i]); } return sum / model.size(); } SP_CPUFUNC SP_REAL getModelRadius(const Mem1<Mesh3> &model){ Mem1<SP_REAL> mem(model.size()); for (int i = 0; i < mem.size(); i++){ mem[i] = normVec(getMeshCent(model[i])); } return max(mem); } SP_CPUFUNC SP_REAL getModelDistance(const Mem1<Mesh3> &model, const CamParam &cam){ const double radius = getModelRadius(model); const double distance = 1.2 * max(cam.fx, cam.fy) * radius / (0.5 * min(cam.dsize[0], cam.dsize[1])); return static_cast<SP_REAL>(distance); } SP_CPUFUNC Mem1<VecPD3> getModelPoint(const Mem1<Mesh3> &model, const int density = 50){ const CamParam cam = getCamParam(density, density); const SP_REAL distance = getModelDistance(model, cam); Mem1<VecPD3> tmp; const int num = getGeodesicMeshNum(0); for (int i = 0; i < num; i++){ const Vec3 v = getMeshCent(getGeodesicMesh(0, i)) * (-1.0); const Pose pose = getPose(getRotDirection(v), getVec3(0.0, 0.0, distance)); Mem2<VecPD3> map; renderVecPD(map, cam, pose, model); const Mat mat = getMat(invPose(pose)); for (int j = 0; j < map.size(); j++){ if 
(map[j].pos.z > 0 && dotVec(map[j].pos, map[j].drc) < 0){ tmp.push(mat * map[j]); } } } tmp = shuffle(tmp); Mem1<VecPD3> pnts; const double unit = 2 * distance / (cam.fx + cam.fy); for (int i = 0; i < tmp.size(); i++){ bool check = true; for (int j = 0; j < pnts.size(); j++){ if (dotVec(pnts[j].drc, tmp[i].drc) > 0.5 && normVec(pnts[j].pos - tmp[i].pos) < unit){ check = false; break; } } if (check == true){ pnts.push(tmp[i]); } } return pnts; } SP_CPUFUNC Mem1<Edge> getModelEdge(const Mem1<Mesh3> &model, const int density = 50) { KdTree<SP_REAL> kdtree; kdtree.init(3); for (int i = 0; i < model.size(); i++) { for (int j = 0; j < 3; j++) { kdtree.addData(&model[i].pos[j]); } } kdtree.makeTree(); Mem1<Edge> edges; const SP_REAL radius = getModelRadius(model); const SP_REAL unit = 2.0 * radius / density; for (int i = 0; i < model.size(); i++) { for (int j = 0; j < 3; j++) { const Vec3 A = model[i].pos[(j + 0) % 3]; const Vec3 B = model[i].pos[(j + 1) % 3]; const Vec3 V = unitVec(B - A); const Mem1<int> list = kdtree.search(&A, normVec(B - A) + 0.01); for (int k = 0; k < list.size(); k++) { const int mid = list[k] / 3; const int pid = list[k] % 3; if (mid <= i) continue; const Vec3 C = model[mid].pos[(pid + 0) % 3]; const Vec3 D = model[mid].pos[(pid + 1) % 3]; const Vec3 F = normVec(C - A) > normVec(D - A) ? C : D; if (fabs(dotVec(V, unitVec(F - A))) < 0.99) continue; if (fabs(dotVec(V, unitVec(D - C))) < 0.99) continue; const Vec3 O = dotVec(V, C) < dotVec(V, D) ? C : D; const Vec3 P = dotVec(V, C) < dotVec(V, D) ? D : C; const Vec3 X = dotVec(V, A) > dotVec(V, O) ? A : O; const Vec3 Y = dotVec(V, B) < dotVec(V, P) ? 
B : P; if (normVec(Y - X) < 0.01) continue; const int div = ceil(normVec(Y - X) / unit); for (int d = 0; d < div; d++) { Edge edge; edge.pos = (Y - X) / (div + 1.0) * (d + 1.0) + X; edge.drc = V; edge.nrm[0] = getMeshNrm(model[i]); edge.nrm[1] = getMeshNrm(model[mid]); edges.push(edge); } } } } return edges; } //-------------------------------------------------------------------------------- // pose model //-------------------------------------------------------------------------------- class PoseModel { public: Pose pose; Mem1<Edge> edges; Mem1<VecPD3> pnts; public: PoseModel() { } PoseModel(const PoseModel &pmodel) { *this = pmodel; } PoseModel& operator = (const PoseModel &pmodel) { pose = pmodel.pose; edges = pmodel.edges; pnts = pmodel.pnts; return *this; } }; SP_CPUFUNC Mem1<PoseModel> getPoseModel(const Mem1<Mesh3> &model, const double distance, const int level = 2, const int density = 50) { const double radius = getModelRadius(model); const double unit = 2.0 * radius / density; const int size = 300; const double f = distance * size / (1.2 * 2.0 * radius); const CamParam cam = getCamParam(size, size, f, f); Mem1<PoseModel> pmodels; // pose { const int num = getGeodesicMeshNum(level); pmodels.resize(num); for (int i = 0; i < num; i++) { const Vec3 v = getMeshCent(getGeodesicMesh(level, i)) * (-1.0); const Pose pose = getPose(getRotDirection(v), getVec3(0.0, 0.0, distance)); pmodels[i].pose = pose; } } // contour edge { const Mem1<Edge> &edges = getModelEdge(model, density); KdTree<SP_REAL> kdtree; kdtree.init(3); for (int i = 0; i < edges.size(); i++) { kdtree.addData(&edges[i].pos); } kdtree.makeTree(); #if SP_USE_OMP #pragma omp parallel for #endif for (int i = 0; i < pmodels.size(); i++) { PoseModel &pmodel = pmodels[i]; pmodel.edges.clear(); const Pose &pose = pmodel.pose; Mem2<VecPD3> map; renderVecPD(map, cam, pose, model); const Mat pmat = getMat(pose); const Mat rmat = getMat(pose.rot); Mem1<bool> flags(edges.size()); flags.zero(); for (int j = 0; j < 
edges.size(); j++) { const Vec3 pos = pmat * edges[j].pos; const Vec2 pix = mulCamD(cam, prjVec(pos)); if (flags[j] == true) continue; const int x = round(pix.x); const int y = round(pix.y); bool contour = false ; for (int v = -1; v <= 1; v++) { for (int u = -1; u <= 1; u++) { const VecPD3 &vec = map(x + u, y + v); if (vec.pos.z == 0.0) { contour = true; goto _exit0; } } } _exit0: const Vec3 nrm0 = rmat * edges[j].nrm[0]; const Vec3 nrm1 = rmat * edges[j].nrm[1]; if (contour == true && dotVec(nrm0, pos) * dotVec(nrm1, pos) <= 0.0) { pmodel.edges.push(edges[j]); const Mem1<int> list = kdtree.search(&edges[j].pos, unit); for (int k = 0; k < list.size(); k++) { flags[list[k]] = true; } } } } } // surface point { const Mem1<VecPD3> &pnts = getModelPoint(model, density); #if SP_USE_OMP #pragma omp parallel for #endif for (int i = 0; i < pmodels.size(); i++) { PoseModel &pmodel = pmodels[i]; pmodel.pnts.clear(); const Pose &pose = pmodel.pose; Mem2<VecPD3> map; renderVecPD(map, cam, pose, model); const Mat pmat = getMat(pose); const Mat rmat = getMat(pose.rot); for (int j = 0; j < pnts.size(); j++) { const Vec3 pos = pmat * pnts[j].pos; const Vec3 drc = rmat * pnts[j].drc; const Vec2 pix = mulCamD(cam, prjVec(pos)); const int x = round(pix.x); const int y = round(pix.y); bool visible = false; for (int v = -1; v <= 1; v++) { for (int u = -1; u <= 1; u++) { const VecPD3 &vec = map(x + u, y + v); if (vec.pos.z > pos.z - SP_SMALL) { visible = true; goto _exit1; } } } _exit1: if (visible == true && dotVec(drc, pos) <= 0.0) { pmodel.pnts.push(pnts[j]); } } } } return pmodels; } SP_CPUFUNC int findPoseModel(const Mem1<PoseModel> &pmodels, const Pose &pose) { int id = -1; SP_REAL minv = SP_INFINITY; for (int i = 0; i < pmodels.size(); i++) { Vec3 vec0 = getEuler(pose.rot); Vec3 vec1 = getEuler(pmodels[i].pose.rot); vec0.z = 0.0; vec1.z = 0.0; const SP_REAL dif = difRot(getRotEuler(vec0), getRotEuler(vec1)); if (dif < minv) { minv = dif; id = i; } } return id; } 
//-------------------------------------------------------------------------------- // sample model //-------------------------------------------------------------------------------- SP_CPUFUNC Mem1<Mesh3> loadPlane(const double size, const int xyz, const int nrm) { Mem1<Mesh3> model; const double hs = size * 0.5; Vec3 a, b, c, d; switch(xyz){ case 0: a = getVec3(0.0, -hs, -hs); b = getVec3(0.0, +hs, -hs); c = getVec3(0.0, +hs, +hs); d = getVec3(0.0, -hs, +hs); break; case 1: a = getVec3(-hs, 0.0, -hs); b = getVec3(-hs, 0.0, +hs); c = getVec3(+hs, 0.0, +hs); d = getVec3(+hs, 0.0, -hs); break; case 2: a = getVec3(-hs, -hs, 0.0); b = getVec3(+hs, -hs, 0.0); c = getVec3(+hs, +hs, 0.0); d = getVec3(-hs, +hs, 0.0); break; } if (nrm > 0) { model.push(getMesh3(a, b, c)); model.push(getMesh3(a, c, d)); } else { model.push(getMesh3(c, b, a)); model.push(getMesh3(c, a, d)); } return model; } SP_CPUFUNC Mem1<Mesh3> loadGeodesicDorm(const double size, const int div) { Mem1<Mesh3> model; const int num = getGeodesicMeshNum(div); for (int i = 0; i < num; i++) { model.push(getGeodesicMesh(div, i) * size); } return model; } SP_CPUFUNC Mem1<Mesh3> loadCube(const double size) { Mem1<Mesh3> model; const double half = size / 2.0; for (int z = -1; z <= +1; z += 2) { for (int y = -1; y <= +1; y += 2) { for (int x = -1; x <= +1; x += 2) { if ((x * y * z) > 0) continue; const Vec3 p0 = getVec3(+x, +y, +z) * half; const Vec3 px = getVec3(-x, +y, +z) * half; const Vec3 py = getVec3(+x, -y, +z) * half; const Vec3 pz = getVec3(+x, +y, -z) * half; model.push(getMesh3(p0, py, px)); model.push(getMesh3(p0, pz, py)); model.push(getMesh3(p0, px, pz)); } } } return model; } SP_CPUFUNC Mem1<Mesh3> loadCone(const Vec3 &drc, const double radius, const int div = 36) { Mem1<Mesh3> model; const Vec3 nx = invRot(getRotDirection(drc)) * getVec3(1.0, 0.0, 0.0); const Vec3 ny = invRot(getRotDirection(drc)) * getVec3(0.0, 1.0, 0.0); const double step = 360.0 / div; for (int i = 0; i < div; i++) { const double 
pa = (i + 0) * step * SP_PI / 180.0; const double pb = (i + 1) * step * SP_PI / 180.0; const Vec3 a = (nx * cos(pa) + ny * sin(pa)) * radius; const Vec3 b = (nx * cos(pb) + ny * sin(pb)) * radius; const Vec3 c = drc; model.push(getMesh3(a, b, c)); model.push(getMesh3(b, a, getVec3(0.0, 0.0, 0.0))); } return model; } SP_CPUFUNC Mem1<Mesh3> loadCylinder(const Vec3 &drc, const double radius, const int div = 36) { Mem1<Mesh3> model; const Vec3 nx = invRot(getRotDirection(drc)) * getVec3(1.0, 0.0, 0.0); const Vec3 ny = invRot(getRotDirection(drc)) * getVec3(0.0, 1.0, 0.0); const double step = 360.0 / div; for (int i = 0; i < div; i++) { const double pa = (i + 0) * step * SP_PI / 180.0; const double pb = (i + 1) * step * SP_PI / 180.0; const Vec3 a = (nx * cos(pa) + ny * sin(pa)) * radius; const Vec3 b = (nx * cos(pb) + ny * sin(pb)) * radius; const Vec3 c = a + drc; const Vec3 d = b + drc; model.push(getMesh3(a, b, d)); model.push(getMesh3(d, c, a)); model.push(getMesh3(b, a, getVec3(0.0, 0.0, 0.0))); model.push(getMesh3(a, b, getVec3(0.0, 0.0, 0.0)) + drc); } return model; } } #endif
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GENERAL_MATRIX_MATRIX_H #define EIGEN_GENERAL_MATRIX_MATRIX_H namespace Eigen { namespace internal { template<typename _LhsScalar, typename _RhsScalar> class level3_blocking; /* Specialization for a row-major destination matrix => simple transposition of the product */ template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs> struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor> { typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; static EIGEN_STRONG_INLINE void run( Index rows, Index cols, Index depth, const LhsScalar* lhs, Index lhsStride, const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha, level3_blocking<RhsScalar,LhsScalar>& blocking, GemmParallelInfo<Index>* info = 0) { // transpose the product such that the result is column major general_matrix_matrix_product<Index, RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs, LhsScalar, LhsStorageOrder==RowMajor ? 
ColMajor : RowMajor, ConjugateLhs, ColMajor> ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info); } }; /* Specialization for a col-major destination matrix * => Blocking algorithm following Goto's paper */ template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs> struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor> { typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; static void run(Index rows, Index cols, Index depth, const LhsScalar* _lhs, Index lhsStride, const RhsScalar* _rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha, level3_blocking<LhsScalar,RhsScalar>& blocking, GemmParallelInfo<Index>* info = 0) { const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride); const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride); typedef gebp_traits<LhsScalar,RhsScalar> Traits; Index kc = blocking.kc(); // cache block size along the K direction Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction //Index nc = blocking.nc(); // cache block size along the N direction gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs; gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs; gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp; #ifdef EIGEN_HAS_OPENMP if(info) { // this is the parallel version! 
    // --- NOTE(review): this span starts inside general_matrix_matrix_product<>::run();
    // the enclosing function/struct header lies before this chunk.  This is the
    // OpenMP multi-threaded branch of the blocked GEMM driver.
    Index tid = omp_get_thread_num();
    Index threads = omp_get_num_threads();

    std::size_t sizeA = kc*mc;
    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
    // Per-thread packed-A buffer and gebp workspace on the (aligned) stack;
    // the packed-B buffer is shared between threads (from 'blocking').
    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
    ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0);

    RhsScalar* blockB = blocking.blockB();
    eigen_internal_assert(blockB!=0);

    // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
    for(Index k=0; k<depth; k+=kc)
    {
      const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

      // In order to reduce the chance that a thread has to wait for the other,
      // let's start by packing A'.
      pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);

      // Pack B_k to B' in a parallel fashion:
      // each thread packs the sub block B_k,j to B'_j where j is the thread id.

      // However, before copying to B'_j, we have to make sure that no other thread is still using it,
      // i.e., we test that info[tid].users equals 0.
      // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
      while(info[tid].users!=0) {}  // busy-wait until every reader of the previous B'_j is done
      info[tid].users += threads;

      pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);

      // Notify the other threads that the part B'_j is ready to go.
      info[tid].sync = k;

      // Computes C_i += A' * B' per B'_j
      for(Index shift=0; shift<threads; ++shift)
      {
        // Rotate the visiting order by the thread id so threads start on
        // "their own" sub-block and spread out over the others.
        Index j = (tid+shift)%threads;

        // At this point we have to make sure that B'_j has been updated by the thread j,
        // we use testAndSetOrdered to mimic a volatile access.
        // However, no need to wait for the B' part which has been updated by the current thread!
        if(shift>0)
          while(info[j].sync!=k) {}

        gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w);
      }

      // Then keep going as usual with the remaining A'
      for(Index i=mc; i<rows; i+=mc)
      {
        const Index actual_mc = (std::min)(i+mc,rows)-i;

        // pack A_i,k to A'
        pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);

        // C_i += A' * B'
        gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
      }

      // Release all the sub blocks B'_j of B' for the current thread,
      // i.e., we simply decrement the number of users by 1
      for(Index j=0; j<threads; ++j)
      {
        #pragma omp atomic
        info[j].users -= 1;
      }
    }
  }
  else
#endif // EIGEN_HAS_OPENMP
  {
    EIGEN_UNUSED_VARIABLE(info);

    // this is the sequential version!
    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*cols;
    std::size_t sizeW = kc*Traits::WorkSpaceFactor;

    // Buffers come from 'blocking' when pre-allocated, otherwise from the stack.
    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW());

    // For each horizontal panel of the rhs, and corresponding panel of the lhs...
    // (==GEMM_VAR1)
    for(Index k2=0; k2<depth; k2+=kc)
    {
      const Index actual_kc = (std::min)(k2+kc,depth)-k2;

      // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
      // => Pack rhs's panel into a sequential chunk of memory (L2 caching)
      // Note that this panel will be read as many times as the number of blocks in the lhs's
      // vertical panel which is, in practice, a very low number.
      pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);

      // For each mc x kc block of the lhs's vertical panel...
      // (==GEPP_VAR1)
      for(Index i2=0; i2<rows; i2+=mc)
      {
        const Index actual_mc = (std::min)(i2+mc,rows)-i2;

        // We pack the lhs's block into a sequential chunk of memory (L1 caching)
        // Note that this block will be read a very high number of times, which is equal to the number of
        // micro vertical panel of the large rhs's panel (e.g., cols/4 times).
        pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);

        // Everything is packed, we can now call the block * panel kernel:
        gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
      }
    }
  }
};

/*********************************************************************************
*  Specialization of GeneralProduct<> for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

template<typename Lhs, typename Rhs>
struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
 : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{};

// Callable wrapper binding one GEMM invocation (operands, destination, alpha,
// blocking) so that parallelize_gemm can dispatch row ranges to threads.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  // Called once before spawning threads: the packed-B buffer is shared, so it
  // must exist before any worker runs.
  void initParallelSession() const
  {
    m_blocking.allocateB();
  }

  // Run the product on the [row, row+rows) x [col, col+cols) slice of the
  // destination; cols==-1 means "all columns".
  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              /*(const Scalar*)*/&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              /*(const Scalar*)*/&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};

// Primary template; specialized below on whether all dimensions are known at
// compile time (static buffers) or not (heap buffers).
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

// Base class holding the three packing buffers (A, B, workspace) and the
// blocking sizes mc/nc/kc; ownership policy is defined by derived classes.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;
    RhsScalar* m_blockW;

    DenseIndex m_mc;
    DenseIndex m_nc;
    DenseIndex m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline DenseIndex mc() const { return m_mc; }
    inline DenseIndex nc() const { return m_nc; }
    inline DenseIndex kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
    inline RhsScalar* blockW() { return m_blockW; }
};

// Fully-static case: all dimensions known at compile time, so the packing
// buffers are aligned member arrays and allocateX() are no-ops.  In row-major
// storage the roles of lhs/rhs (and rows/cols) are swapped.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth,
      SizeW = MaxDepth * Traits::WorkSpaceFactor
    };

    EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
    EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];

  public:

    gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
      this->m_blockW = m_staticW;
    }

    // Static storage: nothing to allocate.
    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateW() {}
    inline void allocateAll() {}
};

// Dynamic case: blocking sizes are computed at runtime and the buffers are
// lazily heap-allocated (aligned_new) and freed in the destructor.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    DenseIndex m_sizeA;
    DenseIndex m_sizeB;
    DenseIndex m_sizeW;

  public:

    gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      // May shrink kc/mc/nc to fit the cache hierarchy.
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
      m_sizeW = this->m_kc*Traits::WorkSpaceFactor;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateW()
    {
      if(this->m_blockW==0)
        this->m_blockW = aligned_new<RhsScalar>(m_sizeW);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
      allocateW();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
      aligned_delete(this->m_blockW, m_sizeW);
    }
};

} // end namespace internal

// High-level expression wrapper: evaluates lhs*rhs via the blocked kernel
// above, dispatching through parallelize_gemm when the destination is big
// enough to be worth threading.
template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, GemmProduct>
  : public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs>
{
    enum {
      MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
    };
  public:
    EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)

    typedef typename Lhs::Scalar LhsScalar;
    typedef typename Rhs::Scalar RhsScalar;
    typedef Scalar ResScalar;

    GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
    {
#if !(defined(EIGEN_NO_STATIC_ASSERT) && defined(EIGEN_NO_DEBUG))
      // Reject products between scalar types with no defined multiplication.
      typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp;
      EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar);
#endif
    }

    // dst += alpha * lhs * rhs
    template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
    {
      eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
      // Empty operands: nothing to accumulate.
      if(m_lhs.cols()==0 || m_lhs.rows()==0 || m_rhs.cols()==0)
        return;

      // Strip transposes/conjugates/scalar multiples off the operands;
      // the extracted scalar factors are folded into alpha below.
      typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
      typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);

      Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
                                 * RhsBlasTraits::extractScalarFactor(m_rhs);

      typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
              Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

      typedef internal::gemm_functor<
        Scalar, Index,
        internal::general_matrix_matrix_product<
          Index,
          LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
          RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
          (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
        _ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;

      BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());

      // Thread only when the result is large (or dynamically sized).
      internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit);
    }
};

} // end namespace Eigen

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
aidisp.c
/*- * Copyright (c) 2012-2017 Ilya Kaliman * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 */

#include "balance.h"
#include "private.h"

/*
 * 12-point quadrature weights and imaginary frequencies used to evaluate the
 * Casimir-Polder integral over dynamic polarizabilities (see quadrature()).
 */
static const double quad_fact[12] = {
	0.72086099022968040154e-02,
	0.17697067815034886394e-01,
	0.30660908596251749739e-01,
	0.48381293256249884995e-01,
	0.74878830420650517080e-01,
	0.11806515901361630228e+00,
	0.19535413832209084204e+00,
	0.35055692324483221824e+00,
	0.71577113554429568336e+00,
	1.81409759976323969729e+00,
	6.97923445114870823247e+00,
	8.32480938829658453917e+01
};

static const double quad_freq[12] = {
	0.77932702233253671082e-05,
	0.22821071773724297874e-03,
	0.15211319247778075068e-02,
	0.60833919905855461032e-02,
	0.19223967039304198946e-01,
	0.54392829363594207533e-01,
	0.14891668800412559598e+00,
	0.42134903703482291779e+00,
	1.33149401066630080343e+00,
	5.32498192172462030801e+00,
	3.54935126637048206534e+01,
	1.03935828835455831714e+03
};

/*
 * Numerically integrate the (i, j) component of the 12-frequency
 * polarizability tensor against an orbital energy gap 'de'.
 * 'tensor' points to an array of 12 matrices, one per quadrature frequency.
 */
static double
quadrature(const mat_t *tensor, size_t i, size_t j, double de)
{
	double m, sum = 0.0;

	for (int k = 0; k < 12; k++) {
		m = mat_get(tensor + k, i, j);
		sum += m * quad_fact[k] / (de * de + quad_freq[k]);
	}
	return sum * de;
}

/*
 * Look up the occupied->virtual transition dipole integral along 'axis'
 * (0..2).  The integrals are stored as 3 dense (size x size) MO blocks,
 * where size counts core + active + virtual orbitals; virtual orbitals
 * start at offset n_ai_core + n_ai_act within a row.
 */
static double
get_dip_int(struct efp *efp, size_t i_occ, size_t i_vir, size_t axis)
{
	size_t idx, size;

	size = efp->n_ai_core + efp->n_ai_act + efp->n_ai_vir;
	idx = axis * size * size + i_occ * size +
	    (efp->n_ai_core + efp->n_ai_act) + i_vir;
	return efp->ai_dipole_integrals[idx];
}

/*
 * Dispersion energy contribution of one dynamic polarizability point
 * (pt_idx) of fragment fr_idx: sum over all occupied-virtual orbital
 * pairs and all 3x3 dipole component combinations of the quadrature
 * integral.  The -1/pi prefactor comes from the Casimir-Polder formula.
 */
static double
compute_ai_disp_pt(struct efp *efp, size_t fr_idx, size_t pt_idx)
{
	struct frag *frag;
	struct dynamic_polarizable_pt *pt;
	double sum = 0.0;
	size_t ncoreact;

	frag = efp->frags + fr_idx;
	pt = frag->dynamic_polarizable_pts + pt_idx;
	ncoreact = efp->n_ai_core + efp->n_ai_act;

	for (size_t i_vir = 0; i_vir < efp->n_ai_vir; i_vir++) {
		double e_vir = efp->ai_orbital_energies[ncoreact + i_vir];

		for (size_t i_occ = 0; i_occ < ncoreact; i_occ++) {
			double e_occ = efp->ai_orbital_energies[i_occ];

			for (size_t i = 0; i < 3; i++) {
				double di = get_dip_int(efp, i_occ, i_vir, i);

				for (size_t j = 0; j < 3; j++) {
					double dj = get_dip_int(efp, i_occ, i_vir, j);

					sum += di * dj * quadrature(pt->tensor,
					    i, j, e_vir - e_occ);
				}
			}
		}
	}
	return -sum / PI;
}

/*
 * Accumulate the AI dispersion energy over fragments [from, to).
 * Fragments are parallelized with OpenMP; the per-thread partial sums
 * are combined via the reduction clause before the single final update.
 */
static void
compute_ai_disp_range(struct efp *efp, size_t from, size_t to, void *data)
{
	double energy = 0.0;

	(void)data;
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic) reduction(+:energy)
#endif
	for (size_t i = from; i < to; i++) {
		size_t n_pt = efp->frags[i].n_dynamic_polarizable_pts;

		for (size_t j = 0; j < n_pt; j++)
			energy += compute_ai_disp_pt(efp, i, j);
	}
	efp->energy.ai_dispersion += energy;
}

/*
 * Public entry point: compute the ab initio / EFP dispersion term if it
 * is enabled in the options.  Gradients are not implemented and are
 * reported as a fatal error.  The per-process partial energies are
 * summed across ranks with efp_allreduce.
 */
enum efp_result
efp_compute_ai_disp(struct efp *efp)
{
	if (!(efp->opts.terms & EFP_TERM_AI_DISP))
		return EFP_RESULT_SUCCESS;

	if (efp->do_gradient) {
		efp_log("gradient for AI/EFP dispersion is not implemented");
		return EFP_RESULT_FATAL;
	}

	efp_balance_work(efp, compute_ai_disp_range, NULL);
	efp_allreduce(&efp->energy.ai_dispersion, 1);

	return EFP_RESULT_SUCCESS;
}
vect-simd-clone-3.c
/* { dg-require-effective-target vect_simd_clones } */
/* { dg-additional-options "-fopenmp-simd" } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */

/* Vectorizer test: call a SIMD-clone function from an 'omp simd' loop and
   check the results against the scalar semantics.  */

#include "tree-vect.h"

#ifndef N
#define N 1024
#endif

int d[N], e[N];

/* SIMD clone with lanes of 4: 'b' is uniform across lanes, 'c' steps by 3
   per lane (matching the i * 3 argument in bar()).  'notinbranch' means the
   clone is only called unconditionally, even though foo itself branches.  */
#pragma omp declare simd simdlen(4) notinbranch uniform(b) linear(c:3)
__attribute__((noinline)) int
foo (int a, int b, int c)
{
  if (a < 30)
    return 5;
  return a + b + c;
}

/* Loop to be vectorized; also updates e[] so the loop has a second,
   clone-independent statement.  */
__attribute__((noinline, noclone)) void
bar ()
{
  int i;
#pragma omp simd
  for (i = 0; i < N; ++i)
    {
      d[i] = foo (i, 123, i * 3);
      e[i] = e[i] + i;
    }
}

int
main ()
{
  int i;
  check_vect ();
  bar ();
  /* foo(i,123,i*3) == 5 for i < 30, else i + 123 + i*3 == i*4 + 123.  */
  for (i = 0; i < N; i++)
    if (d[i] != (i < 30 ? 5 : i * 4 + 123) || e[i] != i)
      abort ();
  return 0;
}
barrier.c
// RUN: %compile-run-and-check #include <omp.h> #include <stdio.h> int main(int argc, char *argv[]) { int data, out, flag = 0; #pragma omp target teams num_teams(2) map(tofrom \ : out) map(to \ : data, flag) \ thread_limit(1) #pragma omp parallel num_threads(1) { if (omp_get_team_num() == 0) { /* Write to the data buffer that will be read by thread in team 1 */ data = 42; /* Flush data to thread in team 1 */ #pragma omp barrier /* Set flag to release thread in team 1 */ #pragma omp atomic write flag = 1; } else if (omp_get_team_num() == 1) { /* Loop until we see the update to the flag */ int val; do { #pragma omp atomic read val = flag; } while (val < 1); out = data; #pragma omp barrier } } // CHECK: out=42. /* Value of out will be 42 */ printf("out=%d.\n", out); return !(out == 42); }
render.h
#ifndef _RENDER_H_ #define _RENDER_H_ #include <iostream> #include "cameras.h" #include "ppm.h" #include "radiance.h" #include "random.h" namespace edupt { int render_dof(const int width, const int height, const int samples, const int supersamples, double focus, double aperture) { // カメラ位置 const Vec camera_position = Vec(50.0, 52.0, 220.0); const Vec camera_dir = normalize(Vec(0.0, -0.04, -1.0)); const Vec camera_up = Vec(0.0, 1.0, 0.0); // ワールド座標系でのスクリーンの大きさ const double screen_width = 30.0 * width / height; const double screen_height = 30.0; // スクリーンまでの距離 const double screen_dist = 40.0; // スクリーンを張るベクトル const Vec screen_x = normalize(cross(camera_dir, camera_up)) * screen_width; const Vec screen_y = normalize(cross(screen_x, camera_dir)) * screen_height; const Vec screen_center = camera_position + camera_dir * screen_dist; Color *image = new Color[width * height]; std::random_device seed_gen_; std::mt19937 engine_ = std::mt19937(seed_gen_()); std::cout << width << "x" << height << " " << samples * (supersamples * supersamples) << " spp" << std::endl; DoFCamera camera(width, height, screen_height, screen_dist, camera_position, camera_dir, camera_up, supersamples, aperture, focus, engine_); // PinholeCamera camera(width, height, screen_height, screen_dist, camera_position, camera_dir, camera_up, // supersamples); // OpenMP // #pragma omp parallel for schedule(dynamic, 1) num_threads(4) for (int y = 0; y < height; y++) { std::cerr << "Rendering (y = " << y << ") " << (100.0 * y / (height - 1)) << "%" << std::endl; // Random rnd(y + 1); ValueSampler<double> rnd(0, 1); for (int x = 0; x < width; x++) { const int image_index = (height - y - 1) * width + x; // supersamples x supersamples のスーパーサンプリング for (int sy = 0; sy < supersamples; sy++) { for (int sx = 0; sx < supersamples; sx++) { Color accumulated_radiance = Color(); // 一つのサブピクセルあたりsamples回サンプリングする for (int s = 0; s < samples; s++) { // accumulated_radiance = // accumulated_radiance + radiance(camera.get_ray(x, y, 
sx, sy), &rnd, 0) / samples / // (supersamples * supersamples); accumulated_radiance = accumulated_radiance + radiance_loop(camera.get_ray(x, y, sx, sy), &rnd) / samples / (supersamples * supersamples); } image[image_index] = image[image_index] + accumulated_radiance; } } } } // 出力 save_ppm_file(std::string("image" + std::to_string(focus) + ".ppm"), image, width, height); return 0; } int render(const int width, const int height, const int samples, const int supersamples) { // カメラ位置 const Vec camera_position = Vec(50.0, 52.0, 220.0); const Vec camera_dir = normalize(Vec(0.0, -0.04, -1.0)); const Vec camera_up = Vec(0.0, 1.0, 0.0); // ワールド座標系でのスクリーンの大きさ const double screen_width = 30.0 * width / height; const double screen_height = 30.0; // スクリーンまでの距離 const double screen_dist = 40.0; // スクリーンを張るベクトル const Vec screen_x = normalize(cross(camera_dir, camera_up)) * screen_width; const Vec screen_y = normalize(cross(screen_x, camera_dir)) * screen_height; const Vec screen_center = camera_position + camera_dir * screen_dist; Color *image = new Color[width * height]; std::cout << width << "x" << height << " " << samples * (supersamples * supersamples) << " spp" << std::endl; // OpenMP // #pragma omp parallel for schedule(dynamic, 1) num_threads(4) for (int y = 0; y < height; y++) { std::cerr << "Rendering (y = " << y << ") " << (100.0 * y / (height - 1)) << "%" << std::endl; // Random rnd(y + 1); ValueSampler<double> rnd(0, 1); for (int x = 0; x < width; x++) { const int image_index = (height - y - 1) * width + x; // supersamples x supersamples のスーパーサンプリング for (int sy = 0; sy < supersamples; sy++) { for (int sx = 0; sx < supersamples; sx++) { Color accumulated_radiance = Color(); // 一つのサブピクセルあたりsamples回サンプリングする for (int s = 0; s < samples; s++) { const double rate = (1.0 / supersamples); const double r1 = sx * rate + rate / 2.0; const double r2 = sy * rate + rate / 2.0; // スクリーン上の位置 const Vec screen_position = screen_center + screen_x * ((r1 + x) / width - 0.5) + screen_y * 
((r2 + y) / height - 0.5); // レイを飛ばす方向 const Vec dir = normalize(screen_position - camera_position); accumulated_radiance = accumulated_radiance + radiance_loop(Ray(camera_position, dir), &rnd) / samples / (supersamples * supersamples); } image[image_index] = image[image_index] + accumulated_radiance; } } } } // 出力 save_ppm_file(std::string("image_1.ppm"), image, width, height); return 0; } }; // namespace edupt #endif
BIDMat_CBLAS.c
/*
 * JNI wrappers exposing MKL CBLAS routines to the JVM.
 *
 * Conventions used throughout:
 *  - Java arrays are pinned with GetPrimitiveArrayCritical and released in
 *    reverse order of acquisition; no other JNI calls are made while pinned.
 *  - The second argument JNI_FALSE (== 0) is passed as a null isCopy pointer.
 *  - Sparse arguments (j_ir, j_jc) use compressed-column/row layout; 'ioff'
 *    (== jc[0]) handles both 0-based and 1-based index arrays.
 */
#include <jni.h>
#include <mkl.h>
#include <mkl_trans.h>
#include <omp.h>

/* Dense double dot product: cblas_ddot with explicit strides. */
JNIEXPORT jdouble JNICALL Java_edu_berkeley_bid_CBLAS_ddot
(JNIEnv * env, jobject calling_obj, jint N, jdoubleArray jX, jint incX, jdoubleArray jY, jint incY){
	jdouble * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
	jdouble * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
	jdouble returnValue;

	returnValue = cblas_ddot(N, X, incX, Y, incY);

	(*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
	return returnValue;
}

/* Double dot product with element offsets instead of strides. */
JNIEXPORT jdouble JNICALL Java_edu_berkeley_bid_CBLAS_ddotxx
(JNIEnv * env, jobject calling_obj, jint N, jdoubleArray jX, jint startX, jdoubleArray jY, jint startY){
	jdouble * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
	jdouble * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
	jdouble returnValue;

	returnValue = cblas_ddot(N, X+startX, 1, Y+startY, 1);

	(*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
	return returnValue;
}

/* Column-wise dot products of two matrices: Z[i] = X(:,i) . Y(:,i). */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_ddotm
(JNIEnv * env, jobject calling_obj, jint nrows, jint ncols, jdoubleArray jX, jint ldx, jdoubleArray jY, jint ldy, jdoubleArray jZ){
	jdouble * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
	jdouble * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
	jdouble * Z = (*env)->GetPrimitiveArrayCritical(env, jZ, JNI_FALSE);
	int i;

	for (i = 0; i < ncols; i++) {
		Z[i] = cblas_ddot(nrows, X+i*ldx, 1, Y+i*ldy, 1);
	}

	(*env)->ReleasePrimitiveArrayCritical(env, jZ, Z, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
}

/* Row-wise accumulated products: Z[j] += sum_i X[j,i]*Y[j,i];
   the inner (row) loop is parallelized, rows are independent. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_ddotr
(JNIEnv * env, jobject calling_obj, jint nrows, jint ncols, jdoubleArray jX, jint ldx, jdoubleArray jY, jint ldy, jdoubleArray jZ){
	jdouble * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
	jdouble * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
	jdouble * Z = (*env)->GetPrimitiveArrayCritical(env, jZ, JNI_FALSE);
	int i, j;

	for (i = 0; i < ncols; i++) {
#pragma omp parallel for
		for (j = 0; j < nrows; j++) {
			Z[j] += X[j + i*ldx] * Y[j + i*ldy];
		}
	}

	(*env)->ReleasePrimitiveArrayCritical(env, jZ, Z, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
}

/* Y += a*X with strides (cblas_daxpy). */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_daxpy
(JNIEnv * env, jobject calling_obj, jint N, jdouble a, jdoubleArray jX, jint incX, jdoubleArray jY, jint incY){
	jdouble * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
	jdouble * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);

	cblas_daxpy(N, a, X, incX, Y, incY);

	(*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
}

/* Y[startY..] += a*X[startX..] (unit stride). */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_daxpyxx
(JNIEnv * env, jobject calling_obj, jint N, jdouble a, jdoubleArray jX, jint startX, jdoubleArray jY, jint startY){
	jdouble * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
	jdouble * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);

	cblas_daxpy(N, a, X+startX, 1, Y+startY, 1);

	(*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
}

/* Dense double matrix-vector product (cblas_dgemv). */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_dgemv
(JNIEnv * env, jobject calling_obj, jint order, jint transA, jint M, jint N, jdouble alpha, jdoubleArray jA, jint lda,
 jdoubleArray jX, jint incX, jdouble beta, jdoubleArray jY, jint incY){
	jdouble * A = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE);
	jdouble * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
	jdouble * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);

	cblas_dgemv((CBLAS_ORDER)order, (CBLAS_TRANSPOSE)transA, M, N, alpha, A, lda, X, incX, beta, Y, incY);

	(*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}

/* Dense double matrix-matrix product (cblas_dgemm). */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_dgemm
(JNIEnv * env, jobject calling_obj, jint order, jint transA, jint transB, jint M, jint N, jint K,
 jdouble alpha, jdoubleArray jA, jint lda, jdoubleArray jB, jint ldb, jdouble beta, jdoubleArray jC, jint ldc){
	jdouble * A = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE);
	jdouble * B = (*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE);
	jdouble * C = (*env)->GetPrimitiveArrayCritical(env, jC, JNI_FALSE);

	cblas_dgemm((CBLAS_ORDER)order, (CBLAS_TRANSPOSE)transA, (CBLAS_TRANSPOSE)transB, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc);

	(*env)->ReleasePrimitiveArrayCritical(env, jC, C, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}

/* Out-of-place matrix copy/transpose (mkl_domatcopy); order/trans passed as
   single-character strings.  Strings are fetched before and released after
   the critical sections, keeping the pinned region JNI-call-free. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_domatcopy
(JNIEnv * env, jobject calling_obj, jstring j_order, jstring j_transA, jint M, jint N,
 jdouble alpha, jdoubleArray j_A, jint lda, jdoubleArray j_B, jint ldb) {
	char * order = (char *)(*env)->GetStringUTFChars(env, j_order, 0);
	char * transA = (char *)(*env)->GetStringUTFChars(env, j_transA, 0);
	jdouble * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE);
	jdouble * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE);

	mkl_domatcopy(order[0], transA[0], M, N, alpha, A, lda, B, ldb);

	(*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0);
	(*env)->ReleaseStringUTFChars(env, j_transA, transA);
	(*env)->ReleaseStringUTFChars(env, j_order, order);
}

/* Dense A times sparse compressed-column B, accumulated into C. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_dmcscm
(JNIEnv * env, jobject calling_obj, jint M, jint N, jdoubleArray j_A, jint lda,
 jdoubleArray j_B, jintArray j_ir, jintArray j_jc, jdoubleArray j_C, jint ldc){
	jdouble * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE);
	jdouble * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE);
	jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, JNI_FALSE);
	jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, JNI_FALSE);
	jdouble * C = (*env)->GetPrimitiveArrayCritical(env, j_C, JNI_FALSE);
	int ioff = jc[0];
	int i, j, ir0;

	for (i = 0; i < N; i++) {
		for (j = jc[i]-ioff; j < jc[i+1]-ioff; j++) {
			ir0 = ir[j]-ioff;
			cblas_daxpy(M, B[j], A+(ir0*lda), 1, C+(i*ldc), 1);
		}
	}

	(*env)->ReleasePrimitiveArrayCritical(env, j_C, C, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0);
}

/* Dense A times sparse compressed-row B, accumulated into C. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_dmcsrm
(JNIEnv * env, jobject calling_obj, jint M, jint N, jdoubleArray j_A, jint lda,
 jdoubleArray j_B, jintArray j_ir, jintArray j_jc, jdoubleArray j_C, jint ldc){
	jdouble * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE);
	jdouble * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE);
	jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, JNI_FALSE);
	jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, JNI_FALSE);
	jdouble * C = (*env)->GetPrimitiveArrayCritical(env, j_C, JNI_FALSE);
	int ioff = jc[0];
	int i, j, k;

	for (i = 0; i < N; i++) {
		for (j = jc[i]-ioff; j < jc[i+1]-ioff; j++) {
			k = ir[j]-ioff;
			cblas_daxpy(M, B[j], A+(i*lda), 1, C+(k*ldc), 1);
		}
	}

	(*env)->ReleasePrimitiveArrayCritical(env, j_C, C, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0);
}

/* ---- Single precision variants of the routines above. ---- */

/* Dense float dot product. */
JNIEXPORT jfloat JNICALL Java_edu_berkeley_bid_CBLAS_sdot
(JNIEnv * env, jobject calling_obj, jint N, jfloatArray jX, jint incX, jfloatArray jY, jint incY){
	jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
	jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
	jfloat returnValue;

	returnValue = cblas_sdot(N, X, incX, Y, incY);

	(*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
	return returnValue;
}

/* Float dot product with element offsets. */
JNIEXPORT jfloat JNICALL Java_edu_berkeley_bid_CBLAS_sdotxx
(JNIEnv * env, jobject calling_obj, jint N, jfloatArray jX, jint startX, jfloatArray jY, jint startY){
	jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
	jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
	jfloat returnValue;

	returnValue = cblas_sdot(N, X+startX, 1, Y+startY, 1);

	(*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
	return returnValue;
}

/* Column-wise float dot products. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_sdotm
(JNIEnv * env, jobject calling_obj, jint nrows, jint ncols, jfloatArray jX, jint ldx, jfloatArray jY, jint ldy, jfloatArray jZ){
	jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
	jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
	jfloat * Z = (*env)->GetPrimitiveArrayCritical(env, jZ, JNI_FALSE);
	int i, j;

	for (i = 0; i < ncols; i++) {
		Z[i] = cblas_sdot(nrows, X+i*ldx, 1, Y+i*ldy, 1);
	}

	(*env)->ReleasePrimitiveArrayCritical(env, jZ, Z, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
}

/* Row-wise accumulated float products, inner loop parallelized. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_sdotr
(JNIEnv * env, jobject calling_obj, jint nrows, jint ncols, jfloatArray jX, jint ldx, jfloatArray jY, jint ldy, jfloatArray jZ){
	jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
	jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
	jfloat * Z = (*env)->GetPrimitiveArrayCritical(env, jZ, JNI_FALSE);
	int i, j;

	for (i = 0; i < ncols; i++) {
#pragma omp parallel for
		for (j = 0; j < nrows; j++) {
			Z[j] += X[j + i*ldx] * Y[j + i*ldy];
		}
	}

	(*env)->ReleasePrimitiveArrayCritical(env, jZ, Z, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
}

/* Dense float matrix-vector product. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_sgemv
(JNIEnv * env, jobject calling_obj, jint order, jint transA, jint M, jint N, jfloat alpha, jfloatArray jA, jint lda,
 jfloatArray jX, jint incX, jfloat beta, jfloatArray jY, jint incY){
	jfloat * A = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE);
	jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
	jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);

	cblas_sgemv((CBLAS_ORDER)order, (CBLAS_TRANSPOSE)transA, M, N, alpha, A, lda, X, incX, beta, Y, incY);

	(*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}

/* Dense float matrix-matrix product. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_sgemm
(JNIEnv * env, jobject calling_obj, jint order, jint transA, jint transB, jint M, jint N, jint K,
 jfloat alpha, jfloatArray jA, jint lda, jfloatArray jB, jint ldb, jfloat beta, jfloatArray jC, jint ldc){
	jfloat * A = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE);
	jfloat * B = (*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE);
	jfloat * C = (*env)->GetPrimitiveArrayCritical(env, jC, JNI_FALSE);

	cblas_sgemm((CBLAS_ORDER)order, (CBLAS_TRANSPOSE)transA, (CBLAS_TRANSPOSE)transB, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc);

	(*env)->ReleasePrimitiveArrayCritical(env, jC, C, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0);
}

/* Out-of-place float matrix copy/transpose (mkl_somatcopy). */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_somatcopy
(JNIEnv * env, jobject calling_obj, jstring j_order, jstring j_transA, jint M, jint N,
 jfloat alpha, jfloatArray j_A, jint lda, jfloatArray j_B, jint ldb) {
	char * order = (char *)(*env)->GetStringUTFChars(env, j_order, 0);
	char * transA = (char *)(*env)->GetStringUTFChars(env, j_transA, 0);
	jfloat * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE);
	jfloat * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE);

	mkl_somatcopy(order[0], transA[0], M, N, alpha, A, lda, B, ldb);

	(*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0);
	(*env)->ReleaseStringUTFChars(env, j_transA, transA);
	(*env)->ReleaseStringUTFChars(env, j_order, order);
}

/* Y += a*X with strides (float). */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_saxpy
(JNIEnv * env, jobject calling_obj, jint N, jfloat a, jfloatArray jX, jint incX, jfloatArray jY, jint incY){
	jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
	jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);

	cblas_saxpy(N, a, X, incX, Y, incY);

	(*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
}

/* Y[startY..] += a*X[startX..] (float, unit stride). */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_saxpyxx
(JNIEnv * env, jobject calling_obj, jint N, jfloat a, jfloatArray jX, jint startX, jfloatArray jY, jint startY){
	jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
	jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);

	cblas_saxpy(N, a, X+startX, 1, Y+startY, 1);

	(*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0);
}

/* Dense float A times sparse compressed-column B, accumulated into C. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_smcscm
(JNIEnv * env, jobject calling_obj, jint M, jint N, jfloatArray j_A, jint lda,
 jfloatArray j_B, jintArray j_ir, jintArray j_jc, jfloatArray j_C, jint ldc){
	jfloat * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE);
	jfloat * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE);
	jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, JNI_FALSE);
	jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, JNI_FALSE);
	jfloat * C = (*env)->GetPrimitiveArrayCritical(env, j_C, JNI_FALSE);
	int ioff = jc[0];
	int i, j, ir0;

	for (i = 0; i < N; i++) {
		for (j = jc[i]-ioff; j < jc[i+1]-ioff; j++) {
			ir0 = ir[j]-ioff;
			cblas_saxpy(M, B[j], A+(ir0*lda), 1, C+(i*ldc), 1);
		}
	}

	(*env)->ReleasePrimitiveArrayCritical(env, j_C, C, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0);
}

/* Dense float A times sparse compressed-row B, accumulated into C.
   Short columns (M <= 10) are handled with a hand-written loop; longer
   columns dispatch to cblas_saxpy. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_smcsrm
(JNIEnv * env, jobject calling_obj, jint M, jint N, jfloatArray j_A, jint lda,
 jfloatArray j_B, jintArray j_ir, jintArray j_jc, jfloatArray j_C, jint ldc){
	jfloat * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE);
	jfloat * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE);
	jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, JNI_FALSE);
	jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, JNI_FALSE);
	jfloat * C = (*env)->GetPrimitiveArrayCritical(env, j_C, JNI_FALSE);
	int ioff = jc[0];
	int i, j, jj, k;

	for (i = 0; i < N; i++) {
		for (j = jc[i]-ioff; j < jc[i+1]-ioff; j++) {
			jj = ir[j]-ioff;
			if (M == 1) {
				C[jj*ldc] += B[j] * A[i*lda];
			} else if (M > 10) {
				cblas_saxpy(M, B[j], A+(i*lda), 1, C+(jj*ldc), 1);
			} else {
				int iia = i*lda;
				int jjc = jj*ldc;
				float Bj = B[j];
				for (k = 0; k < M; k++) {
					C[jjc+k] += Bj * A[iia+k];
				}
			}
		}
	}

	(*env)->ReleasePrimitiveArrayCritical(env, j_C, C, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0);
}

/* Complex float (unconjugated) dot product; result written into Z.
   NOTE(review): the source chunk is truncated inside this function. */
JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_cdot
(JNIEnv * env, jobject calling_obj, jint N, jfloatArray jX, jint incX, jfloatArray jY, jint incY, jfloatArray jZ){
	jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE);
	jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE);
	jfloat * Z = (*env)->GetPrimitiveArrayCritical(env, jZ, JNI_FALSE);

	cblas_cdotu_sub(N, X, incX, Y, incY, Z);

	(*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0);
	(*env)->ReleasePrimitiveArrayCritical(env, jZ,
Z, 0); (*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0); } JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_cdotxx (JNIEnv * env, jobject calling_obj, jint N, jfloatArray jX, jint startX, jfloatArray jY, jint startY, jfloatArray jZ){ jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE); jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE); jfloat * Z = (*env)->GetPrimitiveArrayCritical(env, jZ, JNI_FALSE); cblas_cdotu_sub(N, X+startX, 1, Y+startY, 1, Z); (*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0); (*env)->ReleasePrimitiveArrayCritical(env, jZ, Z, 0); (*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0); } JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_cdotm (JNIEnv * env, jobject calling_obj, jint nrows, jint ncols, jfloatArray jX, jint ldx, jfloatArray jY, jint ldy, jfloatArray jZ){ jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE); jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE); jfloat * Z = (*env)->GetPrimitiveArrayCritical(env, jZ, JNI_FALSE); int i; for (i=0; i<2*ncols; i+=2) { cblas_cdotu_sub(nrows, X+i*ldx, 1, Y+i*ldy, 1, Z+i); } (*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0); (*env)->ReleasePrimitiveArrayCritical(env, jZ, Z, 0); (*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0); } JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_cdotr (JNIEnv * env, jobject calling_obj, jint nrows, jint ncols, jfloatArray jX, jint ldx, jfloatArray jY, jint ldy, jfloatArray jZ){ jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE); jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE); jfloat * Z = (*env)->GetPrimitiveArrayCritical(env, jZ, JNI_FALSE); int i, j, ix, iy; for (i = 0; i < ncols; i++) { #pragma omp parallel for for (j = 0; j < nrows; j++) { ix = 2*(j + i*ldx); iy = 2*(j + i*ldy); Z[2*j] += X[ix] * Y[ix] - X[ix+1] * Y[ix+1]; Z[2*j+1] += X[ix] * Y[ix+1] + X[ix+1] * Y[ix]; } } (*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0); 
(*env)->ReleasePrimitiveArrayCritical(env, jZ, Z, 0); (*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0); } JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_cgemv (JNIEnv * env, jobject calling_obj, jint order, jint transA, jint M, jint N, jfloatArray jAlpha, jfloatArray jA, jint lda, jfloatArray jX, jint incX, jfloatArray jBeta, jfloatArray jY, jint incY){ jfloat * A = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE); jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE); jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE); jfloat * alpha = (*env)->GetPrimitiveArrayCritical(env, jAlpha, JNI_FALSE); jfloat * beta = (*env)->GetPrimitiveArrayCritical(env, jBeta, JNI_FALSE); cblas_cgemv((CBLAS_ORDER)order, (CBLAS_TRANSPOSE)transA, M, N, alpha, A, lda, X, incX, beta, Y, incY); (*env)->ReleasePrimitiveArrayCritical(env, jBeta, beta, 0); (*env)->ReleasePrimitiveArrayCritical(env, jAlpha, alpha, 0); (*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0); (*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0); (*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0); } JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_cgemm (JNIEnv * env, jobject calling_obj, jint order, jint transA, jint transB, jint M, jint N, jint K, jfloatArray jAlpha, jfloatArray jA, jint lda, jfloatArray jB, jint ldb, jfloatArray jBeta, jfloatArray jC, jint ldc){ jfloat * A = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE); jfloat * B = (*env)->GetPrimitiveArrayCritical(env, jB, JNI_FALSE); jfloat * C = (*env)->GetPrimitiveArrayCritical(env, jC, JNI_FALSE); jfloat * alpha = (*env)->GetPrimitiveArrayCritical(env, jAlpha, JNI_FALSE); jfloat * beta = (*env)->GetPrimitiveArrayCritical(env, jBeta, JNI_FALSE); cblas_cgemm((CBLAS_ORDER)order, (CBLAS_TRANSPOSE)transA, (CBLAS_TRANSPOSE)transB, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc); (*env)->ReleasePrimitiveArrayCritical(env, jC, C, 0); (*env)->ReleasePrimitiveArrayCritical(env, jB, B, 0); 
(*env)->ReleasePrimitiveArrayCritical(env, jA, A, 0); } JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_caxpy (JNIEnv * env, jobject calling_obj, jint N, jfloatArray jA, jfloatArray jX, jint incX, jfloatArray jY, jint incY){ jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE); jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE); jfloat * a = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE); cblas_caxpy(N, a, X, incX, Y, incY); (*env)->ReleasePrimitiveArrayCritical(env, jA, a, 0); (*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0); (*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0); } JNIEXPORT void JNICALL Java_edu_berkeley_bid_CBLAS_caxpyxx (JNIEnv * env, jobject calling_obj, jint N, jfloatArray jA, jfloatArray jX, jint startX, jfloatArray jY, jint startY){ jfloat * X = (*env)->GetPrimitiveArrayCritical(env, jX, JNI_FALSE); jfloat * Y = (*env)->GetPrimitiveArrayCritical(env, jY, JNI_FALSE); jfloat * a = (*env)->GetPrimitiveArrayCritical(env, jA, JNI_FALSE); cblas_caxpy(N, a, X+startX, 1, Y+startY, 1); (*env)->ReleasePrimitiveArrayCritical(env, jA, a, 0); (*env)->ReleasePrimitiveArrayCritical(env, jY, Y, 0); (*env)->ReleasePrimitiveArrayCritical(env, jX, X, 0); }
GB_binop__eq_uint8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__eq_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_08__eq_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__eq_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_04__eq_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__eq_uint8)
// A*D function (colscale):         GB (_AxD__eq_uint8)
// D*A function (rowscale):         GB (_DxB__eq_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__eq_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__eq_uint8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__eq_uint8)
// C=scalar+B                       GB (_bind1st__eq_uint8)
// C=scalar+B'                      GB (_bind1st_tran__eq_uint8)
// C=A+scalar                       GB (_bind2nd__eq_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__eq_uint8)

// C type:  bool
// A type:  uint8_t
// A pattern? 0
// B type:  uint8_t
// B pattern? 0

// BinaryOp: cij = (aij == bij)

// The macros below configure the generic template files #included by each
// kernel; the templates expand to the actual loops for this operator/type.

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (GxB_NO_* are compile-time switches from GB_control.h; when set, every
// kernel below returns GrB_NO_VALUE and the caller falls back to the
// generic, non-specialized implementation)
#define GB_DISABLE \
    (GxB_NO_EQ || GxB_NO_UINT8 || GxB_NO_EQ_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// EQ is none of these, so this kernel is not generated for this operator.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__eq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__eq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the #if 0 below is generator output: this kernel has no accumulate
    // variant for EQ, so it is a deliberate no-op that reports success
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__eq_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // as above: no accumulate variant for EQ, deliberate no-op
    #if 0
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__eq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__eq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // for eWiseUnion, entries present in only one input are combined with
    // the caller-supplied alpha/beta scalars instead of being copied
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__eq_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__eq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for EQ (commutative), so only the #else branch of
    // this preprocessor conditional is compiled for this operator
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__eq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__eq_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B; NULL unless B is bitmap
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap (GBB is true if Bb is NULL)
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__eq_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A; NULL unless A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x == aij) ;                      \
}

GrB_Info GB (_bind1st_tran__eq_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this kernel
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij == y) ;                      \
}

GrB_Info GB (_bind2nd_tran__eq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
fourier.c
/*
  MagickCore discrete Fourier transform methods.

  Software design: Sean Burke, Fred Weinhaus, John Cristy.  July 2009.

  Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization
  dedicated to making software imaging solutions freely available.

  You may not use this file except in compliance with the License.  You may
  obtain a copy of the License at

    http://www.imagemagick.org/script/license.php

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
*/

/*
  Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/fourier.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/thread-private.h"
#if defined(MAGICKCORE_FFTW_DELEGATE)
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
#include <complex.h>
#endif
#include <fftw3.h>
/* Fallbacks when <complex.h> helpers are unavailable: fftw_complex is then
   treated as double[2] with z[0]=real, z[1]=imaginary. */
#if !defined(MAGICKCORE_HAVE_CABS)
#define cabs(z)  (sqrt(z[0]*z[0]+z[1]*z[1]))
#endif
#if !defined(MAGICKCORE_HAVE_CARG)
#define carg(z)  (atan2(cimag(z),creal(z)))
#endif
#if !defined(MAGICKCORE_HAVE_CIMAG)
#define cimag(z)  (z[1])
#endif
#if !defined(MAGICKCORE_HAVE_CREAL)
#define creal(z)  (z[0])
#endif
#endif

/*
  Typedef declarations.
*/
typedef struct _FourierInfo
{
  ChannelType
    channel;          /* which channel of the image this pass transforms */

  MagickBooleanType
    modulus;          /* MagickTrue: magnitude/phase; MagickFalse: real/imag */

  size_t
    width,            /* padded, square transform size (width == height) */
    height;

  ssize_t
    center;           /* floor(width/2)+1: columns in the r2c half-spectrum */
} FourierInfo;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   F o r w a r d F o u r i e r T r a n s f o r m I m a g e                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ForwardFourierTransformImage() implements the discrete Fourier transform
%  (DFT) of the image either as a magnitude / phase or real / imaginary image
%  pair.
%
%  The format of the ForwardFourierTransformImage method is:
%
%      Image *ForwardFourierTransformImage(const Image *image,
%        const MagickBooleanType modulus,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o modulus: if true, return as transform as a magnitude / phase pair
%      otherwise a real / imaginary image pair.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(MAGICKCORE_FFTW_DELEGATE)

/* Circularly shift the height x width array `fourier` in place by
   (x_offset,y_offset), wrapping at the edges; used to move the zero
   frequency to the image center.  Returns MagickFalse on allocation
   failure. */
static MagickBooleanType RollFourier(const size_t width,const size_t height,
  const ssize_t x_offset,const ssize_t y_offset,double *fourier)
{
  double
    *roll;

  register ssize_t
    i,
    x;

  ssize_t
    u,
    v,
    y;

  /*
    Move zero frequency (DC, average color) from (0,0) to (width/2,height/2).
  */
  roll=(double *) AcquireQuantumMemory((size_t) height,width*sizeof(*roll));
  if (roll == (double *) NULL)
    return(MagickFalse);
  i=0L;
  for (y=0L; y < (ssize_t) height; y++)
  {
    /* v = (y+y_offset) mod height, computed without relying on the sign of
       the C remainder operator */
    if (y_offset < 0L)
      v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset;
    else
      v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height :
        y+y_offset;
    for (x=0L; x < (ssize_t) width; x++)
    {
      if (x_offset < 0L)
        u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset;
      else
        u=((x+x_offset) > ((ssize_t) width-1L)) ? x+x_offset-(ssize_t) width :
          x+x_offset;
      roll[v*width+u]=fourier[i++];
    }
  }
  (void) CopyMagickMemory(fourier,roll,height*width*sizeof(*roll));
  roll=(double *) RelinquishMagickMemory(roll);
  return(MagickTrue);
}

/* Expand the half-spectrum produced by FFTW's r2c transform (`source`,
   height x center values) into a full, center-origin height x width plane
   (`destination`), reconstructing the redundant half by conjugate symmetry
   of the indices.  Returns MagickFalse if RollFourier fails. */
static MagickBooleanType ForwardQuadrantSwap(const size_t width,
  const size_t height,double *source,double *destination)
{
  MagickBooleanType
    status;

  register ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.
  */
  center=(ssize_t) floor((double) width/2L)+1L;
  status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,source);
  if (status == MagickFalse)
    return(MagickFalse);
  /* copy the stored half into the right half of the destination ... */
  for (y=0L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L-1L); x++)
      destination[width*y+x+width/2L]=source[center*y+x];
  /* ... and mirror it into the left half */
  for (y=1; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L-1L); x++)
      destination[width*(height-y)+width/2L-x-1L]=source[center*y+x+1L];
  for (x=0L; x < (ssize_t) (width/2L); x++)
    destination[-x+width/2L-1L]=destination[x+width/2L+1L];
  return(MagickTrue);
}

/* Negate the left half-plane of `fourier` (height x width); sign correction
   applied to the mirrored phase values. */
static void CorrectPhaseLHS(const size_t width,const size_t height,
  double *fourier)
{
  register ssize_t
    x;

  ssize_t
    y;

  for (y=0L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      fourier[y*width+x]*=(-1.0);
}

/* Write the magnitude and phase (or real and imaginary) planes into the
   two-image list `image` (first image: magnitude, second: phase) for the
   channel selected in fourier_info.  When modulus is set, phase is rescaled
   from [-pi,pi] to [0,1] for storage.  Returns the status of the last pixel
   sync. */
static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info,
  Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *magnitude_source,
    *phase_source;

  Image
    *magnitude_image,
    *phase_image;

  MagickBooleanType
    status;

  register IndexPacket
    *indexes;

  register ssize_t
    x;

  register PixelPacket
    *q;

  ssize_t
    i,
    y;

  magnitude_image=GetFirstImageInList(image);
  phase_image=GetNextImageInList(image);
  if (phase_image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create "Fourier Transform" image from constituent arrays.
  */
  magnitude_source=(double *) AcquireQuantumMemory((size_t)
    fourier_info->height,fourier_info->width*sizeof(*magnitude_source));
  if (magnitude_source == (double *) NULL)
    return(MagickFalse);
  (void) ResetMagickMemory(magnitude_source,0,fourier_info->height*
    fourier_info->width*sizeof(*magnitude_source));
  phase_source=(double *) AcquireQuantumMemory((size_t) fourier_info->height,
    fourier_info->width*sizeof(*phase_source));
  if (phase_source == (double *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      magnitude_source=(double *) RelinquishMagickMemory(magnitude_source);
      return(MagickFalse);
    }
  /* NOTE(review): the first argument here looks like it should be
     fourier_info->width; this is only harmless because width == height by
     construction in ForwardFourierTransformChannel -- confirm upstream. */
  status=ForwardQuadrantSwap(fourier_info->height,fourier_info->height,
    magnitude,magnitude_source);
  if (status != MagickFalse)
    status=ForwardQuadrantSwap(fourier_info->height,fourier_info->height,phase,
      phase_source);
  CorrectPhaseLHS(fourier_info->height,fourier_info->height,phase_source);
  if (fourier_info->modulus != MagickFalse)
    {
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          /* map phase from [-pi,pi] to [0,1] for storage as a quantum */
          phase_source[i]/=(2.0*MagickPI);
          phase_source[i]+=0.5;
          i++;
        }
    }
  magnitude_view=AcquireCacheView(magnitude_image);
  phase_view=AcquireCacheView(phase_image);
  i=0L;
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    /* NOTE(review): the column count is given as height (square images make
       this equivalent to width) -- confirm. */
    q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->height,1UL,
      exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          SetPixelRed(q,ClampToQuantum(QuantumRange*magnitude_source[i]));
          break;
        }
        case GreenChannel:
        {
          SetPixelGreen(q,ClampToQuantum(QuantumRange*magnitude_source[i]));
          break;
        }
        case BlueChannel:
        {
          SetPixelBlue(q,ClampToQuantum(QuantumRange*magnitude_source[i]));
          break;
        }
        case OpacityChannel:
        {
          SetPixelOpacity(q,ClampToQuantum(QuantumRange*magnitude_source[i]));
          break;
        }
        case IndexChannel:
        {
          SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
            magnitude_source[i]));
          break;
        }
        case GrayChannels:
        {
          SetPixelGray(q,ClampToQuantum(QuantumRange*magnitude_source[i]));
          break;
        }
      }
      i++;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(magnitude_view,exception);
    if (status == MagickFalse)
      break;
  }
  i=0L;
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->height,1UL,
      exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(phase_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          SetPixelRed(q,ClampToQuantum(QuantumRange*phase_source[i]));
          break;
        }
        case GreenChannel:
        {
          SetPixelGreen(q,ClampToQuantum(QuantumRange*phase_source[i]));
          break;
        }
        case BlueChannel:
        {
          SetPixelBlue(q,ClampToQuantum(QuantumRange*phase_source[i]));
          break;
        }
        case OpacityChannel:
        {
          SetPixelOpacity(q,ClampToQuantum(QuantumRange*phase_source[i]));
          break;
        }
        case IndexChannel:
        {
          SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
            phase_source[i]));
          break;
        }
        case GrayChannels:
        {
          SetPixelGray(q,ClampToQuantum(QuantumRange*phase_source[i]));
          break;
        }
      }
      i++;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(phase_view,exception);
    if (status == MagickFalse)
      break;
  }
  phase_view=DestroyCacheView(phase_view);
  magnitude_view=DestroyCacheView(magnitude_view);
  phase_source=(double *) RelinquishMagickMemory(phase_source);
  magnitude_source=(double *) RelinquishMagickMemory(magnitude_source);
  return(status);
}

/* Read one channel of `image` into a real array, run FFTW's real-to-complex
   2-D transform, normalize, and emit either magnitude/phase or real/imaginary
   half-spectra (height x center values each) into `magnitude` and `phase`.
   Returns MagickFalse on allocation failure. */
static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info,
  const Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    n,
    *source;

  fftw_complex
    *fourier;

  fftw_plan
    fftw_r2c_plan;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Generate the forward Fourier transform.
  */
  source=(double *) AcquireQuantumMemory((size_t) fourier_info->height,
    fourier_info->width*sizeof(*source));
  if (source == (double *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  ResetMagickMemory(source,0,fourier_info->height*fourier_info->width*
    sizeof(*source));
  i=0L;
  image_view=AcquireCacheView(image);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          source[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          source[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          source[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          source[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          source[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          source[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  fourier=(fftw_complex *) AcquireQuantumMemory((size_t) fourier_info->height,
    fourier_info->center*sizeof(*fourier));
  if (fourier == (fftw_complex *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      source=(double *) RelinquishMagickMemory(source);
      return(MagickFalse);
    }
  /* FFTW planning is not thread safe; only plan creation is serialized.
     NOTE(review): fftw_plan_dft_r2c_2d takes (n0=rows, n1=cols), so this
     looks like it should be (height, width); harmless only because width ==
     height here -- confirm. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ForwardFourierTransform)
#endif
  fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->width,
    source,fourier,FFTW_ESTIMATE);
  fftw_execute(fftw_r2c_plan);
  fftw_destroy_plan(fftw_r2c_plan);
  source=(double *) RelinquishMagickMemory(source);
  /*
    Normalize Fourier transform.
  */
  n=(double) fourier_info->width*(double) fourier_info->width;
  i=0L;
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
    for (x=0L; x < (ssize_t) fourier_info->center; x++)
    {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
      fourier[i]/=n;
#else
      fourier[i][0]/=n;
      fourier[i][1]/=n;
#endif
      i++;
    }
  /*
    Generate magnitude and phase (or real and imaginary).
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude[i]=cabs(fourier[i]);
        phase[i]=carg(fourier[i]);
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude[i]=creal(fourier[i]);
        phase[i]=cimag(fourier[i]);
        i++;
      }
  fourier=(fftw_complex *) RelinquishMagickMemory(fourier);
  return(MagickTrue);
}

/* Transform one channel of `image` and store the result into the two-image
   list `fourier_image`.  The transform size is the image padded up to a
   square with even sides (so width == height always holds downstream). */
static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  double
    *magnitude,
    *phase;

  fftw_complex
    *fourier;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  size_t
    extent;

  fourier_info.width=image->columns;
  if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
      ((image->rows % 2) != 0))
    {
      extent=image->columns < image->rows ? image->rows : image->columns;
      fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
    }
  fourier_info.height=fourier_info.width;
  fourier_info.center=(ssize_t) floor((double) fourier_info.width/2.0)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  magnitude=(double *) AcquireQuantumMemory((size_t) fourier_info.height,
    fourier_info.center*sizeof(*magnitude));
  if (magnitude == (double *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  phase=(double *) AcquireQuantumMemory((size_t) fourier_info.height,
    fourier_info.center*sizeof(*phase));
  if (phase == (double *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      magnitude=(double *) RelinquishMagickMemory(magnitude);
      return(MagickFalse);
    }
  /* NOTE(review): this buffer is allocated and freed but never used --
     ForwardFourierTransform allocates its own; candidate for removal. */
  fourier=(fftw_complex *) AcquireQuantumMemory((size_t) fourier_info.height,
    fourier_info.center*sizeof(*fourier));
  if (fourier == (fftw_complex *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      phase=(double *) RelinquishMagickMemory(phase);
      magnitude=(double *) RelinquishMagickMemory(magnitude);
      return(MagickFalse);
    }
  status=ForwardFourierTransform(&fourier_info,image,magnitude,phase,
    exception);
  if (status != MagickFalse)
    status=ForwardFourier(&fourier_info,fourier_image,magnitude,phase,
      exception);
  fourier=(fftw_complex *) RelinquishMagickMemory(fourier);
  phase=(double *) RelinquishMagickMemory(phase);
  magnitude=(double *) RelinquishMagickMemory(magnitude);
  return(status);
}
#endif

MagickExport Image *ForwardFourierTransformImage(const Image *image,
  const MagickBooleanType modulus,ExceptionInfo *exception)
{
  Image
    *fourier_image;

  fourier_image=NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
  (void) modulus;
  (void) ThrowMagickException(exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
    image->filename);
#else { Image *magnitude_image; size_t extent, width; width=image->columns; if ((image->columns != image->rows) || ((image->columns % 2) != 0) || ((image->rows % 2) != 0)) { extent=image->columns < image->rows ? image->rows : image->columns; width=(extent & 0x01) == 1 ? extent+1UL : extent; } magnitude_image=CloneImage(image,width,width,MagickFalse,exception); if (magnitude_image != (Image *) NULL) { Image *phase_image; magnitude_image->storage_class=DirectClass; magnitude_image->depth=32UL; phase_image=CloneImage(image,width,width,MagickFalse,exception); if (phase_image == (Image *) NULL) magnitude_image=DestroyImage(magnitude_image); else { MagickBooleanType is_gray, status; phase_image->storage_class=DirectClass; phase_image->depth=32UL; AppendImageToList(&fourier_image,magnitude_image); AppendImageToList(&fourier_image,phase_image); status=MagickTrue; is_gray=IsGrayImage(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=ForwardFourierTransformChannel(image, GrayChannels,modulus,fourier_image,exception); else thread_status=ForwardFourierTransformChannel(image, RedChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, GreenChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, BlueChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if 
defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (image->matte != MagickFalse) thread_status=ForwardFourierTransformChannel(image, OpacityChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (image->colorspace == CMYKColorspace) thread_status=ForwardFourierTransformChannel(image, IndexChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) fourier_image=DestroyImageList(fourier_image); fftw_cleanup(); } } } #endif return(fourier_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n v e r s e F o u r i e r T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InverseFourierTransformImage() implements the inverse discrete Fourier % transform (DFT) of the image either as a magnitude / phase or real / % imaginary image pair. % % The format of the InverseFourierTransformImage method is: % % Image *InverseFourierTransformImage(const Image *magnitude_image, % const Image *phase_image,const MagickBooleanType modulus, % ExceptionInfo *exception) % % A description of each parameter follows: % % o magnitude_image: the magnitude or real image. % % o phase_image: the phase or imaginary image. % % o modulus: if true, return transform as a magnitude / phase pair % otherwise a real / imaginary image pair. % % o exception: return any errors or warnings in this structure. % */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType InverseQuadrantSwap(const size_t width, const size_t height,const double *source,double *destination) { register ssize_t x; ssize_t center, y; /* Swap quadrants. 
  */
  center=(ssize_t) floor((double) width/2.0)+1L;
  for (y=1L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L+1L); x++)
      destination[center*(height-y)-x+width/2L]=source[y*width+x];
  for (y=0L; y < (ssize_t) height; y++)
    destination[center*y]=source[y*width+width/2L];
  for (x=0L; x < center; x++)
    destination[x]=source[center-x-1L];
  /* Roll the rows by -height/2 to restore FFTW's origin convention. */
  return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination));
}

/*
  InverseFourier() reads the magnitude and phase images back into double
  arrays, undoes the [0,1] normalization applied by the forward transform,
  and quadrant-swaps the data back into FFTW's half-plane layout before the
  complex coefficients are merged.
*/
static MagickBooleanType InverseFourier(FourierInfo *fourier_info,
  const Image *magnitude_image,const Image *phase_image,fftw_complex *fourier,
  ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *magnitude,
    *phase,
    *magnitude_source,
    *phase_source;

  MagickBooleanType
    status;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Inverse fourier - read image and break down into a double array.
  */
  magnitude_source=(double *) AcquireQuantumMemory((size_t)
    fourier_info->height,fourier_info->width*sizeof(*magnitude_source));
  if (magnitude_source == (double *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  phase_source=(double *) AcquireQuantumMemory((size_t) fourier_info->height,
    fourier_info->width*sizeof(*phase_source));
  if (phase_source == (double *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      magnitude_source=(double *) RelinquishMagickMemory(magnitude_source);
      return(MagickFalse);
    }
  i=0L;
  magnitude_view=AcquireCacheView(magnitude_image);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    /* NOTE(review): pixels were fetched with GetCacheViewVirtualPixels();
       confirm whether GetCacheViewVirtualIndexQueue() is intended here. */
    indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          magnitude_source[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          magnitude_source[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          magnitude_source[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          magnitude_source[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          magnitude_source[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          magnitude_source[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  i=0L;
  phase_view=AcquireCacheView(phase_image);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(phase_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          phase_source[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          phase_source[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          phase_source[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          phase_source[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          phase_source[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          phase_source[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  /* Map the stored phase from [0,1] back to [-pi,pi]. */
  if (fourier_info->modulus != MagickFalse)
    {
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_source[i]-=0.5;
          phase_source[i]*=(2.0*MagickPI);
          i++;
        }
    }
  magnitude_view=DestroyCacheView(magnitude_view);
  phase_view=DestroyCacheView(phase_view);
  magnitude=(double *) AcquireQuantumMemory((size_t) fourier_info->height,
    fourier_info->center*sizeof(*magnitude));
  if (magnitude == (double *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      magnitude_source=(double *)
RelinquishMagickMemory(magnitude_source); phase_source=(double *) RelinquishMagickMemory(phase_source); return(MagickFalse); } status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, magnitude_source,magnitude); magnitude_source=(double *) RelinquishMagickMemory(magnitude_source); phase=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*phase)); if (phase == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); phase_source=(double *) RelinquishMagickMemory(phase_source); return(MagickFalse); } CorrectPhaseLHS(fourier_info->width,fourier_info->width,phase_source); if (status != MagickFalse) status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, phase_source,phase); phase_source=(double *) RelinquishMagickMemory(phase_source); /* Merge two sets. */ i=0L; if (fourier_info->modulus != MagickFalse) for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier[i]=magnitude[i]*cos(phase[i])+I*magnitude[i]*sin(phase[i]); #else fourier[i][0]=magnitude[i]*cos(phase[i]); fourier[i][1]=magnitude[i]*sin(phase[i]); #endif i++; } else for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier[i]=magnitude[i]+I*phase[i]; #else fourier[i][0]=magnitude[i]; fourier[i][1]=phase[i]; #endif i++; } phase=(double *) RelinquishMagickMemory(phase); magnitude=(double *) RelinquishMagickMemory(magnitude); return(status); } static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info, fftw_complex *fourier,Image *image,ExceptionInfo *exception) { CacheView *image_view; double *source; fftw_plan fftw_c2r_plan; register IndexPacket *indexes; register PixelPacket *q; register ssize_t i, x; ssize_t y; source=(double *) 
    AcquireQuantumMemory((size_t) fourier_info->height,
    fourier_info->width*sizeof(*source));
  if (source == (double *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  /* FFTW plan creation/destruction is not thread safe; serialize it. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_InverseFourierTransform)
#endif
  {
    fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height,
      fourier,source,FFTW_ESTIMATE);
    fftw_execute(fftw_c2r_plan);
    fftw_destroy_plan(fftw_c2r_plan);
  }
  i=0L;
  image_view=AcquireCacheView(image);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    /* The padded plane may exceed the output image; clip the write-back. */
    if (y >= (ssize_t) image->rows)
      break;
    q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width >
      image->columns ? image->columns : fourier_info->width,1UL,exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    /* NOTE(review): x runs to fourier_info->width while only
       min(width,image->columns) pixels were authenticated; confirm `q'
       cannot be advanced past the fetched row when columns < width. */
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          SetPixelRed(q,ClampToQuantum(QuantumRange*source[i]));
          break;
        }
        case GreenChannel:
        {
          SetPixelGreen(q,ClampToQuantum(QuantumRange*source[i]));
          break;
        }
        case BlueChannel:
        {
          SetPixelBlue(q,ClampToQuantum(QuantumRange*source[i]));
          break;
        }
        case OpacityChannel:
        {
          SetPixelOpacity(q,ClampToQuantum(QuantumRange*source[i]));
          break;
        }
        case IndexChannel:
        {
          SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
            source[i]));
          break;
        }
        case GrayChannels:
        {
          SetPixelGray(q,ClampToQuantum(QuantumRange*source[i]));
          break;
        }
      }
      i++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  source=(double *) RelinquishMagickMemory(source);
  return(MagickTrue);
}

/*
  InverseFourierTransformChannel() runs the full inverse pipeline for one
  channel: read magnitude/phase, merge into complex coefficients, and
  inverse-transform into `fourier_image'.
*/
static MagickBooleanType InverseFourierTransformChannel(
  const Image *magnitude_image,const Image *phase_image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  double
    *magnitude,
    *phase;

  fftw_complex
    *fourier;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  size_t
    extent;

  /* Mirror the forward transform's square, even-sized padding. */
  fourier_info.width=magnitude_image->columns;
  if ((magnitude_image->columns != magnitude_image->rows) ||
      ((magnitude_image->columns % 2) != 0) ||
      ((magnitude_image->rows % 2) != 0))
    {
      extent=magnitude_image->columns < magnitude_image->rows ?
        magnitude_image->rows : magnitude_image->columns;
      fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
    }
  fourier_info.height=fourier_info.width;
  fourier_info.center=(ssize_t) floor((double) fourier_info.width/2.0)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  magnitude=(double *) AcquireQuantumMemory((size_t) fourier_info.height,
    fourier_info.center*sizeof(*magnitude));
  if (magnitude == (double *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  phase=(double *) AcquireQuantumMemory((size_t) fourier_info.height,
    fourier_info.center*sizeof(*phase));
  if (phase == (double *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      magnitude=(double *) RelinquishMagickMemory(magnitude);
      return(MagickFalse);
    }
  fourier=(fftw_complex *) AcquireQuantumMemory((size_t) fourier_info.height,
    fourier_info.center*sizeof(*fourier));
  if (fourier == (fftw_complex *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      phase=(double *) RelinquishMagickMemory(phase);
      magnitude=(double *) RelinquishMagickMemory(magnitude);
      return(MagickFalse);
    }
  status=InverseFourier(&fourier_info,magnitude_image,phase_image,fourier,
    exception);
  if (status != MagickFalse)
    status=InverseFourierTransform(&fourier_info,fourier,fourier_image,
      exception);
  fourier=(fftw_complex *) RelinquishMagickMemory(fourier);
  phase=(double *) RelinquishMagickMemory(phase);
  magnitude=(double *) RelinquishMagickMemory(magnitude);
  return(status);
}
#endif

/*
  InverseFourierTransformImage() recombines the magnitude/phase (or
  real/imaginary) image pair into a single spatial-domain image, one channel
  per OpenMP section.  Without FFTW support it only raises a missing-delegate
  warning and returns NULL.
*/
MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image,
  const Image *phase_image,const MagickBooleanType modulus,
  ExceptionInfo *exception)
{
  Image
    *fourier_image;

  assert(magnitude_image != (Image *) NULL);
  assert(magnitude_image->signature == MagickSignature);
  if (magnitude_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      magnitude_image->filename);
  /* Both halves of the transform pair are required. */
  if (phase_image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",magnitude_image->filename);
      return((Image *) NULL);
    }
#if !defined(MAGICKCORE_FFTW_DELEGATE)
  fourier_image=(Image *) NULL;
  (void) modulus;
  (void) ThrowMagickException(exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
    magnitude_image->filename);
#else
  {
    fourier_image=CloneImage(magnitude_image,magnitude_image->columns,
      magnitude_image->rows,MagickFalse,exception);
    if (fourier_image != (Image *) NULL)
      {
        MagickBooleanType
          is_gray,
          status;

        status=MagickTrue;
        /* Gray only if both inputs are gray. */
        is_gray=IsGrayImage(magnitude_image,exception);
        if (is_gray != MagickFalse)
          is_gray=IsGrayImage(phase_image,exception);
        /* One OpenMP section per channel, mirroring the forward transform. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp parallel sections
#endif
        {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            if (is_gray != MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,GrayChannels,modulus,fourier_image,exception);
            else
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,RedChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (is_gray == MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,GreenChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (is_gray == MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,BlueChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (magnitude_image->matte != MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,OpacityChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (magnitude_image->colorspace == CMYKColorspace)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,IndexChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
        }
        if (status == MagickFalse)
          fourier_image=DestroyImage(fourier_image);
      }
    fftw_cleanup();
  }
#endif
  return(fourier_image);
}
gemv_openmp.c
#include <stdlib.h> #include <stdio.h> #include "../../support/timer.h" #include "gemv_utils.h" int main(int argc, char *argv[]) { const size_t rows = 20480; const size_t cols = 8192; double **A, *b, *x; b = (double*) malloc(sizeof(double)*rows); x = (double*) malloc(sizeof(double)*cols); allocate_dense(rows, cols, &A); make_hilbert_mat(rows,cols, &A); #pragma omp parallel { #pragma omp for for (size_t i = 0; i < cols; i++) { x[i] = (double) i+1 ; } #pragma omp for for (size_t i = 0; i < rows; i++) { b[i] = (double) 0.0; } } Timer timer; start(&timer, 0, 0); gemv(A, x, rows, cols, &b); stop(&timer, 0); printf("Kernel "); print(&timer, 0, 1); printf("\n"); #if 0 print_vec(x, rows); print_mat(A, rows, cols); print_vec(b, rows); #endif printf("sum(x) = %f, sum(Ax) = %f\n", sum_vec(x,cols), sum_vec(b,rows)); return 0; } void gemv(double** A, double* x, size_t rows, size_t cols, double** b) { #pragma omp parallel for for (size_t i = 0; i < rows; i ++ ) for (size_t j = 0; j < cols; j ++ ) { (*b)[i] = (*b)[i] + A[i][j]*x[j]; } } void make_hilbert_mat(size_t rows, size_t cols, double*** A) { #pragma omp parallel for for (size_t i = 0; i < rows; i++) { for (size_t j = 0; j < cols; j++) { (*A)[i][j] = 1.0/( (double) i + (double) j + 1.0); } } } double sum_vec(double* vec, size_t rows) { double sum = 0.0; #pragma omp parallel for reduction(+:sum) for (int i = 0; i < rows; i++) sum = sum + vec[i]; return sum; }
resource_strings.h
#pragma once

#include <ATen/code_template.h>

namespace torch {
namespace jit {
namespace fuser {
namespace cpu {

/*with type_as not checking type of its input, a fusion group can have
non-fp32 tensor as input. Correct code for this case is generated, however,
nvrtc does not know how to handle int*_t integer types, so typedefs help it
handle those cases*/

// Per-kernel preamble: IndexType typedef plus the TensorInfo descriptor
// (data pointer + sizes/strides) that the generated kernel body indexes
// with.  The ${...} placeholders are filled in by at::jit::CodeTemplate.
static auto type_declarations_template = at::jit::CodeTemplate(R"(
#define POS_INFINITY INFINITY
#define NEG_INFINITY -INFINITY
typedef ${IndexType} IndexType;
template<typename T, size_t N>
struct TensorInfo {
  T* data;
  IndexType sizes[N];
  IndexType strides[N];
};
template<typename T>
struct TensorInfo<T, 0> {
  T * data;
};
)");

// Whole translation unit for one fused CPU kernel: math helpers, the
// type declarations above, an OpenMP-parallelized element loop, and the
// extern "C" entry point that unpacks the argument array.
static auto cpu_compilation_unit_template = at::jit::CodeTemplate(R"(
#include <math.h>
#include <cstddef>
#include <cstdint>

double rsqrt(double x) {
  return 1.0/sqrt(x);
}

float rsqrtf(float x) {
  return 1.0f/sqrtf(x);
}

double frac(double x) {
  return x - trunc(x);
}

float fracf(float x) {
  return x - truncf(x);
}

${type_declarations}

#ifdef _MSC_VER
template<size_t n> struct int_of_size;

#define DEFINE_INT_OF_SIZE(int_t) \
template<> struct int_of_size<sizeof(int_t)> { using type = int_t; }

DEFINE_INT_OF_SIZE(int64_t);
DEFINE_INT_OF_SIZE(int32_t);
DEFINE_INT_OF_SIZE(int16_t);
DEFINE_INT_OF_SIZE(int8_t);

#undef DEFINE_INT_OF_SIZE

template <typename T>
using int_same_size_t = typename int_of_size<sizeof(T)>::type;

#define IndexTypeLoop int_same_size_t<IndexType>
#define ToIndexTypeLoop(x) static_cast<IndexTypeLoop>(x)
#else
#define IndexTypeLoop IndexType
#define ToIndexTypeLoop(x) x
#endif

#define OMP_THRESHOLD 100000
static void ${kernelName}_kernel(IndexType totalElements, ${formals}) {
  #pragma omp parallel for if(totalElements > OMP_THRESHOLD)
  for (IndexTypeLoop linearIndex = 0;
        linearIndex < ToIndexTypeLoop(totalElements);
        linearIndex += 1) {
      // Convert `linearIndex` into an offset of tensor:
      ${tensorOffsets}
      // calculate the results
      ${kernelBody}
    }
}

#ifdef _WIN32
#define JIT_API __declspec(dllexport)
#else
#define JIT_API
#endif

extern "C"
JIT_API void ${kernelName}(IndexType totalElements, void ** args) {
  ${kernelName}_kernel(totalElements ${,argument_loads});
}
)");

} // namespace cpu
} // namespace fuser
} // namespace jit
} // namespace torch
GB_unop__identity_fc32_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_fc32_fc64
// op(A') function:  GB_unop_tran__identity_fc32_fc64

// C type:   GxB_FC32_t
// A type:   GxB_FC64_t
// cast:     GxB_FC32_t cij = GxB_CMPLXF ((float) creal (aij), (float) cimag (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) creal (aij), (float) cimag (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = GxB_CMPLXF ((float) creal (aij), (float) cimag (aij)) ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (0 here: FC64 -> FC32 narrows, so the memcpy fast path below is disabled)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_fc32_fc64
(
    GxB_FC32_t *Cx,                 // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) creal (aij), (float) cimag (aij)) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) creal (aij), (float) cimag (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_fc32_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose kernel picks up the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
struct.c
// RUN: %libomptarget-compile-generic -fopenmp-extensions // RUN: %libomptarget-run-generic | %fcheck-generic -strict-whitespace // Wrong results on amdgpu // XFAIL: amdgcn-amd-amdhsa // XFAIL: amdgcn-amd-amdhsa-newDriver #include <omp.h> #include <stdio.h> #define CHECK_PRESENCE(Var1, Var2, Var3) \ printf(" presence of %s, %s, %s: %d, %d, %d\n", \ #Var1, #Var2, #Var3, \ omp_target_is_present(&(Var1), omp_get_default_device()), \ omp_target_is_present(&(Var2), omp_get_default_device()), \ omp_target_is_present(&(Var3), omp_get_default_device())) #define CHECK_VALUES(Var1, Var2) \ printf(" values of %s, %s: %d, %d\n", \ #Var1, #Var2, (Var1), (Var2)) int main() { struct S { int i; int j; } s; // CHECK: presence of s, s.i, s.j: 0, 0, 0 CHECK_PRESENCE(s, s.i, s.j); // ======================================================================= // Check that ompx_hold keeps entire struct present. // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: ompx_hold only on first member\n"); s.i = 20; s.j = 30; #pragma omp target data map(tofrom: s) map(ompx_hold,tofrom: s.i) \ map(tofrom: s.j) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(tofrom: s) { s.i = 21; s.j = 31; } #pragma omp target exit data map(delete: s, s.i) // ompx_hold on s.i applies to all of s. 
// CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: ompx_hold only on last member\n"); s.i = 20; s.j = 30; #pragma omp target data map(tofrom: s) map(tofrom: s.i) \ map(ompx_hold,tofrom: s.j) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(tofrom: s) { s.i = 21; s.j = 31; } #pragma omp target exit data map(delete: s, s.i) // ompx_hold on s.j applies to all of s. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: ompx_hold only on struct\n"); s.i = 20; s.j = 30; #pragma omp target data map(ompx_hold,tofrom: s) map(tofrom: s.i) \ map(tofrom: s.j) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(tofrom: s) { s.i = 21; s.j = 31; } #pragma omp target exit data map(delete: s, s.i) // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ======================================================================= // Check that transfer to/from host checks reference count correctly. 
// ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: parent DynRefCount=1 is not sufficient for transfer\n"); s.i = 20; s.j = 30; #pragma omp target data map(ompx_hold, tofrom: s) #pragma omp target data map(ompx_hold, tofrom: s) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(from: s.i, s.j) { s.i = 21; s.j = 31; } // No transfer here even though parent's DynRefCount=1. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); #pragma omp target map(to: s.i, s.j) { // No transfer here even though parent's DynRefCount=1. // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_VALUES(s.i, s.j); } } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: parent HoldRefCount=1 is not sufficient for transfer\n"); s.i = 20; s.j = 30; #pragma omp target data map(tofrom: s) #pragma omp target data map(tofrom: s) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(ompx_hold, from: s.i, s.j) { s.i = 21; s.j = 31; } // No transfer here even though parent's HoldRefCount=1. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); #pragma omp target map(ompx_hold, to: s.i, s.j) { // No transfer here even though parent's HoldRefCount=1. 
// CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_VALUES(s.i, s.j); } } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} // // At the beginning of a region, if the parent's TotalRefCount=1, then the // transfer should happen. // // At the end of a region, it also must be true that the reference count being // decremented is the reference count that is 1. printf("check: parent TotalRefCount=1 is not sufficient for transfer\n"); s.i = 20; s.j = 30; #pragma omp target data map(ompx_hold, tofrom: s) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(ompx_hold, tofrom: s.i, s.j) { s.i = 21; s.j = 31; } #pragma omp target exit data map(from: s.i, s.j) // No transfer here even though parent's TotalRefCount=1. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); return 0; }
FGP_dTV_core.c
/*
This work is part of the Core Imaging Library developed by
Visual Analytics and Imaging System Group of the Science Technology
Facilities Council, STFC

Copyright 2019 Daniil Kazantsev
Copyright 2019 Srikanth Nagella, Edoardo Pasca

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#include "FGP_dTV_core.h"

/* C-OMP implementation of FGP-dTV [1,2] denoising/regularization model (2D/3D case)
 * which employs structural similarity of the level sets of two images/volumes, see [1,2]
 * The current implementation updates image 1 while image 2 is being fixed.
 *
 * Input Parameters:
 * 1. Noisy image/volume [REQUIRED]
 * 2. Additional reference image/volume of the same dimensions as (1) [REQUIRED]
 * 3. lambdaPar - regularization parameter [REQUIRED]
 * 4. Number of iterations [OPTIONAL]
 * 5. eplsilon: tolerance constant [OPTIONAL]
 * 6. eta: smoothing constant to calculate gradient of the reference [OPTIONAL] *
 * 7. TV-type: methodTV - 'iso' (0) or 'l1' (1) [OPTIONAL]
 * 8. nonneg: 'nonnegativity (0 is OFF by default) [OPTIONAL]
 * 9. print information: 0 (off) or 1 (on)  [OPTIONAL]
 *
 * Output:
 * [1] Filtered/regularized image/volume
 * [2] Information vector which contains [iteration no., reached tolerance]
 *
 * This function is based on the Matlab's codes and papers by
 * [1] Amir Beck and Marc Teboulle, "Fast Gradient-Based Algorithms for Constrained Total Variation Image Denoising and Deblurring Problems"
 * [2] M. J. Ehrhardt and M. M. Betcke, Multi-Contrast MRI Reconstruction with Structure-Guided Total Variation, SIAM Journal on Imaging Sciences 9(3), pp. 1084-1106
 */

float dTV_FGP_CPU_main(float *Input, float *InputRef, float *Output, float *infovector, float lambdaPar, int iterationsNumb, float epsil, float eta, int methodTV, int nonneg, int dimX, int dimY, int dimZ)
{
    /* initialised to 0 so infovector[0] is well-defined even when
       iterationsNumb <= 0 (previously read uninitialised in that case) */
    int ll = 0;
    long j, DimTotal;
    float re, re1;
    re = 0.0f; re1 = 0.0f;
    float tk = 1.0f;
    float tkp1 = 1.0f;
    int count = 0;

    float *Output_prev=NULL, *P1=NULL, *P2=NULL, *P1_prev=NULL, *P2_prev=NULL;
    float *R1=NULL, *R2=NULL, *InputRef_x=NULL, *InputRef_y=NULL;

    /* widen BEFORE multiplying: (long)(dimX*dimY*dimZ) overflows in int
       arithmetic for large volumes */
    DimTotal = (long)dimX*(long)dimY*(long)dimZ;

    /* Output_prev is only needed for the tolerance-based stopping test */
    if (epsil != 0.0f) Output_prev = calloc(DimTotal, sizeof(float));
    P1 = calloc(DimTotal, sizeof(float));
    P2 = calloc(DimTotal, sizeof(float));
    P1_prev = calloc(DimTotal, sizeof(float));
    P2_prev = calloc(DimTotal, sizeof(float));
    R1 = calloc(DimTotal, sizeof(float));
    R2 = calloc(DimTotal, sizeof(float));
    InputRef_x = calloc(DimTotal, sizeof(float));
    InputRef_y = calloc(DimTotal, sizeof(float));

    if (dimZ <= 1) {
        /*2D case */
        /* calculate gradient field (smoothed) for the reference image */
        GradNorm_func2D(InputRef, InputRef_x, InputRef_y, eta, (long)(dimX), (long)(dimY));

        /* begin iterations */
        for(ll=0; ll<iterationsNumb; ll++) {

            if ((epsil != 0.0f) && (ll % 5 == 0)) copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), 1l);

            /*projects a 2D vector field R-1,2 onto the orthogonal complement of another 2D vector field InputRef_xy*/
            ProjectVect_func2D(R1, R2, InputRef_x, InputRef_y, (long)(dimX), (long)(dimY));

            /* computing the gradient of the objective function */
            Obj_dfunc2D(Input, Output, R1, R2, lambdaPar, (long)(dimX), (long)(dimY));

            /* apply nonnegativity */
            if (nonneg == 1) for(j=0; j<DimTotal; j++) {if (Output[j] < 0.0f) Output[j] = 0.0f;}

            /*Taking a step towards minus of the gradient*/
            Grad_dfunc2D(P1, P2, Output, R1, R2, InputRef_x, InputRef_y, lambdaPar, (long)(dimX), (long)(dimY));

            /* projection step */
            Proj_dfunc2D(P1, P2, methodTV, DimTotal);

            /*updating R and t*/
            /* single-precision sqrtf: tk is float, no need for double sqrt */
            tkp1 = (1.0f + sqrtf(1.0f + 4.0f*tk*tk))*0.5f;
            Rupd_dfunc2D(P1, P1_prev, P2, P2_prev, R1, R2, tkp1, tk, DimTotal);

            copyIm(P1, P1_prev, (long)(dimX), (long)(dimY), 1l);
            copyIm(P2, P2_prev, (long)(dimX), (long)(dimY), 1l);
            tk = tkp1;

            /* check early stopping criteria (relative change every 5 iterations) */
            if ((epsil != 0.0f) && (ll % 5 == 0)) {
                re = 0.0f; re1 = 0.0f;
                for(j=0; j<DimTotal; j++) {
                    re += powf(Output[j] - Output_prev[j],2);
                    re1 += powf(Output[j],2);
                }
                /* guard against 0/0 (identically-zero image): a zero update on a
                   zero image counts as converged instead of producing NaN */
                if (re1 != 0.0f) re = sqrtf(re)/sqrtf(re1);
                else re = sqrtf(re);
                if (re < epsil) count++;
                if (count > 3) break;
            }
        }
    }
    else {
        /*3D case*/
        float *P3=NULL, *P3_prev=NULL, *R3=NULL, *InputRef_z=NULL;
        P3 = calloc(DimTotal, sizeof(float));
        P3_prev = calloc(DimTotal, sizeof(float));
        R3 = calloc(DimTotal, sizeof(float));
        InputRef_z = calloc(DimTotal, sizeof(float));

        /* calculate gradient field (smoothed) for the reference volume */
        GradNorm_func3D(InputRef, InputRef_x, InputRef_y, InputRef_z, eta, (long)(dimX), (long)(dimY), (long)(dimZ));

        /* begin iterations */
        for(ll=0; ll<iterationsNumb; ll++) {

            if ((epsil != 0.0f) && (ll % 5 == 0)) copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), (long)(dimZ));

            /*projects a 3D vector field R-1,2,3 onto the orthogonal complement of another 3D vector field InputRef_xyz*/
            ProjectVect_func3D(R1, R2, R3, InputRef_x, InputRef_y, InputRef_z, (long)(dimX), (long)(dimY), (long)(dimZ));

            /* computing the gradient of the objective function */
            Obj_dfunc3D(Input, Output, R1, R2, R3, lambdaPar, (long)(dimX), (long)(dimY), (long)(dimZ));

            /* apply nonnegativity */
            if (nonneg == 1) for(j=0; j<DimTotal; j++) {if (Output[j] < 0.0f) Output[j] = 0.0f;}

            /*Taking a step towards minus of the gradient*/
            Grad_dfunc3D(P1, P2, P3, Output, R1, R2, R3, InputRef_x, InputRef_y, InputRef_z, lambdaPar, (long)(dimX), (long)(dimY), (long)(dimZ));

            /* projection step */
            Proj_dfunc3D(P1, P2, P3, methodTV, DimTotal);

            /*updating R and t*/
            tkp1 = (1.0f + sqrtf(1.0f + 4.0f*tk*tk))*0.5f;
            Rupd_dfunc3D(P1, P1_prev, P2, P2_prev, P3, P3_prev, R1, R2, R3, tkp1, tk, DimTotal);

            /*storing old values*/
            copyIm(P1, P1_prev, (long)(dimX), (long)(dimY), (long)(dimZ));
            copyIm(P2, P2_prev, (long)(dimX), (long)(dimY), (long)(dimZ));
            copyIm(P3, P3_prev, (long)(dimX), (long)(dimY), (long)(dimZ));
            tk = tkp1;

            /* check early stopping criteria */
            if ((epsil != 0.0f) && (ll % 5 == 0)) {
                re = 0.0f; re1 = 0.0f;
                for(j=0; j<DimTotal; j++) {
                    re += powf(Output[j] - Output_prev[j],2);
                    re1 += powf(Output[j],2);
                }
                if (re1 != 0.0f) re = sqrtf(re)/sqrtf(re1);
                else re = sqrtf(re);
                if (re < epsil) count++;
                if (count > 3) break;
            }
        }
        free(P3); free(P3_prev); free(R3); free(InputRef_z);
    }

    if (epsil != 0.0f) free(Output_prev);
    free(P1); free(P2); free(P1_prev); free(P2_prev); free(R1); free(R2);
    free(InputRef_x); free(InputRef_y);

    /*adding info into info_vector */
    infovector[0] = (float)(ll); /*iterations number (if stopped earlier based on tolerance)*/
    infovector[1] = re;          /* reached tolerance */
    return 0;
}

/********************************************************************/
/***************************2D Functions*****************************/
/********************************************************************/

/* Compute the eta-smoothed, normalised forward-difference gradient of the
 * reference image B into (B_x, B_y).  Zero boundary conditions at the
 * far edges (as in the original code). */
float GradNorm_func2D(float *B, float *B_x, float *B_y, float eta, long dimX, long dimY)
{
    long i,j,index;
    float val1, val2, gradX, gradY, magn;
#pragma omp parallel for shared(B, B_x, B_y) private(i,j,index,val1,val2,gradX,gradY,magn)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            index = j*dimX+i;
            /* zero boundary conditions */
            if (i == dimX-1) {val1 = 0.0f;} else {val1 = B[j*dimX + (i+1)];}
            if (j == dimY-1) {val2 = 0.0f;} else {val2 = B[(j+1)*dimX + i];}
            gradX = val1 - B[index];
            gradY = val2 - B[index];
            /* plain squaring + sqrtf instead of double-precision pow/sqrt:
               same value, no float->double->float round trips */
            magn = gradX*gradX + gradY*gradY;
            magn = sqrtf(magn + eta*eta); /* the eta-smoothed gradients magnitude */
            B_x[index] = gradX/magn;
            B_y[index] = gradY/magn;
        }}
    return 1;
}

/* Project the vector field (R1,R2) onto the orthogonal complement of (B_x,B_y). */
float ProjectVect_func2D(float *R1, float *R2, float *B_x, float *B_y, long dimX, long dimY)
{
    long i,j,index;
    float in_prod;
#pragma omp parallel for shared(R1, R2, B_x, B_y) private(index,i,j,in_prod)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            index = j*dimX+i;
            in_prod = R1[index]*B_x[index] + R2[index]*B_y[index];  /* calculate inner product */
            R1[index] = R1[index] - in_prod*B_x[index];
            R2[index] = R2[index] - in_prod*B_y[index];
        }}
    return 1;
}

/* Objective-gradient step: D = A - lambda * div(R).  Returns *D (first pixel)
 * for interface compatibility with the original code. */
float Obj_dfunc2D(float *A, float *D, float *R1, float *R2, float lambda, long dimX, long dimY)
{
    float val1, val2;
    long i,j,index;
#pragma omp parallel for shared(A,D,R1,R2) private(index,i,j,val1,val2)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            index = j*dimX+i;
            /* boundary conditions */
            if (i == 0) {val1 = 0.0f;} else {val1 = R1[j*dimX + (i-1)];}
            if (j == 0) {val2 = 0.0f;} else {val2 = R2[(j-1)*dimX + i];}
            D[index] = A[index] - lambda*(R1[index] + R2[index] - val1 - val2);
        }}
    return *D;
}

/* Gradient ascent step on the dual variables, with the directional component
 * along (B_x,B_y) removed (the dTV coupling). */
float Grad_dfunc2D(float *P1, float *P2, float *D, float *R1, float *R2, float *B_x, float *B_y, float lambda, long dimX, long dimY)
{
    float val1, val2, multip, in_prod;
    long i,j,index;
    multip = (1.0f/(8.0f*lambda)); /* 1/L, L = Lipschitz constant of the 2D dual problem */
#pragma omp parallel for shared(P1,P2,D,R1,R2,B_x,B_y,multip) private(i,j,index,val1,val2,in_prod)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            index = j*dimX+i;
            /* boundary conditions */
            if (i == dimX-1) val1 = 0.0f; else val1 = D[index] - D[j*dimX + (i+1)];
            if (j == dimY-1) val2 = 0.0f; else val2 = D[index] - D[(j+1)*dimX + i];
            in_prod = val1*B_x[index] + val2*B_y[index];  /* calculate inner product */
            val1 = val1 - in_prod*B_x[index];
            val2 = val2 - in_prod*B_y[index];
            P1[index] = R1[index] + multip*val1;
            P2[index] = R2[index] + multip*val2;
        }}
    return 1;
}

/* Project the dual variables onto the unit ball: isotropic (methTV==0) or
 * anisotropic/l1 (methTV!=0). */
float Proj_dfunc2D(float *P1, float *P2, int methTV, long DimTotal)
{
    float val1, val2, denom, sq_denom;
    long i;
    if (methTV == 0) {
        /* isotropic TV*/
#pragma omp parallel for shared(P1,P2) private(i,denom,sq_denom)
        for(i=0; i<DimTotal; i++) {
            denom = powf(P1[i],2) + powf(P2[i],2);
            if (denom > 1.0f) {
                sq_denom = 1.0f/sqrtf(denom);
                P1[i] = P1[i]*sq_denom;
                P2[i] = P2[i]*sq_denom;
            }
        }
    }
    else {
        /* anisotropic TV*/
#pragma omp parallel for shared(P1,P2) private(i,val1,val2)
        for(i=0; i<DimTotal; i++) {
            /* fabsf (not double fabs): operands are float */
            val1 = fabsf(P1[i]);
            val2 = fabsf(P2[i]);
            if (val1 < 1.0f) {val1 = 1.0f;}
            if (val2 < 1.0f) {val2 = 1.0f;}
            P1[i] = P1[i]/val1;
            P2[i] = P2[i]/val2;
        }
    }
    return 1;
}

/* FISTA momentum update of the dual variables R from P and P_old. */
float Rupd_dfunc2D(float *P1, float *P1_old, float *P2, float *P2_old, float *R1, float *R2, float tkp1, float tk, long DimTotal)
{
    long i;
    float multip;
    multip = ((tk-1.0f)/tkp1);
#pragma omp parallel for shared(P1,P2,P1_old,P2_old,R1,R2,multip) private(i)
    for(i=0; i<DimTotal; i++) {
        R1[i] = P1[i] + multip*(P1[i] - P1_old[i]);
        R2[i] = P2[i] + multip*(P2[i] - P2_old[i]);
    }
    return 1;
}

/********************************************************************/
/***************************3D Functions*****************************/
/********************************************************************/

/* 3D analogue of GradNorm_func2D. */
float GradNorm_func3D(float *B, float *B_x, float *B_y, float *B_z, float eta, long dimX, long dimY, long dimZ)
{
    long i, j, k, index;
    float val1, val2, val3, gradX, gradY, gradZ, magn;
#pragma omp parallel for shared(B, B_x, B_y, B_z) private(i,j,k,index,val1,val2,val3,gradX,gradY,gradZ,magn)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            for(k=0; k<dimZ; k++) {
                index = (dimX*dimY)*k + j*dimX+i;
                /* zero boundary conditions */
                if (i == dimX-1) {val1 = 0.0f;} else {val1 = B[(dimX*dimY)*k + j*dimX+(i+1)];}
                if (j == dimY-1) {val2 = 0.0f;} else {val2 = B[(dimX*dimY)*k + (j+1)*dimX+i];}
                if (k == dimZ-1) {val3 = 0.0f;} else {val3 = B[(dimX*dimY)*(k+1) + (j)*dimX+i];}
                gradX = val1 - B[index];
                gradY = val2 - B[index];
                gradZ = val3 - B[index];
                magn = gradX*gradX + gradY*gradY + gradZ*gradZ;
                magn = sqrtf(magn + eta*eta); /* the eta-smoothed gradients magnitude */
                B_x[index] = gradX/magn;
                B_y[index] = gradY/magn;
                B_z[index] = gradZ/magn;
            }}}
    return 1;
}

/* 3D analogue of ProjectVect_func2D. */
float ProjectVect_func3D(float *R1, float *R2, float *R3, float *B_x, float *B_y, float *B_z, long dimX, long dimY, long dimZ)
{
    long i,j,k,index;
    float in_prod;
#pragma omp parallel for shared(R1, R2, R3, B_x, B_y, B_z) private(index,i,j,k,in_prod)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            for(k=0; k<dimZ; k++) {
                index = (dimX*dimY)*k + j*dimX+i;
                in_prod = R1[index]*B_x[index] + R2[index]*B_y[index] + R3[index]*B_z[index];  /* calculate inner product */
                R1[index] = R1[index] - in_prod*B_x[index];
                R2[index] = R2[index] - in_prod*B_y[index];
                R3[index] = R3[index] - in_prod*B_z[index];
            }}}
    return 1;
}

/* 3D analogue of Obj_dfunc2D. */
float Obj_dfunc3D(float *A, float *D, float *R1, float *R2, float *R3, float lambda, long dimX, long dimY, long dimZ)
{
    float val1, val2, val3;
    long i,j,k,index;
#pragma omp parallel for shared(A,D,R1,R2,R3) private(index,i,j,k,val1,val2,val3)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            for(k=0; k<dimZ; k++) {
                index = (dimX*dimY)*k + j*dimX+i;
                /* boundary conditions */
                if (i == 0) {val1 = 0.0f;} else {val1 = R1[(dimX*dimY)*k + j*dimX + (i-1)];}
                if (j == 0) {val2 = 0.0f;} else {val2 = R2[(dimX*dimY)*k + (j-1)*dimX + i];}
                if (k == 0) {val3 = 0.0f;} else {val3 = R3[(dimX*dimY)*(k-1) + j*dimX + i];}
                D[index] = A[index] - lambda*(R1[index] + R2[index] + R3[index] - val1 - val2 - val3);
            }}}
    return *D;
}

/* 3D analogue of Grad_dfunc2D (Lipschitz constant 26 in 3D). */
float Grad_dfunc3D(float *P1, float *P2, float *P3, float *D, float *R1, float *R2, float *R3, float *B_x, float *B_y, float *B_z, float lambda, long dimX, long dimY, long dimZ)
{
    float val1, val2, val3, multip, in_prod;
    long i,j,k, index;
    multip = (1.0f/(26.0f*lambda));
#pragma omp parallel for shared(P1,P2,P3,D,R1,R2,R3,multip) private(index,i,j,k,val1,val2,val3,in_prod)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            for(k=0; k<dimZ; k++) {
                index = (dimX*dimY)*k + j*dimX+i;
                /* boundary conditions */
                if (i == dimX-1) val1 = 0.0f; else val1 = D[index] - D[(dimX*dimY)*k + j*dimX + (i+1)];
                if (j == dimY-1) val2 = 0.0f; else val2 = D[index] - D[(dimX*dimY)*k + (j+1)*dimX + i];
                if (k == dimZ-1) val3 = 0.0f; else val3 = D[index] - D[(dimX*dimY)*(k+1) + j*dimX + i];
                in_prod = val1*B_x[index] + val2*B_y[index] + val3*B_z[index];  /* calculate inner product */
                val1 = val1 - in_prod*B_x[index];
                val2 = val2 - in_prod*B_y[index];
                val3 = val3 - in_prod*B_z[index];
                P1[index] = R1[index] + multip*val1;
                P2[index] = R2[index] + multip*val2;
                P3[index] = R3[index] + multip*val3;
            }}}
    return 1;
}

/* 3D analogue of Proj_dfunc2D. */
float Proj_dfunc3D(float *P1, float *P2, float *P3, int methTV, long DimTotal)
{
    float val1, val2, val3, denom, sq_denom;
    long i;
    if (methTV == 0) {
        /* isotropic TV*/
        /* FIX: "denom" was missing from the private clause, making it shared
           across threads -- a data race that could corrupt the projection.
           (Compare with the correct 2D version above.) */
#pragma omp parallel for shared(P1,P2,P3) private(i,denom,sq_denom)
        for(i=0; i<DimTotal; i++) {
            denom = powf(P1[i],2) + powf(P2[i],2) + powf(P3[i],2);
            if (denom > 1.0f) {
                sq_denom = 1.0f/sqrtf(denom);
                P1[i] = P1[i]*sq_denom;
                P2[i] = P2[i]*sq_denom;
                P3[i] = P3[i]*sq_denom;
            }
        }
    }
    else {
        /* anisotropic TV*/
#pragma omp parallel for shared(P1,P2,P3) private(i,val1,val2,val3)
        for(i=0; i<DimTotal; i++) {
            val1 = fabsf(P1[i]);
            val2 = fabsf(P2[i]);
            val3 = fabsf(P3[i]);
            if (val1 < 1.0f) {val1 = 1.0f;}
            if (val2 < 1.0f) {val2 = 1.0f;}
            if (val3 < 1.0f) {val3 = 1.0f;}
            P1[i] = P1[i]/val1;
            P2[i] = P2[i]/val2;
            P3[i] = P3[i]/val3;
        }
    }
    return 1;
}

/* 3D analogue of Rupd_dfunc2D. */
float Rupd_dfunc3D(float *P1, float *P1_old, float *P2, float *P2_old, float *P3, float *P3_old, float *R1, float *R2, float *R3, float tkp1, float tk, long DimTotal)
{
    long i;
    float multip;
    multip = ((tk-1.0f)/tkp1);
#pragma omp parallel for shared(P1,P2,P3,P1_old,P2_old,P3_old,R1,R2,R3,multip) private(i)
    for(i=0; i<DimTotal; i++) {
        R1[i] = P1[i] + multip*(P1[i] - P1_old[i]);
        R2[i] = P2[i] + multip*(P2[i] - P2_old[i]);
        R3[i] = P3[i] + multip*(P3[i] - P3_old[i]);
    }
    return 1;
}
GB_binop__bclr_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):    GB_AaddB__bclr_uint16
// A.*B function (eWiseMult):  GB_AemultB__bclr_uint16
// A*D function (colscale):    (none)
// D*A function (rowscale):    (node)
//   NOTE(review): "(node)" above is "(none)" in sibling generated files --
//   looks like a generator placeholder typo; the rowscale kernel is disabled
//   (#if 0) either way, so it has no effect.  Confirm against the generator.
// C+=B function (dense accum):   GB_Cdense_accumB__bclr_uint16
// C+=b function (dense accum):   GB_Cdense_accumb__bclr_uint16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3):  GB_Cdense_ewise3_noaccum__bclr_uint16
// C=scalar+B    GB_bind1st__bclr_uint16
// C=scalar+B'   GB_bind1st_tran__bclr_uint16
// C=A+scalar    GB_bind2nd__bclr_uint16
// C=A'+scalar   GB_bind2nd_tran__bclr_uint16

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_BITCLR (aij, bij, uint16_t, 16)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: bit-clear (clear bit y of x, 16-bit unsigned)
#define GB_BINOP(z, x, y, i, j) \
    z = GB_BITCLR (x, y, uint16_t, 16) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BCLR || GxB_NO_UINT16 || GxB_NO_BCLR_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled for BCLR; "(none)" is the generator's placeholder name.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__bclr_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__bclr_uint16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__bclr_uint16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the inner block always returns; emitted as-is
    // by the code generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// Disabled for BCLR (not a semiring multiply op); placeholder name "(none)".

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// Disabled for BCLR.  NOTE(review): placeholder is "(node)" here but "(none)"
// elsewhere -- apparent generator typo; harmless inside #if 0.

GrB_Info (node)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// free the ek_slice workspaces for M, A, and B (safe if never allocated: all
// pointers start NULL)
#undef  GB_FREE_ALL
#define GB_FREE_ALL             \
{                               \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__bclr_uint16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__bclr_uint16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__bclr_uint16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (Bb == NULL means all present)
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = Bx [p] ;
        Cx [p] = GB_BITCLR (x, bij, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__bclr_uint16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = Ax [p] ;
        Cx [p] = GB_BITCLR (aij, y, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = Ax [pA] ;                    \
    Cx [pC] = GB_BITCLR (x, aij, uint16_t, 16) ;  \
}

GrB_Info GB_bind1st_tran__bclr_uint16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template expansion
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = Ax [pA] ;                    \
    Cx [pC] = GB_BITCLR (aij, y, uint16_t, 16) ;  \
}

GrB_Info GB_bind2nd_tran__bclr_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
heap_mult.h
#include "CSC.h"
#include "utility.h"
#include <omp.h>
#include <algorithm>
#include <iostream>

using namespace std;

/**
 ** Count flop of SpGEMM between A and B in CSC format
 **
 ** For each column i of B, sums the lengths of the A-columns selected by the
 ** nonzeros of B(:,i); that per-column total is both an upper bound on
 ** nnz(C(:,i)) (stored into maxnnzc[i]) and the multiply count for that
 ** column.  Returns 2 * (multiply count), i.e. multiplies plus additions.
 **/
template <typename IT, typename NT>
long long int get_flop(const CSC<IT,NT> & A, const CSC<IT,NT> & B, IT *maxnnzc)
{
    long long int flop = 0; // total flop (multiplication) needed to generate C
#pragma omp parallel
    {
        long long int tflop=0; //thread private flop
#pragma omp for
        for (IT i=0; i < B.cols; ++i) {         // for all columns of B
            long long int locmax = 0;
            for (IT j = B.colptr[i]; j < B.colptr[i+1]; ++j) { // For all the nonzeros of the ith column
                IT inner = B.rowids[j];         // get the row id of B (or column id of A)
                IT npins = A.colptr[inner+1] - A.colptr[inner]; // get the number of nonzeros in A's corresponding column
                locmax += npins;
            }
            maxnnzc[i] = locmax;
            tflop += locmax;
        }
        // NOTE(review): a reduction(+:flop) clause would avoid this critical
        // section, but the cost is once per thread so it is negligible.
#pragma omp critical
        {
            flop += tflop;
        }
    }
    return flop * 2;
}

// Convenience overload: same flop count but without returning the per-column
// upper bounds (a scratch array is allocated and discarded internally).
template <typename IT, typename NT>
long long int get_flop(const CSC<IT,NT> & A, const CSC<IT,NT> & B)
{
    IT *dummy = my_malloc<IT>(B.cols);
    long long int flop = get_flop(A, B, dummy);
    my_free<IT>(dummy);
    return flop;
}

/**
 ** Heap-based SpGEMM: C = A * B, all matrices in CSC format.
 **
 ** Each column C(:,i) is formed by a k-way merge: one heap entry per nonzero
 ** of B(:,i), each entry streaming through the corresponding column of A in
 ** sorted row order.  Work is statically partitioned so every thread gets a
 ** contiguous range of B's columns with roughly equal flops; each thread
 ** writes into its own private output buffer sized by the upper bounds from
 ** get_flop, and the buffers are concatenated into C at the end.
 **
 ** multop(a,b) combines A and B values; addop(x,y) merges duplicates.
 **/
template <typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
void HeapSpGEMM(const CSC<IT,NT> & A, const CSC<IT,NT> & B, CSC<IT,NT> & C, MultiplyOperation multop, AddOperation addop)
{
    // Discover the thread count; every thread writes the same value, so the
    // race on numThreads is benign.
    int numThreads;
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }

    // *************** Load-balancing Thread Scheduling *********************
    // maxnnzc[i] = upper bound on nnz(C(:,i)); flops = multiply count.
    IT *maxnnzc = my_malloc<IT>(B.cols);
    long long int flops = get_flop(A, B, maxnnzc) / 2;

    IT flopsPerThread = flops/numThreads; // amount of work that will be assigned to each thread
    IT *colPerThread = my_malloc<IT>(numThreads + 1); //thread i will process columns from colPerThread[i] to colPerThread[i+1]-1

    IT *colStart = my_malloc<IT>(B.cols); //start index in the global array for storing ith column of C
    IT *colEnd = my_malloc<IT>(B.cols); //end index in the global array for storing ith column of C
    colStart[0] = 0;
    colEnd[0] = 0;

    // NOTE(review): curThread and nextflops are leftovers of a sequential
    // partitioning loop that the parallel version below replaced; they are
    // effectively unused (colPerThread[0] is set here, the rest below).
    int curThread = 0;
    colPerThread[curThread++] = 0;
    IT nextflops = flopsPerThread;

    /* Parallelized version */
    // colStart = exclusive prefix sums of maxnnzc; colEnd starts equal to
    // colStart (each column's output region is initially empty).
    scan(maxnnzc, colStart, B.cols);
#pragma omp parallel for
    for (int i = 1; i < B.cols; ++i) {
        colEnd[i] = colStart[i];
    }
    // Each thread t ends its column range where the cumulative upper bound
    // first reaches (t+1) * flopsPerThread (binary search on the prefix sums).
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        long end_itr = (lower_bound(colStart, colStart + B.cols, flopsPerThread * (tid + 1))) - colStart;
        colPerThread[tid + 1] = end_itr;
    }
    colPerThread[numThreads] = B.cols;  // last thread always ends at B.cols

    // *************** Creating global space to store result, used by all threads *********************
    // NOTE(review): size is computed but never used.
    IT size = colEnd[B.cols-1] + maxnnzc[B.cols-1];
    IT **LocalRowIdsofC = my_malloc<IT*>(numThreads);
    NT **LocalValuesofC = my_malloc<NT*>(numThreads);
    // Each thread allocates a private buffer big enough for the upper-bound
    // nnz of all columns in its range.
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        IT localsum = 0;
        for (IT i = colPerThread[tid]; i < colPerThread[tid + 1]; ++i) {
            localsum += maxnnzc[i];
        }
        LocalRowIdsofC[tid] = my_malloc<IT>(localsum);
        LocalValuesofC[tid] = my_malloc<NT>(localsum);
    }
    my_free<IT>(maxnnzc);

    // *************** Creating LOCAL heap space to be used by all threads *********************
    // Heap capacity per thread = max nnz over its B-columns (one heap entry
    // per nonzero of the current B column).
    IT *threadHeapSize = my_malloc<IT>(numThreads);
#pragma omp parallel
    {
        int thisThread = omp_get_thread_num();
        // IT localmax = -1; //incorrect
        IT localmax = 0;
        for (IT i = colPerThread[thisThread]; i < colPerThread[thisThread + 1]; ++i) {
            IT colnnz = B.colptr[i + 1] - B.colptr[i];
            if (colnnz > localmax) localmax = colnnz;
        }
        threadHeapSize[thisThread] = localmax;
    }

    // ************************ Numeric Phase *************************************
#pragma omp parallel
    {
        int thisThread = omp_get_thread_num();
        HeapEntry<IT, NT> *mergeheap = my_malloc<HeapEntry<IT, NT>>(threadHeapSize[thisThread]);

        for (IT i = colPerThread[thisThread]; i < colPerThread[thisThread + 1]; ++i) {
            IT k = 0; // Make initial heap
            for (IT j = B.colptr[i]; j < B.colptr[i + 1]; ++j) { // For all the nonzeros of the ith column
                IT inner = B.rowids[j]; // get the row id of B (or column id of A)
                IT npins = A.colptr[inner + 1] - A.colptr[inner]; // get the number of nonzeros in A's corresponding column
                if (npins > 0) {
                    // seed the heap with the first (smallest-row) entry of
                    // A(:,inner) scaled by B(inner,i)
                    mergeheap[k].loc = 1;
                    mergeheap[k].runr = j; // the pointer to B.rowid's is the run-rank
                    mergeheap[k].value = multop(A.values[A.colptr[inner]], B.values[j]);
                    mergeheap[k++].key = A.rowids[A.colptr[inner]]; // A's first rowid is the first key
                }
            }
            IT hsize = k; // if any of A's "significant" columns is empty, k will be less than hsize
            make_heap(mergeheap, mergeheap + hsize);

            // k-way merge: repeatedly pop the smallest row id, fold duplicates
            // with addop, and advance that run within its A column.
            while(hsize > 0) {
                pop_heap(mergeheap, mergeheap + hsize); // result is stored in mergeheap[hsize-1]
                HeapEntry<IT,NT> hentry = mergeheap[hsize - 1];

                // Use short circuiting
                // Output index is relative to this thread's private buffer:
                // colEnd[i] - colStart[first column of this thread].
                if ((colEnd[i] > colStart[i]) && LocalRowIdsofC[thisThread][colEnd[i] - colStart[colPerThread[thisThread]] - 1] == hentry.key) {
                    // same row id as the previous output entry: accumulate
                    LocalValuesofC[thisThread][colEnd[i] - colStart[colPerThread[thisThread]] - 1] = addop(hentry.value, LocalValuesofC[thisThread][colEnd[i] - colStart[colPerThread[thisThread]] - 1]);
                } else {
                    // new row id: append a fresh output entry
                    LocalValuesofC[thisThread][colEnd[i] - colStart[colPerThread[thisThread]]]= hentry.value;
                    LocalRowIdsofC[thisThread][colEnd[i] - colStart[colPerThread[thisThread]]]= hentry.key;
                    colEnd[i] ++;
                }

                IT inner = B.rowids[hentry.runr];

                // If still unused nonzeros exists in A(:,colind), insert the next nonzero to the heap
                if ((A.colptr[inner + 1] - A.colptr[inner]) > hentry.loc) {
                    IT index = A.colptr[inner] + hentry.loc;
                    mergeheap[hsize-1].loc = hentry.loc + 1;
                    mergeheap[hsize-1].runr = hentry.runr;
                    mergeheap[hsize-1].value = multop(A.values[index], B.values[hentry.runr]);
                    mergeheap[hsize-1].key = A.rowids[index];
                    push_heap(mergeheap, mergeheap + hsize);
                } else {
                    --hsize; // this run is exhausted; shrink the heap
                }
            }
        }
        my_free<HeapEntry<IT, NT>>(mergeheap);
    }
    my_free<IT>(threadHeapSize);

    // NOTE(review): this condition looks inverted — make_empty() on an
    // already-empty C is presumably a no-op, while a non-empty C would leak
    // its old arrays when overwritten below.  Verify against CSC::make_empty.
    if (C.isEmpty()) {
        C.make_empty();
    }

    // ************************ Copy output to C *************************************
    C.rows = A.rows;
    C.cols = B.cols;
    C.colptr = my_malloc<IT>(C.cols + 1);
    C.colptr[0] = 0;

    // Exact per-column counts (colEnd - colStart), prefix-summed into colptr.
    IT *col_nz = my_malloc<IT>(C.cols);
#pragma omp parallel for
    for (int i = 0; i < C.cols; ++i) {
        col_nz[i] = colEnd[i] - colStart[i];
    }
    scan(col_nz, C.colptr, C.cols + 1);
    my_free<IT>(col_nz);

    C.nnz = C.colptr[C.cols];
    C.rowids = my_malloc<IT>(C.nnz);
    C.values = my_malloc<NT>(C.nnz);

    // Each thread copies its private columns into their final, compacted
    // positions in C (disjoint ranges, so no synchronization needed).
#pragma omp parallel
    {
        int thisThread = omp_get_thread_num();
        for(int i = colPerThread[thisThread]; i< colPerThread[thisThread + 1]; ++i) { // combine step
            copy(&LocalRowIdsofC[thisThread][colStart[i] - colStart[colPerThread[thisThread]]],
                 &LocalRowIdsofC[thisThread][colEnd[i] - colStart[colPerThread[thisThread]]],
                 C.rowids + C.colptr[i]);
            copy(&LocalValuesofC[thisThread][colStart[i] - colStart[colPerThread[thisThread]]],
                 &LocalValuesofC[thisThread][colEnd[i] - colStart[colPerThread[thisThread]]],
                 C.values + C.colptr[i]);
        }
    }

    // ************************ Memory deallocation *************************************
#pragma omp parallel
    {
        int thisThread = omp_get_thread_num();
        my_free<IT>(LocalRowIdsofC[thisThread]);
        my_free<NT>(LocalValuesofC[thisThread]);
    }
    my_free<IT*>(LocalRowIdsofC);
    my_free<NT*>(LocalValuesofC);
    my_free<IT>(colPerThread);
    my_free<IT>(colEnd);
    my_free<IT>(colStart);
}
main.c
// Motion-detection pipeline for a PULP/GAP-style accelerator: background
// subtraction -> threshold -> erosion -> dilatation -> Sobel edge detect ->
// mask multiply.  Supports scalar and 4-byte-SIMD (GCC vector extension)
// code paths, optional OpenMP work sharing, and double-buffered DMA tiling.
#include <stdint.h>
#include "omp.h"
#include "common.h"

#define KERNEL_SIZE 3           // all filters use 3x3 neighborhoods
#define NUM_CORES 8

//#define APP_DEBUG
#define PROFILING
#define CHECK

#define IMG_WIDTH 24
#define IMG_HEIGHT 20

#ifndef NO_SHORT_PIXEL
//#define SHORT_PIXEL
#endif

#ifndef NO_USE_INTRINSICS
#define USE_INTRINSICS
#endif

#ifndef NO_USE_OMP
#define USE_OMP
#endif

#define USE_TILING

#ifdef USE_TILING
#define PRECISE_TILING
#define TILE_WIDTH 12
#define TILE_HEIGHT 10
#define W_TILES (IMG_WIDTH/TILE_WIDTH)
#define H_TILES (IMG_HEIGHT/TILE_HEIGHT)
#define N_TILES (W_TILES*H_TILES)
#define BORDER_SIZE (KERNEL_SIZE/2)*4 // 4 = NxN filters in the pipeline
#define BUFFER_WIDTH (TILE_WIDTH + BORDER_SIZE)
#define BUFFER_HEIGHT (TILE_HEIGHT + BORDER_SIZE)
#else
#define BUFFER_WIDTH IMG_WIDTH
#define BUFFER_HEIGHT IMG_HEIGHT
#endif

// Pixel/coefficient types plus 4-byte SIMD wrappers.  __ABS2/__MAX2/... and
// __ABS4/__MAX4/... are PULP builtins defined outside this file.
#ifdef SHORT_PIXEL
// 16-bit pixels: 2 lanes per 4-byte vector
typedef short pixel;
typedef short coeff;
typedef short vector __attribute__ ((vector_size (4)));
typedef short coeff_vector __attribute__ ((vector_size (4)));
inline vector vector_abs(vector x) { return __ABS2(x); }
inline vector vector_max(vector x, vector y) { return __MAX2(x, y); }
inline short max_element(vector x) { return x[0] > x[1]? x[0]: x[1]; }
inline vector vector_min(vector x, vector y) { return __MIN2(x, y); }
inline short min_element(vector x) { return x[0] < x[1]? x[0]: x[1]; }
#define INIT_VECTOR(x) {x,x}
#else
// 8-bit pixels: 4 lanes per 4-byte vector
typedef unsigned char pixel;
typedef char coeff;
typedef unsigned char vector __attribute__ ((vector_size (4)));
typedef char coeff_vector __attribute__ ((vector_size (4)));
typedef signed char signed_vector __attribute__ ((vector_size (4)));
inline coeff_vector vector_abs(coeff_vector x) { return __ABS4(x); }
inline coeff_vector vector_max(coeff_vector x, coeff_vector y) { return __MAX4(x, y); }
inline unsigned char max_element(vector x) {
    unsigned char a = (x[0] > x[1])? x[0]: x[1];
    unsigned char b = (x[2] > x[3])? x[2]: x[3];
    return (a>b)? a: b;
}
inline coeff_vector vector_min(coeff_vector x, coeff_vector y) { return __MIN4(x, y); }
inline unsigned char min_element(vector x) {
    unsigned char a = (x[0] < x[1])? x[0]: x[1];
    unsigned char b = (x[2] < x[3])? x[2]: x[3];
    return (a<b)? a: b;
}
#define INIT_VECTOR(x) {x,x,x,x}
#endif

#ifdef USE_INTRINSICS
#define VECTOR_SIZE (sizeof(vector)/sizeof(pixel))
#endif

//#include "data_5_5.h"
#include "data_20_24.h"

// image_in1 += image_in2, elementwise.  Note: in the scalar (#else) path
// there is no OMP worksharing pragma, unlike the other kernels here.
static void sum_image(pixel *restrict image_in1, pixel *restrict image_in2, unsigned width, unsigned height)
{
#ifdef USE_INTRINSICS
    vector *image_in1_vec = (vector *) image_in1;
    vector *image_in2_vec = (vector *) image_in2;
    const unsigned W = width/VECTOR_SIZE;
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned i = 0; i < height; i++)
        for (unsigned j = 0; j < W; j++)
            image_in1_vec[i * W + j] += image_in2_vec[i * W + j];
#else
    for (unsigned i = 0; i < height; i++) {
        for (unsigned j = 0; j < width; j++) {
            image_in1[i * width + j] = (image_in1[i * width + j])+(image_in2[i * width + j]);
        }
    }
#endif
}

// image_out = |image_in1 - image_in2|, where image_in2 is read with its own
// stride/offset (so a tile can be subtracted from the full-size background).
static void sub_image(pixel *restrict image_in1, pixel *restrict image_in2, pixel *restrict image_out, unsigned width, unsigned height, unsigned in2_stride, unsigned in2_offset)
{
#ifdef USE_INTRINSICS
    vector *image_in1_vec = (vector *) image_in1;
    vector *image_in2_vec = (vector *) (image_in2+in2_offset);
    vector *image_out_vec = (vector *) image_out;
    const unsigned W = width/VECTOR_SIZE;
    const unsigned W2 = in2_stride/VECTOR_SIZE;
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned i = 0; i < height; i++) {
        for (unsigned j = 0; j < W; j++) {
            vector v = image_in2_vec[i * W2 + j];
            //if(j == W-1 && width%4 != 0) {
            //  v[2] = image_in2[(i+1) * in2_stride + in2_offset];
            //  v[3] = image_in2[(i+1) * in2_stride + in2_offset + 1];
            //  }
            image_out_vec[i * W + j] = vector_abs(image_in1_vec[i * W + j] - v);
        }
    }
#else
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned i = 0; i < height; i++) {
        for (unsigned j = 0; j < width; j++) {
            int val = (int)image_in1[i * width + j] - (int)image_in2[i * in2_stride + in2_offset + j];
            // absolute difference; (short) casts then narrow into pixel
            image_out[i * width + j] = (val>0? (short)val: (short)-val);
        }
    }
#endif
}

// Returns the maximum pixel value of the image.
// NOTE(review): under USE_OMP this is called from inside a parallel region
// and each thread reduces only its share of rows — the caller combines the
// per-thread partial maxima via max_local[].
static pixel max_image(pixel *restrict image_in, unsigned width, unsigned height)
{
    pixel max_pix = 0;
#ifdef USE_INTRINSICS
    vector *image_in_vec = (vector *) image_in;
    vector max_vector = INIT_VECTOR(0);
    const unsigned W = width/VECTOR_SIZE;
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned i = 0; i < height; i++)
        for (unsigned j = 0; j < W; j++)
            max_vector = vector_max(image_in_vec[i * W + j], max_vector);
    max_pix = max_element(max_vector) ;
#else
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned i = 0; i < height; i++)
        for (unsigned j = 0; j < width; j++)
            if (image_in[i * width + j] > max_pix)
                max_pix = image_in[i * width + j];
#endif
    return max_pix;
}

// Fused kernel: max over |image_in1 - image_in2| without materializing the
// difference image (image_in2 read with stride/offset, as in sub_image).
static pixel max_sub_image(pixel *restrict image_in1, pixel *restrict image_in2, unsigned width, unsigned height, unsigned in2_stride, unsigned in2_offset)
{
    pixel max_pix = 0;
#ifdef USE_INTRINSICS
    vector *image_in1_vec = (vector *) image_in1;
    vector *image_in2_vec = (vector *) image_in2;
    vector max_vector = INIT_VECTOR(0);
    const unsigned W = width/VECTOR_SIZE;
    const unsigned W2 = in2_stride/VECTOR_SIZE;
    const unsigned off = in2_offset/VECTOR_SIZE;
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned i = 0; i < height; i++) {
        for (unsigned j = 0; j < W; j++){
            vector v = vector_abs(image_in1_vec[i * W + j] - image_in2_vec[i * W2 + off + j]);
            max_vector = vector_max(v, max_vector);
        }
    }
    max_pix = max_element(max_vector);
#else
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned i = 0; i < height; i++) {
        for (unsigned j = 0; j < width; j++) {
            int val = (int)image_in1[i * width + j] - (int)image_in2[i * in2_stride + in2_offset + j];
            if (val < 0) val = -val;
            if (val > max_pix) max_pix = val;
        }
    }
#endif
    return max_pix;
}

// Returns the minimum pixel value of the image.
// NOTE(review): min_pix/min_vector start at 0, so for unsigned pixels this
// can only ever return 0 — looks like it should start at the max pixel
// value.  Unused by main() below; confirm before relying on it.
static pixel min_image(pixel *restrict image_in, unsigned width, unsigned height)
{
    pixel min_pix = 0;
#ifdef USE_INTRINSICS
    vector *image_in_vec = (vector *) image_in;
    vector min_vector = INIT_VECTOR(0);
    const unsigned W = width/VECTOR_SIZE;
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned i = 0; i < height; i++)
        for (unsigned j = 0; j < W; j++)
            min_vector = vector_min(image_in_vec[i * W + j], min_vector);
    min_pix = min_element(min_vector) ;
#else
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned i = 0; i < height; i++)
        for (unsigned j = 0; j < width; j++)
            if (image_in[i * width + j] < min_pix)
                min_pix = image_in[i * width + j];
#endif
    return min_pix;
}

// In-place threshold: pixels < thresh become val_min, others val_max.
// The SIMD path computes this branch-free: clamp-to-threshold, subtract,
// saturate to {0,1}, then scale by (val_max - val_min) and add val_min.
static void binarisation(pixel *restrict image_in, pixel thresh, pixel val_max, pixel val_min, unsigned width, unsigned height)
{
#ifdef USE_INTRINSICS
    vector *image_in_vec = (vector *) image_in;
    vector Vth = INIT_VECTOR(thresh-1);
    vector Vones = INIT_VECTOR(1);
    vector C = INIT_VECTOR(val_max-val_min);
    vector Vmin = INIT_VECTOR(val_min);
    const unsigned W = width/VECTOR_SIZE;
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned i = 0; i < height; i++)
        for (unsigned j = 0; j < W; j++) {
            vector V0 = vector_max(image_in_vec[i * W + j], Vth);
            vector V1 = V0 - Vth;       // 0 if below threshold, >=1 otherwise
            V0 = vector_min(V1, Vones); // saturate to {0,1}
            image_in_vec[i * W + j] = V0 * C + Vmin;
            //image_in_vec[i * W + j] = (vector)_mac4((signed_vector)V0, (signed_vector)C, (signed_vector)Vmin);
        }
#else
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned i = 0; i < height; i++)
        for (unsigned j = 0; j < width; j++)
            if (image_in[i * width + j] < thresh)
                image_in[i * width + j] = val_min;
            else
                image_in[i * width + j] = val_max;
#endif
}

// 3x3 morphological erosion (neighborhood minimum) over the interior;
// the one-pixel border is copied through unchanged.
// SIMD path processes two output columns per iteration, keeping a sliding
// 3-row window of vectors in registers.
static void erosion(pixel *restrict image_in, pixel *restrict image_out, unsigned width, unsigned height)
{
#ifdef USE_INTRINSICS
#ifdef SHORT_PIXEL
    vector V11, V12, V21, V22, V31, V32;
    pixel min1;
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned y = 1; y < width - 2; y+=2) {
        V21 = *((vector *)&image_in[y-1]);
        V22 = *((vector *)&image_in[y+1]);
        V31 = *((vector *)&image_in[width + y-1]);
        V32 = *((vector *)&image_in[width + y+1]);
        for (unsigned x = 1; x < height - 1; x++) {
            // shift the 3-row window down by one row
            V11 = V21; V12 = V22;
            V21 = V31; V22 = V32;
            V31 = *((vector *)&image_in[(x+1)*width + y-1]);
            V32 = *((vector *)&image_in[(x+1)*width + y+1]);
            V11 = vector_min(V11, V21);
            V12 = vector_min(V12, V22);
            V11 = vector_min(V11, V31);
            V12 = vector_min(V12, V32);
            min1 = ((V11[1] < V12[0])? V11[1]: V12[0]); // shared middle column
            image_out[x*width + y] = ((V11[0] < min1)? V11[0]: min1);
            image_out[x*width + y+1] = ((V12[1] < min1)? V12[1]: min1);
        }
    }
#else
    vector V1, V2, V3;
    pixel min1;
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned y = 1; y < width - 2; y+=2) {
        V2 = *((vector *)&image_in[y-1]);
        V3 = *((vector *)&image_in[width + y-1]);
        for (unsigned x = 1; x < height - 1; x++) {
            V1 = V2;
            V2 = V3;
            V3 = *((vector *)&image_in[(x+1)*width + y-1]);
            V1 = vector_min(V1, V2);
            V1 = vector_min(V1, V3);
            min1 = ((V1[1] < V1[2])? V1[1]: V1[2]); // min of the two middle columns
            image_out[x*width + y] = ((V1[0] < min1)? V1[0]: min1);
            image_out[x*width + y+1] = ((V1[3] < min1)? V1[3]: min1);
        }
    }
#endif // SHORT_PIXEL
#else
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned x = 1; x < height - 1; x++) {
        for (unsigned y = 1; y < width - 1; y++) {
            pixel min = 255;
            for (unsigned i = 0; i < KERNEL_SIZE; i++)
                for (unsigned j = 0; j < KERNEL_SIZE; j++)
                    if (image_in[(x-1) * width + (y-1) + (i*width + j)] < min)
                        min = image_in[(x-1) * width + (y-1) + (i*width + j)];
            image_out[x*width + y] = min;
        }
    }
#endif

    /* Boundary Copy */
    // top/bottom rows
#if defined(USE_INTRINSICS) && (!defined(USE_TILING) || defined(TILE_ALIGMENT))
    vector *image_in_vec = (vector *) image_in;
    vector *image_out_vec = (vector *) image_out;
    const unsigned W = width/VECTOR_SIZE;
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned k = 0; k < W; k++) {
        image_out_vec[k] = image_in_vec[k];
        image_out_vec[(height-1) * W + k] = image_in_vec[(height-1) * W + k];
    }
#else
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned k = 0; k < width; k++) {
        image_out[k] = image_in[k];
        image_out[(height-1) * width + k] = image_in[(height-1) * width + k];
    }
#endif
    // left/right columns
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned k = 0; k < height; k++) {
        image_out[k*width] = image_in[k*width];
        image_out[k*width + width - 1] = image_in[k*width + width - 1];
    }
}

// 3x3 morphological dilatation (neighborhood maximum); mirror of erosion().
static void dilatation(pixel *restrict image_in, pixel *restrict image_out, unsigned width, unsigned height)
{
#ifdef USE_INTRINSICS
#ifdef SHORT_PIXEL
    vector V11, V12, V21, V22, V31, V32;
    pixel max1;
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned y = 1; y < width - 2; y+=2) {
        V21 = *((vector *)&image_in[y-1]);
        V22 = *((vector *)&image_in[y+1]);
        V31 = *((vector *)&image_in[width + y-1]);
        V32 = *((vector *)&image_in[width + y+1]);
        for (unsigned x = 1; x < height - 1; x++) {
            V11 = V21; V12 = V22;
            V21 = V31; V22 = V32;
            V31 = *((vector *)&image_in[(x+1)*width + y-1]);
            V32 = *((vector *)&image_in[(x+1)*width + y+1]);
            V11 = vector_max(V11, V21);
            V12 = vector_max(V12, V22);
            V11 = vector_max(V11, V31);
            V12 = vector_max(V12, V32);
            max1 = ((V11[1] > V12[0])? V11[1]: V12[0]);
            image_out[x*width + y] = ((V11[0] > max1)? V11[0]: max1);
            image_out[x*width + y+1] = ((V12[1] > max1)? V12[1]: max1);
        }
    }
#else
    vector V1, V2, V3;
    pixel max1;
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned y = 1; y < width - 2; y+=2) {
        V2 = *((vector *)&image_in[y-1]);
        V3 = *((vector *)&image_in[width + y-1]);
        for (unsigned x = 1; x < height - 1; x++) {
            V1 = V2;
            V2 = V3;
            V3 = *((vector *)&image_in[(x+1)*width + y-1]);
            V1 = vector_max(V1, V2);
            V1 = vector_max(V1, V3);
            max1 = ((V1[1] > V1[2])? V1[1]: V1[2]);
            image_out[x*width + y] = ((V1[0] > max1)? V1[0]: max1);
            image_out[x*width + y+1] = ((V1[3] > max1)? V1[3]: max1);
        }
    }
#endif // SHORT_PIXEL
#else
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned x = 1; x < height - 1; x++) {
        for (unsigned y = 1; y < width - 1; y++) {
            pixel max = 0;
            for (unsigned i = 0; i < KERNEL_SIZE; i++)
                for (unsigned j = 0; j < KERNEL_SIZE; j++)
                    if (image_in[(x-1) * width + (y-1) + (i*width + j)] > max)
                        max = image_in[(x-1) * width + (y-1) + (i*width + j)];
            image_out[x*width + y] = max;
        }
    }
#endif

    /* Boundary Copy */
#if defined(USE_INTRINSICS) && (!defined(USE_TILING) || defined(TILE_ALIGMENT))
    vector *image_in_vec = (vector *) image_in;
    vector *image_out_vec = (vector *) image_out;
    const unsigned W = width/VECTOR_SIZE;
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned k = 0; k < W; k++) {
        image_out_vec[k] = image_in_vec[k];
        image_out_vec[(height-1) * W + k] = image_in_vec[(height-1) * W + k];
    }
#else
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned k = 0; k < width; k++) {
        image_out[k] = image_in[k];
        image_out[(height-1) * width + k] = image_in[(height-1) * width + k];
    }
#endif
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned k = 0; k < height; k++) {
        image_out[k*width] = image_in[k*width];
        image_out[k*width + width - 1] = image_in[k*width + width - 1];
    }
}

// 3x3 convolution storing |result| ("rect"ified).  SIMD paths split the 3x3
// kernel into per-row coefficient vectors; the *l set covers the even output
// column, the *r set is the same coefficients rotated one lane
// (__builtin_shuffle) for the odd output column.  __builtin_pulp_(s)dotsp2/4
// are PULP multiply-accumulate dot-product builtins defined by the toolchain.
static void convolution_rect(pixel *restrict image_in, coeff *restrict kernel, pixel *restrict image_out, unsigned width, unsigned height)
{
#ifdef USE_INTRINSICS
#ifdef SHORT_PIXEL
    vector V11, V12, V21, V22, V31, V32;
    coeff_vector C11l, C12l, C21l, C22l, C31l, C32l, C11r, C12r, C21r, C22r, C31r, C32r;
    vector temp;
    // unpack the 9 kernel taps into 2-lane coefficient vectors, padding with 0
    C11l = *((vector *)kernel);
    C12l = *((vector *)(kernel+2));
    temp = *((vector *)(kernel+4));
    C21l[0] = C12l[1];
    C12l[1] = 0;
    C21l[1] = temp[0];
    C22l[0] = temp[1];
    C22l[1] = 0;
    C31l = *((vector *)(kernel+6));
    C32l[0] = kernel[8];
    C32l[1] = 0;
    vector mask1 = {3,0};
    vector mask2 = {1,2};
    C11r = __builtin_shuffle(C11l, C12l, mask1);
    C12r = __builtin_shuffle(C11l, C12l, mask2);
    C21r = __builtin_shuffle(C21l, C22l, mask1);
    C22r = __builtin_shuffle(C21l, C22l, mask2);
    C31r = __builtin_shuffle(C31l, C32l, mask1);
    C32r = __builtin_shuffle(C31l, C32l, mask2);
    int val;
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned y = 1; y < width - 2; y+=2) {
        V21 = *((vector *)&image_in[y-1]);
        V22 = *((vector *)&image_in[y+1]);
        V31 = *((vector *)&image_in[width + y-1]);
        V32 = *((vector *)&image_in[width + y+1]);
        for (unsigned x = 1; x < height - 1; x++) {
            V11 = V21; V12 = V22;
            V21 = V31; V22 = V32;
            V31 = *((vector *)&image_in[(x+1)*width + y-1]);
            V32 = *((vector *)&image_in[(x+1)*width + y+1]);
            val = __builtin_pulp_dotsp2(V11, C11l);
            val = __builtin_pulp_sdotsp2(V12, C12l, val);
            val = __builtin_pulp_sdotsp2(V21, C21l, val);
            val = __builtin_pulp_sdotsp2(V22, C22l, val);
            val = __builtin_pulp_sdotsp2(V31, C31l, val);
            val = __builtin_pulp_sdotsp2(V32, C32l, val);
            image_out[x*width + y] = (val>0? (short)val: (short)-val);
            val = __builtin_pulp_dotsp2(V11, C11r);
            val = __builtin_pulp_sdotsp2(V12, C12r, val);
            val = __builtin_pulp_sdotsp2(V21, C21r, val);
            val = __builtin_pulp_sdotsp2(V22, C22r, val);
            val = __builtin_pulp_sdotsp2(V31, C31r, val);
            val = __builtin_pulp_sdotsp2(V32, C32r, val);
            image_out[x*width + y+1] = (val>0? (short)val: (short)-val);;
        }
    }
#else
    vector V1, V2, V3;
    vector C1l, C2l, C3l, C1r, C2r, C3r;
    // one 4-lane vector per kernel row, 4th lane zeroed
    C1l = *((vector *)kernel);
    C1l[3] = 0;
    C2l = *((vector *)(kernel+3));
    C2l[3] = 0;
    C3l = *((vector *)(kernel+6));
    C3l[3] = 0;
    vector mask = {3,0,1,2};
    C1r = __builtin_shuffle(C1l, mask);
    C2r = __builtin_shuffle(C2l, mask);
    C3r = __builtin_shuffle(C3l, mask);
    short val;
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned y = 1; y < width - 2; y+=2) {
        V2 = *((vector *)&image_in[y-1]);
        V3 = *((vector *)&image_in[width + y-1]);
        for (unsigned x = 1; x < height - 1; x++) {
            V1 = V2;
            V2 = V3;
            V3 = *((vector *)&image_in[(x+1)*width + y-1]);
            val = __builtin_pulp_dotsp4(V1, C1l);
            val = __builtin_pulp_sdotsp4(V2, C2l, val);
            val = __builtin_pulp_sdotsp4(V3, C3l, val);
            image_out[x*width + y] = (val>0? (short)val: (short)-val);;
            val = __builtin_pulp_dotsp4(V1, C1r);
            val = __builtin_pulp_sdotsp4(V2, C2r, val);
            val = __builtin_pulp_sdotsp4(V3, C3r, val);
            image_out[x*width + y+1] = (val>0? (short)val: (short)-val);;
        }
    }
#endif // SHORT_PIXEL
#else
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned x = 1; x < height - 1; x++) {
        for (unsigned y = 1; y < width - 1; y++) {
            int val = 0;
            for (unsigned i = 0; i < KERNEL_SIZE; i++) {
                for (unsigned j = 0; j < KERNEL_SIZE; j++) {
                    val += (int)image_in[(x-1) * width + (y-1) + i*width + j] * (int)kernel[i*KERNEL_SIZE + j];
                }
            }
            image_out[x * width + y] = (val>0? (short)val: (short)-val);
        }
    }
#endif

    /* Boundary Copy */
#if defined(USE_INTRINSICS) && (!defined(USE_TILING) || defined(TILE_ALIGMENT))
    vector *image_in_vec = (vector *) image_in;
    vector *image_out_vec = (vector *) image_out;
    const unsigned W = width/VECTOR_SIZE;
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned k = 0; k < W; k++) {
        image_out_vec[k] = image_in_vec[k];
        image_out_vec[(height-1) * W + k] = image_in_vec[(height-1) * W + k];
    }
#else
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned k = 0; k < width; k++) {
        image_out[k] = image_in[k];
        image_out[(height-1) * width + k] = image_in[(height-1) * width + k];
    }
#endif
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned k = 0; k < height; k++) {
        image_out[k*width] = image_in[k*width];
        image_out[k*width + width - 1] = image_in[k*width + width - 1];
    }
}

// image_in1 *= image_in2, elementwise (applies the binary mask to the frame).
static void multiply(pixel *restrict image_in1, pixel *restrict image_in2, unsigned width, unsigned height)
{
#ifdef USE_INTRINSICS
    vector *image_in1_vec = (vector *) image_in1;
    vector *image_in2_vec = (vector *) image_in2;
    const unsigned W = width/VECTOR_SIZE;
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned i = 0; i < height; i++)
        for (unsigned j = 0; j < W; j++)
            image_in1_vec[i * W + j] *= image_in2_vec[i * W + j];
#else
#ifdef USE_OMP
#pragma omp for schedule(static)
#endif
    for (unsigned i = 0; i < height; i++)
        for (unsigned j = 0; j < width; j++)
            image_in1[i*width + j] = image_in1[i*width + j] * image_in2[i*width + j];
#endif
}

// Compares output to the golden image; prints each mismatch and returns the
// number of mismatching pixels (0 = success).
static int check_image(pixel *restrict output, pixel *restrict golden, unsigned width, unsigned height)
{
    unsigned errors = 0;
    for (unsigned i = 0; i < height * width; i++)
        if (output[i] != golden[i]) {
            errors++;
            printf("Error@%d -> %d |= %d\n", i, output[i], golden[i]);
        }
    /* if (errors == 0) */
    /*   printf("OOOOOOK!!!!!!\n"); */
    /* else */
    /*   printf("ERROR!!!! %d\n", errors); */
    return errors;
}

// Debug helper: dumps the image as rows of 2-digit hex values.
static void print_image(pixel *restrict test, unsigned width, unsigned height)
{
    for (unsigned y = 0; y < height; y++) {
        for (unsigned x = 0; x < width; x++)
            printf("%3x", (unsigned char) test[width * y + x]);
        printf("\n");
    }
    printf("\n");
}

// L2 data (main memory): Sobel kernels and the final result image
coeff sobel1_l2[9] = {1,2,1,0,0,0,-1,-2,-1};    // horizontal-edge Sobel
coeff sobel2_l2[9] = {1,0,-1,2,0,-2,1,0,-1};    // vertical-edge Sobel
pixel result[IMG_HEIGHT][IMG_WIDTH];

// L1 data (cluster scratchpad, GAP_L1_DATA attribute from common.h)
GAP_L1_DATA coeff sobel1[9];
GAP_L1_DATA coeff sobel2[9];
GAP_L1_DATA pixel image_back[IMG_HEIGHT][IMG_WIDTH];
#ifdef USE_TILING
// with tiling, image_in/image_out are pointers switched between the two
// double-buffered DMA staging buffers
GAP_L1_DATA pixel (*image_in)[BUFFER_WIDTH];
GAP_L1_DATA pixel (*image_out)[BUFFER_WIDTH];
GAP_L1_DATA pixel image_in_buffers[2][BUFFER_HEIGHT][BUFFER_WIDTH];
GAP_L1_DATA pixel image_out_buffers[2][TILE_HEIGHT][TILE_WIDTH];
#else
GAP_L1_DATA pixel image_in[BUFFER_HEIGHT][BUFFER_WIDTH];
GAP_L1_DATA pixel image_out[BUFFER_HEIGHT][BUFFER_WIDTH];
#endif
GAP_L1_DATA pixel image_temp0[BUFFER_HEIGHT][BUFFER_WIDTH];
GAP_L1_DATA pixel image_temp1[BUFFER_HEIGHT][BUFFER_WIDTH];
GAP_L1_DATA pixel image_temp2[BUFFER_HEIGHT][BUFFER_WIDTH];

// Driver: runs the full pipeline 3 times over the test frame Y_test against
// background Y_back (both from data_20_24.h) and checks against Y_golden.
int main(int argc, char *argv[])
{
    int time = 0;   // NOTE(review): unused
    int max_pixel = 0;
    unsigned int width = IMG_WIDTH;
    unsigned int height = IMG_HEIGHT;
    dma_req_t input_job_id;
    dma_req_t output_job_id[2];
#ifdef USE_OMP
    int max_local[NUM_CORES];   // per-core partial maxima for the reduction
#endif
#ifdef USE_TILING
    unsigned int buffer_id = 0, next_buffer_id;
    unsigned int tile_offset[N_TILES], out_tile_offset[N_TILES], tile_width[N_TILES], tile_height[N_TILES], l_border[N_TILES], t_border[N_TILES];
#endif

    /* printf("Initializing Motion Detection Application Data Structures... \n"); */

#ifdef USE_TILING
    // Precompute tile geometry: interior tiles grow by BORDER_SIZE on each
    // inner side so the filter pipeline has valid halo data.
    for(unsigned i=0, k=0; i<H_TILES; ++i) {
        for(unsigned j=0; j<W_TILES; ++j) {
            t_border[k] = (i>0? BORDER_SIZE: 0);
            l_border[k] = (j>0? BORDER_SIZE: 0);
            unsigned int w_borders = (j>0? BORDER_SIZE: 0) + (j<W_TILES-1? BORDER_SIZE: 0);
            unsigned int h_borders = (i>0? BORDER_SIZE: 0) + (i<H_TILES-1? BORDER_SIZE: 0);
            tile_offset[k] = (IMG_WIDTH*(TILE_HEIGHT-t_border[k]))*i + TILE_WIDTH*j - l_border[k];
            out_tile_offset[k] = (IMG_WIDTH*TILE_HEIGHT)*i + TILE_WIDTH*j;
            tile_width[k] = TILE_WIDTH + w_borders;
            tile_height[k] = TILE_HEIGHT + h_borders;
            //printf("(%d, %d) -> %d (%d x %d)\n", i, j, tile_offset[k], tile_width[k], tile_height[k]);
            ++k;
        }
    }
#endif

    // INITIALIZING SOBEL VECTORS (copy L2 -> L1)
    for (unsigned i = 0; i < 9; i++) {
        sobel1[i] = sobel1_l2[i];
        sobel2[i] = sobel2_l2[i];
    }

    // INITIALIZING BACKGROUND
    memcpy_async(image_back, Y_back, IMG_WIDTH*IMG_HEIGHT*sizeof(pixel), &input_job_id);

    // INITIALIZING IMAGE
#ifdef USE_TILING
    // Get first input tile (wait)
    memcpy_async2D(&image_in_buffers[0][0][0], (pixel *)Y_test, tile_width[0]*tile_height[0]*sizeof(pixel), IMG_WIDTH*sizeof(pixel), tile_width[0]*sizeof(pixel), &input_job_id);
    memcpy_wait(&input_job_id);
    // Get second input tile (no wait)
    memcpy_async2D(&image_in_buffers[1][0][0], (pixel *)Y_test+tile_offset[1], tile_width[1]*tile_height[1]*sizeof(pixel), IMG_WIDTH*sizeof(pixel), tile_width[1]*sizeof(pixel), &input_job_id);
#else
    // Get full input image
    memcpy_async(image_in, Y_test, IMG_WIDTH*IMG_HEIGHT*sizeof(pixel), &input_job_id);
    memcpy_wait(&input_job_id);
#endif

#ifdef APP_DEBUG
    print_image((pixel *)image_back, IMG_WIDTH, IMG_HEIGHT);
    print_image((pixel *)Y_test, IMG_WIDTH, IMG_HEIGHT);
#endif

    /* printf("Starting Motion Detection Application \n"); */
    for(int iter = 0; iter < 3; ++iter) {
#ifdef PROFILING
        profile_start(iter);
#endif

#ifdef USE_TILING
        // First pass over all tiles (PRECISE_TILING): compute the global
        // max of |frame - background| before any thresholding, with DMA
        // double-buffering of input tiles.
        // Tiling loop
        for(unsigned k=0; k<N_TILES; ++k) {
            // Tile size
            width = tile_width[k];
            height = tile_height[k];
            // Double buffering status
            image_in = image_in_buffers[buffer_id];
            image_out = image_out_buffers[buffer_id];
            //printf(">>>>>>>>>> IMAGE IN (k = %d) \n", k);
            //print_image((pixel *)image_in, width, height);
#ifdef PRECISE_TILING
#ifdef USE_OMP
#pragma omp parallel num_threads(NUM_CORES)
            {
                max_local[omp_get_thread_num()] = max_sub_image((pixel *)image_in, (pixel *)image_back, width, height, IMG_WIDTH, tile_offset[k]);;
            }
            for(unsigned i=0; i<NUM_CORES; ++i)
                max_pixel = ((max_pixel > max_local[i])? max_pixel: max_local[i]);
#else
            int new_val = max_sub_image((pixel *)image_in, (pixel *)image_back, width, height, IMG_WIDTH, tile_offset[k]);
            if(new_val > max_pixel) max_pixel = new_val;
#endif
            // rotate buffers and prefetch tile k+2 into the one just freed
            next_buffer_id = (buffer_id==0? 1: 0);
            if(k < N_TILES-1) memcpy_wait(&input_job_id);
            if(k < N_TILES-2) {
                memcpy_async2D(&image_in_buffers[buffer_id][0][0], (pixel *)Y_test+tile_offset[k+2], tile_width[k+2]*tile_height[k+2]*sizeof(pixel), IMG_WIDTH*sizeof(pixel), tile_width[k+2]*sizeof(pixel), &input_job_id);
            }
            buffer_id = next_buffer_id;
        }

        // Re-stage the first two tiles for the main processing pass.
        // Get first input tile (wait)
        memcpy_async2D(&image_in_buffers[0][0][0], (pixel *)Y_test, tile_width[0]*tile_height[0]*sizeof(pixel), IMG_WIDTH*sizeof(pixel), tile_width[0]*sizeof(pixel), &input_job_id);
        memcpy_wait(&input_job_id);
        // Get second input tile (no wait)
        memcpy_async2D(&image_in_buffers[1][0][0], (pixel *)Y_test+tile_offset[1], tile_width[1]*tile_height[1]*sizeof(pixel), IMG_WIDTH*sizeof(pixel), tile_width[1]*sizeof(pixel), &input_job_id);
        buffer_id = 0;

        // Main processing pass over all tiles.  NOTE: the preprocessor
        // conditionals below deliberately weave braces across #ifdef blocks
        // (the loop opened here is closed under a later #ifdef USE_TILING).
        for(unsigned k=0; k<N_TILES; ++k) {
            // Tile size
            width = tile_width[k];
            height = tile_height[k];
            // Double buffering status
            image_in = image_in_buffers[buffer_id];
            image_out = image_out_buffers[buffer_id];
#endif // PRECISE_TILING
#endif

#ifdef USE_OMP
        // The filter kernels contain orphaned "omp for" pragmas, so they
        // must be invoked from inside this parallel region.
#ifdef USE_TILING
#pragma omp parallel num_threads(NUM_CORES) default(shared) firstprivate(width, height, buffer_id, k)
#else
#pragma omp parallel num_threads(NUM_CORES) default(shared) firstprivate(width, height)
#endif
        {
#endif

#ifdef APP_DEBUG
            printf("Sub image \n");
#endif
            // 1) background subtraction
#ifdef USE_TILING
            sub_image((pixel *)image_in, (pixel *)image_back, (pixel *)image_temp0, width, height, IMG_WIDTH, tile_offset[k]);
#else
            sub_image((pixel *)image_in, (pixel *)image_back, (pixel *)image_temp0, width, height, IMG_WIDTH, 0);
            //val_abs((pixel*) image_temp0);
#endif

            // Without PRECISE_TILING, the max is computed per-tile here
            // (over whatever has been processed so far) instead of globally.
#ifndef PRECISE_TILING
#ifdef USE_OMP
            max_local[omp_get_thread_num()] = max_image((pixel *)image_temp0, width, height);
#pragma omp single
            {
                for(unsigned i=0; i<NUM_CORES; ++i)
                    max_pixel = ((max_pixel > max_local[i])? max_pixel: max_local[i]);
            }
#else
            max_pixel = max_image((pixel *)image_temp0, width, height);
#endif
#endif

#ifdef APP_DEBUG
            print_image((pixel *)image_temp0, width, height);
            printf("Max pixel: %x \n", max_pixel);
            printf("Binarization \n");
#endif
            // 2) threshold at 30% of the max difference -> binary mask {0,1}
            binarisation((pixel *)image_temp0, (max_pixel * 3 / 10), 1, 0, width, height);
#ifdef APP_DEBUG
            print_image((pixel *)image_temp0, width, height);
            printf("Erosion \n");
#endif
            // 3) morphological open (erosion then dilatation) to denoise
            erosion((pixel *)image_temp0, (pixel *)image_temp1, width, height);
#ifdef APP_DEBUG
            print_image((pixel *)image_temp1, width, height);
            printf("Dilatation \n");
#endif
            dilatation((pixel *)image_temp1, (pixel *)image_temp2, width, height);
#ifdef APP_DEBUG
            print_image((pixel *)image_temp2, width, height);
            printf("Sobel Convolution \n");
#endif
            // 4) Sobel in both directions, summed, then inverted to a
            // {0,1} mask (binarisation with val_max=0, val_min=1)
            convolution_rect((pixel *)image_temp2, sobel1, (pixel *)image_temp0, width, height);
            //val_abs((pixel*) image_temp0, width, height);
            convolution_rect((pixel *)image_temp2, sobel2, (pixel *)image_temp1, width, height);
            //val_abs((pixel*) image_back, width, height);
            sum_image((pixel *)image_temp0, (pixel *)image_temp1, width, height);
            binarisation((pixel *)image_temp0, 1, 0, 1, width, height);
#ifdef APP_DEBUG
            print_image((pixel *)image_temp0, width, height);
            printf("Final Multiplication \n");
#endif
            // 5) apply the mask to the input frame
            multiply((pixel *)image_temp0, (pixel *)image_in, width, height);
#ifdef APP_DEBUG
            print_image((pixel *)image_temp0, width, height);
#endif

#ifdef USE_TILING
            // crop the halo off and stage the tile for DMA-out
            for(unsigned i = 0; i<TILE_HEIGHT; ++i)
                for(unsigned j = 0; j<TILE_WIDTH; ++j)
                    image_out_buffers[buffer_id][i][j] = image_temp0[t_border[k]+i][l_border[k]+j];
#endif

#ifdef USE_OMP
        } // parallel region
#endif

#ifdef USE_TILING
        // rotate buffers: wait for the prefetch and the previous write-out,
        // then start the next input prefetch and this tile's write-out
        next_buffer_id = (buffer_id==0?1: 0);
        if(k < N_TILES-1) memcpy_wait(&input_job_id);
        if(k > 0) memcpy_wait(&output_job_id[next_buffer_id]);
        if(k < N_TILES-2) {
            memcpy_async2D(&image_in_buffers[buffer_id][0][0], (pixel *)Y_test+tile_offset[k+2], tile_width[k+2]*tile_height[k+2]*sizeof(pixel), IMG_WIDTH*sizeof(pixel), tile_width[k+2]*sizeof(pixel), &input_job_id);
        }
        memcpy_async2D((pixel *)result+out_tile_offset[k], &image_out_buffers[buffer_id][0][0], TILE_WIDTH*TILE_HEIGHT*sizeof(pixel), IMG_WIDTH*sizeof(pixel), TILE_WIDTH*sizeof(pixel), &output_job_id[buffer_id]);
        buffer_id = next_buffer_id;
        } // tiling loop
        memcpy_wait(&output_job_id[1]);
#else
        memcpy_async(result, image_temp0, IMG_WIDTH*IMG_HEIGHT*sizeof(pixel), &output_job_id[0]);
        memcpy_wait(&output_job_id[0]);
#endif

#ifdef PROFILING
        profile_stop(iter);
#endif

#ifdef CHECK
        // NOTE(review): with tiling enabled, width/height here still hold
        // the LAST tile's dimensions, so only a tile-sized region of result
        // is compared against Y_golden — confirm this is intentional.
        if (check_image((pixel *)result, (pixel *)Y_golden, width, height))
            return -1;
#endif
    }//iter

    profile_show();
    /* printf("Motion Detection Application Complete!!!\n"); */
    return 0;
}
target-34.c
/* Test of asynchronous "target", "target enter/exit data" and "task"
   constructs chained through depend() clauses on elements of d[].
   Each host-side check asserts the exact value produced by the device
   updates that must have completed at that point.  */

extern void abort (void);

int
main ()
{
  /* d[7] carries no data; its elements serve only as depend() handles
     that order the asynchronous target tasks below.  */
  int a = 1, b = 2, c = 4, d[7];
  #pragma omp parallel
  {
    #pragma omp single
    {
      /* The taskgroup guarantees all four nowait device tasks have
         finished before the first check.  */
      #pragma omp taskgroup
      {
        /* Map a, b, c to the device; later tasks wait on d[0].  */
        #pragma omp target enter data nowait map (to: a, b, c) depend(out: d[0])
        /* Runs after the enter-data: a |= 4, b |= 8 on the device copies.  */
        #pragma omp target nowait map (alloc: a, b) depend(in: d[0]) depend(out: d[1])
        {
          #pragma omp atomic update
          a |= 4;
          #pragma omp atomic update
          b |= 8;
        }
        /* May run concurrently with the task above; both update a,
           hence the atomics: a |= 16, c |= 32.  */
        #pragma omp target nowait map (alloc: a, c) depend(in: d[0]) depend(out: d[2])
        {
          #pragma omp atomic update
          a |= 16;
          #pragma omp atomic update
          c |= 32;
        }
        /* Copy results back once both compute tasks are done.  */
        #pragma omp target exit data nowait map (from: a, b, c) depend(in: d[1], d[2])
      }
      /* 1|4|16 = 21, 2|8 = 10, 4|32 = 36.  */
      if (a != 21 || b != 10 || c != 36)
        abort ();
      /* These nowait regions are only synchronized by the implicit
         barrier at the end of this single construct.  */
      #pragma omp target map (tofrom: a, b) nowait
      {
        a &= ~16;  /* 21 & ~16 = 5 */
        b &= ~2;   /* 10 & ~2 = 8 */
      }
      #pragma omp target map (tofrom: c) nowait
      {
        c |= 8;    /* 36 | 8 = 44 */
      }
    } /* Implicit barrier here. */
    #pragma omp single
    {
      if (a != 5 || b != 8 || c != 44)
        abort ();
      /* The taskwait below waits for these two nowait target tasks.  */
      #pragma omp target map (tofrom: a, b) nowait
      {
        a |= 32;   /* 5 | 32 = 37 */
        b |= 4;    /* 8 | 4 = 12 */
      }
      #pragma omp target map (tofrom: c) nowait
      c &= ~4;     /* 44 & ~4 = 40 */
      #pragma omp taskwait
      if (a != 37 || b != 12 || c != 40)
        abort ();
      /* Two more device tasks; an included (if(0)) task depending on
         both serves as the synchronization point for the check.  */
      #pragma omp target nowait map (tofrom: a, b) depend(out: d[3])
      {
        #pragma omp atomic update
        a = a + 9;
        b -= 8;
      }
      #pragma omp target nowait map (tofrom: a, c) depend(out: d[4])
      {
        #pragma omp atomic update
        a = a + 4;
        c >>= 1;
      }
      /* 37+9+4 = 50, 12-8 = 4, 40>>1 = 20.  */
      #pragma omp task if (0) depend (in: d[3], d[4]) shared (a, b, c)
      if (a != 50 || b != 4 || c != 20)
        abort ();
      /* Host task plus device tasks, all joined by the taskwait.  */
      #pragma omp task shared (a)
      a += 50;
      #pragma omp target nowait map (tofrom: b)
      b++;
      #pragma omp target map (tofrom: c) nowait
      c--;
      #pragma omp taskwait
      if (a != 100 || b != 5 || c != 19)
        abort ();
      /* depend() orders the final (synchronous) target region after the
         two increments; it validates the device-visible values itself.  */
      #pragma omp target map (tofrom: a) nowait depend(out: d[5])
      a++;
      #pragma omp target map (tofrom: b) nowait depend(out: d[6])
      b++;
      #pragma omp target map (tofrom: a, b) depend(in: d[5], d[6])
      {
        if (a != 101 || b != 6)
          a = -9;
        else
          {
            a = 24;
            b = 38;
          }
      }
      if (a != 24 || b != 38)
        abort ();
    } /* Implicit barrier here. */
    #pragma omp master
    {
      #pragma omp target nowait map (tofrom: a, b)
      {
        a *= 2;    /* 24 * 2 = 48 */
        b++;       /* 38 + 1 = 39 */
      }
      #pragma omp target map (tofrom: c) nowait
      c--;         /* 19 - 1 = 18 */
    }
    /* The explicit barrier also completes the master's nowait tasks
       before every thread re-checks the final values.  */
    #pragma omp barrier
    if (a != 48 || b != 39 || c != 18)
      abort ();
  }
  return 0;
}
GB_binop__isle_int32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// This instance hard-codes every binary-operator kernel for ISLE
// ("is less or equal") on int32_t: cij = (aij <= bij).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):        GB (_AaddB__isle_int32)
// A.*B function (eWiseMult):      GB (_AemultB_01__isle_int32)
// A.*B function (eWiseMult):      GB (_AemultB_02__isle_int32)
// A.*B function (eWiseMult):      GB (_AemultB_03__isle_int32)
// A.*B function (eWiseMult):      GB (_AemultB_bitmap__isle_int32)
// A*D function (colscale):        GB (_AxD__isle_int32)
// D*A function (rowscale):        GB (_DxB__isle_int32)
// C+=B function (dense accum):    GB (_Cdense_accumB__isle_int32)
// C+=b function (dense accum):    GB (_Cdense_accumb__isle_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3):  GB (_Cdense_ewise3_noaccum__isle_int32)
// C=scalar+B                      GB (_bind1st__isle_int32)
// C=scalar+B'                     GB (_bind1st_tran__isle_int32)
// C=A+scalar                      GB (_bind2nd__isle_int32)
// C=A'+scalar                     GB (_bind2nd_tran__isle_int32)

// C type:   int32_t
// A type:   int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij <= bij)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator (i and j are unused by ISLE; kept for the common macro shape)
#define GB_BINOP(z, x, y, i, j) \
    z = (x <= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLE || GxB_NO_INT32 || GxB_NO_ISLE_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISLE is none of those, so this kernel is compiled out for this operator.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isle_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isle_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isle_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (generator artifact); the return above exits.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isle_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isle_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isle_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces are freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__isle_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isle_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (GB_BINOP_FLIP is 0 for ISLE, so only this branch is compiled.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isle_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isle_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isle_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in B's bitmap
        if (!GBB (Bb, p)) continue ;
        int32_t bij = Bx [p] ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isle_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in A's bitmap
        if (!GBB (Ab, p)) continue ;
        int32_t aij = Ax [p] ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int32_t aij = Ax [pA] ;         \
    Cx [pC] = (x <= aij) ;          \
}

GrB_Info GB (_bind1st_tran__isle_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any code included after this point
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int32_t aij = Ax [pA] ;         \
    Cx [pC] = (aij <= y) ;          \
}

GrB_Info GB (_bind2nd_tran__isle_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
helloworld.c
#include <stdio.h>
#include <omp.h>

/*
 * Offload smoke test.  A target region reports whether it ran on the
 * initial (host) device, prints from the device, and uses printf's %n
 * conversion to store a character count through a host pointer passed
 * via is_device_ptr.  Afterwards the host repeats the %n trick.
 *
 * The return value counts failures: non-zero if the region ran on the
 * host, or if either %n write did not land the expected count.
 */
int main(void)
{
  int on_host = 1;
  int written = 0;
  int *written_ptr = &written;
  int failures;

#pragma omp target map(tofrom: on_host) is_device_ptr(written_ptr)
  {
    on_host = omp_is_initial_device();
    printf("Hello world. %d\n", 100);
    int iter = 0;
    while (iter < 5)
      {
        printf("Hello world. iteration %d\n", iter);
        ++iter;
      }
    /* "123456789" is nine characters, so %n should store 9. */
    printf("123456789%n should write 9 to host_val_ptr\n", written_ptr);
  }

  failures = on_host;
  if (written != 9)
    ++failures;

  printf("Target region executed on the %s\n", on_host ? "host" : "device");
  printf("The value of host_val from device printf is %d\n", written);
  /* Host-side %n overwrites the count with 8. */
  printf("12345678%n should write 8 to host_val_ptr\n", written_ptr);
  printf("The value of host_val from host printf is %d\n", written);
  if (written != 8)
    ++failures;

  return failures;
}
GB_unaryop__ainv_uint32_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// This instance hard-codes AINV (additive inverse, cij = -aij) applied to a
// double matrix and cast to uint32_t output.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__ainv_uint32_fp64
// op(A') function: GB_tran__ainv_uint32_fp64

// C type:  uint32_t
// A type:  double
// cast:    uint32_t cij ; GB_CAST_UNSIGNED(cij,aij,32)
// unaryop: cij = -aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting: double -> uint32_t via GB_CAST_UNSIGNED (which handles the
// out-of-range / NaN cases; see its definition in GB.h)
#define GB_CASTING(z, aij) \
    uint32_t z ; GB_CAST_UNSIGNED(z,aij,32) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint32_fp64
(
    uint32_t *Cx,       // Cx and Ax may be aliased
    double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // each entry is independent: cast-negate Ax [p] into Cx [p]
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint32_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // numerical phase only; the symbolic phase is done by the caller
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
guess.c
#include <stdio.h> #include <string.h> #include <stdint.h> #include <omp.h> #include <math.h> #include <ktime.h> #include <geometry.h> #ifdef __USE_HW_COUNTER #include <perf.h> #include <kperf.h> #endif #include <phy.h> inline void iguess(struct igtbl *restrict ig) { #ifdef __USE_HW_COUNTER const struct fd fd = ig->perf_counters->fd; struct counters start; perf_read(fd, &start); const uint64_t icycle = __rdtsc(); #endif struct ktime ktime; setktime(&ktime); const size_t sz = ig->sz; const size_t bsz = ig->bsz; struct ivals *restrict iv = ig->iv; double *restrict q0 = ig->q0; double *restrict q1 = ig->q1; double conv = ALPHA / (180.f / M_PI); iv->p = 1.f; /* Pressure */ iv->u = cos(conv); /* Velocity */ iv->v = sin(conv); /* Velocity */ iv->w = 0.f; /* Velocity */ uint32_t i; #pragma omp parallel for for(i = 0; i < sz; i++) { #ifdef __USE_COMPRESSIBLE_FLOW #else q0[i * bsz + 0] = iv->p; q0[i * bsz + 1] = iv->u; q0[i * bsz + 2] = iv->v; q0[i * bsz + 3] = iv->w; q1[i * bsz + 0] = iv->p; q1[i * bsz + 1] = iv->u; q1[i * bsz + 2] = iv->v; q1[i * bsz + 3] = iv->w; #endif } compute_time(&ktime, &ig->t->iguess); #ifdef __USE_HW_COUNTER const uint64_t cycle = __rdtsc() - icycle; struct counters end; perf_read(fd, &end); struct tot tot; perf_calc(start, end, &tot); ig->perf_counters->ctrs->setup.cycles += cycle; ig->perf_counters->ctrs->setup.tot.imcR += tot.imcR; ig->perf_counters->ctrs->setup.tot.imcW += tot.imcW; ig->perf_counters->ctrs->setup.tot.edcR += tot.edcR; ig->perf_counters->ctrs->setup.tot.edcW += tot.edcW; #endif }
Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H

namespace Eigen {

namespace internal {

/** \internal Single access point for the user-set thread cap.
  * SetAction stores *v as the cap; GetAction returns the cap, or
  * omp_get_max_threads() when no cap was set (1 without OpenMP).
  * NOTE(review): m_maxThreads is an unsynchronized function-local
  * static -- presumably callers set it before spawning threads;
  * confirm if concurrent SetAction is possible. */
inline void manage_multi_threading(Action action, int* v)
{
  static EIGEN_UNUSED int m_maxThreads = -1;

  if(action==SetAction)
  {
    eigen_internal_assert(v!=0);
    m_maxThreads = *v;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(v!=0);
    #ifdef EIGEN_HAS_OPENMP
    if(m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
    #else
    *v = 1;
    #endif
  }
  else
  {
    eigen_internal_assert(false);
  }
}

}

/** Must be called first when calling Eigen from multiple threads;
  * it touches the lazily-initialized thread-count and cache-size
  * statics so later concurrent reads see initialized values. */
inline void initParallel()
{
  int nbt;
  internal::manage_multi_threading(GetAction, &nbt);
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}

/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int ret;
  internal::manage_multi_threading(GetAction, &ret);
  return ret;
}

/** Sets the max number of threads reserved for Eigen
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  internal::manage_multi_threading(SetAction, &v);
}

namespace internal {

/** \internal Per-thread bookkeeping for the parallel GEMM: sync/users
  * coordinate the threads, lhs_start/lhs_length describe the slice of
  * the lhs this thread packs. */
template<typename Index> struct GemmParallelInfo
{
  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}

  int volatile sync;
  int volatile users;

  Index lhs_start;
  Index lhs_length;
};

/** \internal Run func over the rows x cols product, split across OpenMP
  * threads when worthwhile, otherwise invoke it once single-threaded. */
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpose)
{
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redisigned anyway.
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0,rows, 0,cols);
#else
  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  // - the max number of threads we can create is greater than 1
  // - we are not already in a parallel code
  // - the sizes are large enough

  // 1- are we already in a parallel session?
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if((!Condition) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);

  Index size = transpose ? rows : cols;

  // 2- compute the maximal number of threads from the size of the product:
  // FIXME this has to be fine tuned
  Index max_threads = std::max<Index>(1,size / 32);

  // 3 - compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), max_threads);

  if(threads==1)
    return func(0,rows, 0,cols);

  Eigen::initParallel();
  func.initParallelSession(threads);

  if(transpose)
    std::swap(rows,cols);

  ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);

  #pragma omp parallel num_threads(threads)
  {
    Index i = omp_get_thread_num();
    // Note that the actual number of threads might be lower than the number of request ones.
    Index actual_threads = omp_get_num_threads();

    // column block, rounded down to a multiple of 4 (& ~Index(0x3));
    // row block, rounded down to a multiple of Functor::Traits::mr
    Index blockCols = (cols / actual_threads) & ~Index(0x3);
    Index blockRows = (rows / actual_threads);
    blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;

    // the last thread absorbs the remainder of the rounding above
    Index r0 = i*blockRows;
    Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;

    Index c0 = i*blockCols;
    Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;

    info[i].lhs_start = r0;
    info[i].lhs_length = actualBlockRows;

    if(transpose) func(c0, actualBlockCols, 0, rows, info);
    else          func(0, rows, c0, actualBlockCols, info);
  }
#endif
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PARALLELIZER_H
convolution_4x4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

// conv4x4s4_neon: 4x4 convolution with stride 4 (non-overlapping 4x4 windows),
// accelerated with NEON where available.
//
// Each output pixel is the dot product of one 4x4 input patch with the 4x4
// kernel for the (output channel p, input channel q) pair, accumulated over
// all input channels on top of the per-channel bias.
//
// Layout assumptions (from the indexing below):
//   - _kernel holds outch*inch 4x4 kernels, 16 floats each, at p*inch*16 + q*16
//   - _bias may be empty (null pointer) in which case bias 0.f is used
// Output channels are processed in parallel via OpenMP.
static void conv4x4s4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // After consuming one output row, the input row pointers sit at
    // x = 4*outw on the current row group; tailstep skips to the start of the
    // next group of 4 input rows (remaining columns + 3 full rows).
    const int tailstep = w - 4*outw + w*3;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        // Initialize the whole output channel with its bias (0 if no bias).
        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        // Accumulate the contribution of each input channel into `out`.
        for (int q=0; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            // 16 kernel taps for this (p, q) pair.
            const float* kernel0 = kernel + p*inch*16 + q*16;

            // Four consecutive input rows of the current 4x4 window band.
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;

#if __ARM_NEON
            // One 4-lane vector per kernel row.
            float32x4_t _k0123 = vld1q_f32(kernel0);
            float32x4_t _k4567 = vld1q_f32(kernel0+4);
            float32x4_t _k891011 = vld1q_f32(kernel0+8);
            float32x4_t _k12131415 = vld1q_f32(kernel0+12);
#else
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 4;
            const float* k2 = kernel0 + 8;
            const float* k3 = kernel0 + 12;
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                // Main loop does 4 output pixels per iteration; `remain`
                // handles the leftover 0-3 pixels.
                int nn = outw >> 2;
                int remain = outw - (nn << 2);
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                // AArch64 kernel: per iteration, four 4x4 windows are reduced.
                // For each window: v12/v13 accumulate row0*k0 + row2*k2 and
                // row1*k1 + row3*k3, their sum (v5/v6/v14/v15) holds 4 partial
                // lane sums; the faddp chain then pairwise-reduces the four
                // vectors into one vector of 4 final pixel sums added to v7
                // (the existing outptr contents).
                if (nn > 0)
                {
                asm volatile(
                    "prfm       pldl1keep, [%1, #128]          \n"
                    "0:                                        \n"

                    "prfm       pldl1keep, [%2, #512]          \n"
                    "prfm       pldl1keep, [%3, #512]          \n"

                    "ld1        {v7.4s}, [%1]                  \n"// v7 = outptr

                    "ld1        {v8.4s}, [%2], #16             \n"// v8 = r0
                    "ld1        {v9.4s}, [%3], #16             \n"// v9 = r1

                    "prfm       pldl1keep, [%4, #512]          \n"
                    "prfm       pldl1keep, [%5, #512]          \n"

                    "fmul       v12.4s, v8.4s, %12.4s          \n"
                    "fmul       v13.4s, v9.4s, %13.4s          \n"

                    "ld1        {v10.4s}, [%4], #16            \n"// v10 = r2
                    "ld1        {v11.4s}, [%5], #16            \n"// v11 = r3

                    "fmla       v12.4s, v10.4s, %14.4s         \n"
                    "fmla       v13.4s, v11.4s, %15.4s         \n"

                    "fadd       v5.4s, v12.4s, v13.4s          \n"

                    "ld1        {v8.4s}, [%2], #16             \n"// v8 = r0
                    "ld1        {v9.4s}, [%3], #16             \n"// v9 = r1

                    "fmul       v12.4s, v8.4s, %12.4s          \n"
                    "fmul       v13.4s, v9.4s, %13.4s          \n"

                    "ld1        {v10.4s}, [%4], #16            \n"// v10 = r2
                    "ld1        {v11.4s}, [%5], #16            \n"// v11 = r3

                    "fmla       v12.4s, v10.4s, %14.4s         \n"
                    "fmla       v13.4s, v11.4s, %15.4s         \n"

                    "fadd       v6.4s, v12.4s, v13.4s          \n"

                    "ld1        {v8.4s}, [%2], #16             \n"// v8 = r0
                    "ld1        {v9.4s}, [%3], #16             \n"// v9 = r1

                    "fmul       v12.4s, v8.4s, %12.4s          \n"
                    "fmul       v13.4s, v9.4s, %13.4s          \n"

                    "ld1        {v10.4s}, [%4], #16            \n"// v10 = r2
                    "ld1        {v11.4s}, [%5], #16            \n"// v11 = r3

                    "fmla       v12.4s, v10.4s, %14.4s         \n"
                    "fmla       v13.4s, v11.4s, %15.4s         \n"

                    "fadd       v14.4s, v12.4s, v13.4s         \n"

                    "faddp      v5.4s, v5.4s, v6.4s            \n" // Move to here to enhance ILP

                    "ld1        {v8.4s}, [%2], #16             \n"// v8 = r0
                    "ld1        {v9.4s}, [%3], #16             \n"// v9 = r1

                    "fmul       v12.4s, v8.4s, %12.4s          \n"
                    "fmul       v13.4s, v9.4s, %13.4s          \n"

                    "ld1        {v10.4s}, [%4], #16            \n"// v10 = r2
                    "ld1        {v11.4s}, [%5], #16            \n"// v11 = r3

                    "fmla       v12.4s, v10.4s, %14.4s         \n"
                    "fmla       v13.4s, v11.4s, %15.4s         \n"

                    "fadd       v15.4s, v12.4s, v13.4s         \n"

//                  "faddp      v5.4s , v5.4s, v6.4s           \n" // Move this line upward.
                    "faddp      v14.4s, v14.4s, v15.4s         \n"

                    "faddp      v5.4s , v5.4s, v14.4s          \n"

                    "fadd       v7.4s, v7.4s, v5.4s            \n"

                    "st1        {v7.4s}, [%1], #16             \n"

                    "prfm       pldl1keep, [%1, #128]          \n"

                    "subs       %w0, %w0, #1                   \n"
                    "bne        0b                             \n"

                    : "=r"(nn),     // %0
                      "=r"(outptr), // %1
                      "=r"(r0),     // %2
                      "=r"(r1),     // %3
                      "=r"(r2),     // %4
                      "=r"(r3)      // %5
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "5"(r3),
                      "w"(_k0123),     // %12
                      "w"(_k4567),     // %13
                      "w"(_k891011),   // %14
                      "w"(_k12131415)  // %15
                    : "cc", "memory", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                );
                }
#else
                // ARMv7 kernel: same scheme as the AArch64 path, using
                // q5/q6/q14/q15 as the four partial-sum vectors and
                // vadd/vpadd on d registers for the horizontal reduction.
                if (nn > 0)
                {
                asm volatile(
                    "pld        [%1, #128]          \n"
                    "0:                             \n"

                    "pld        [%2, #512]          \n"
                    "pld        [%3, #512]          \n"

                    "vld1.f32   {d14-d15}, [%1]     \n"// q7 = outptr

                    "vld1.f32   {d16-d17}, [%2]!    \n"// q8 = r0
                    "vld1.f32   {d18-d19}, [%3]!    \n"// q9 = r1

                    "pld        [%4, #512]          \n"
                    "pld        [%5, #512]          \n"

                    "vmul.f32   q12, q8, %q12       \n"
                    "vmul.f32   q13, q9, %q13       \n"

                    "vld1.f32   {d20-d21}, [%4]!    \n"// q10 = r2
                    "vld1.f32   {d22-d23}, [%5]!    \n"// q11 = r3

                    "vmla.f32   q12, q10, %q14      \n"
                    "vmla.f32   q13, q11, %q15      \n"

                    "vadd.f32   q5, q12, q13        \n"

                    "vld1.f32   {d16-d17}, [%2]!    \n"// q8 = r0
                    "vld1.f32   {d18-d19}, [%3]!    \n"// q9 = r1

                    "vmul.f32   q12, q8, %q12       \n"
                    "vmul.f32   q13, q9, %q13       \n"

                    "vld1.f32   {d20-d21}, [%4]!    \n"// q10 = r2
                    "vld1.f32   {d22-d23}, [%5]!    \n"// q11 = r3

                    "vmla.f32   q12, q10, %q14      \n"
                    "vmla.f32   q13, q11, %q15      \n"

                    "vadd.f32   q6, q12, q13        \n"

                    "vld1.f32   {d16-d17}, [%2]!    \n"// q8 = r0
                    "vld1.f32   {d18-d19}, [%3]!    \n"// q9 = r1

                    "vmul.f32   q12, q8, %q12       \n"
                    "vmul.f32   q13, q9, %q13       \n"

                    "vld1.f32   {d20-d21}, [%4]!    \n"// q10 = r2
                    "vld1.f32   {d22-d23}, [%5]!    \n"// q11 = r3

                    "vmla.f32   q12, q10, %q14      \n"
                    "vmla.f32   q13, q11, %q15      \n"

                    "vadd.f32   q14, q12, q13       \n"

                    "vld1.f32   {d16-d17}, [%2]!    \n"// q8 = r0
                    "vld1.f32   {d18-d19}, [%3]!    \n"// q9 = r1

                    "vmul.f32   q12, q8, %q12       \n"
                    "vmul.f32   q13, q9, %q13       \n"

                    "vld1.f32   {d20-d21}, [%4]!    \n"// q10 = r2
                    "vld1.f32   {d22-d23}, [%5]!    \n"// q11 = r3

                    "vmla.f32   q12, q10, %q14      \n"
                    "vmla.f32   q13, q11, %q15      \n"

                    "vadd.f32   q15, q12, q13       \n"

                    // Horizontal reduction of q5/q6/q14/q15 into one vector
                    // of 4 output pixels (q5), then accumulate into q7.
                    "vadd.f32   d10, d10, d11       \n"
                    "vadd.f32   d28, d28, d29       \n"
                    "vadd.f32   d11, d12, d13       \n"
                    "vadd.f32   d29, d30, d31       \n"

                    "vpadd.f32  d10, d10, d11       \n"
                    "vpadd.f32  d11, d28, d29       \n"

                    "vadd.f32   q7, q7, q5          \n"

                    "vst1.f32   {d14-d15}, [%1]!    \n"

                    "pld        [%1, #128]          \n"

                    "subs       %0, #1              \n"
                    "bne        0b                  \n"

                    : "=r"(nn),     // %0
                      "=r"(outptr), // %1
                      "=r"(r0),     // %2
                      "=r"(r1),     // %3
                      "=r"(r2),     // %4
                      "=r"(r3)      // %5
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "5"(r3),
                      "w"(_k0123),     // %12
                      "w"(_k4567),     // %13
                      "w"(_k891011),   // %14
                      "w"(_k12131415)  // %15
                    : "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON

                // Tail: one output pixel per iteration. The NEON variants
                // advance r0..r3 by 16 bytes via post-indexed loads; the
                // scalar variant advances them explicitly.
                for (; remain>0; remain--)
                {
#if __ARM_NEON
#if __aarch64__
                    float sum = 0.f;

                    asm volatile(
                        "ld1        {v8.4s}, [%0], #16         \n"// v8 = r0
                        "ld1        {v9.4s}, [%1], #16         \n"// v9 = r1

                        "fmul       v12.4s, v8.4s, %9.4s       \n"
                        "fmul       v13.4s, v9.4s, %10.4s      \n"

                        "ld1        {v10.4s}, [%2], #16        \n"// v10 = r2
                        "ld1        {v11.4s}, [%3], #16        \n"// v11 = r3

                        "fmla       v12.4s, v10.4s, %11.4s     \n"
                        "fmla       v13.4s, v11.4s, %12.4s     \n"

                        "fadd       v5.4s, v12.4s, v13.4s      \n"

                        "faddp      v5.4s, v5.4s, v5.4s        \n"
                        "faddp      s5, v5.2s                  \n"

                        "fmov       %w4, s5                    \n"

                        : "=r"(r0),  // %0
                          "=r"(r1),  // %1
                          "=r"(r2),  // %2
                          "=r"(r3),  // %3
                          "=r"(sum)  // %4
                        : "0"(r0),
                          "1"(r1),
                          "2"(r2),
                          "3"(r3),
                          "w"(_k0123),     // %9
                          "w"(_k4567),     // %10
                          "w"(_k891011),   // %11
                          "w"(_k12131415)  // %12
                        : "cc", "memory", "v5", "v6", "v8", "v9", "v10", "v11", "v12", "v13"
                    );

                    *outptr += sum;
#else
                    float sum = 0.f;

                    asm volatile(
                        "vld1.f32   {d16-d17}, [%0]!    \n"// q8 = r0
                        "vld1.f32   {d18-d19}, [%1]!    \n"// q9 = r1

                        "vmul.f32   q12, q8, %q9        \n"
                        "vmul.f32   q13, q9, %q10       \n"

                        "vld1.f32   {d20-d21}, [%2]!    \n"// q10 = r2
                        "vld1.f32   {d22-d23}, [%3]!    \n"// q11 = r3

                        "vmla.f32   q12, q10, %q11      \n"
                        "vmla.f32   q13, q11, %q12      \n"

                        "vadd.f32   q5, q12, q13        \n"

                        "vadd.f32   d10, d10, d11       \n"
                        "vpadd.f32  d10, d10, d10       \n"

                        "vmov.f32   %4, d10[0]          \n"

                        : "=r"(r0),  // %0
                          "=r"(r1),  // %1
                          "=r"(r2),  // %2
                          "=r"(r3),  // %3
                          "=r"(sum)  // %4
                        : "0"(r0),
                          "1"(r1),
                          "2"(r2),
                          "3"(r3),
                          "w"(_k0123),     // %9
                          "w"(_k4567),     // %10
                          "w"(_k891011),   // %11
                          "w"(_k12131415)  // %12
                        : "cc", "memory", "q5", "q6", "q8", "q9", "q10", "q11", "q12", "q13"
                    );

                    *outptr += sum;
#endif // __aarch64__
#else
                    // Scalar fallback: plain 16-tap dot product.
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];

                    *outptr += sum;

                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
#endif // __ARM_NEON
                    outptr++;
                }

                // Skip to the next band of 4 input rows.
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
                r3 += tailstep;
            }
        }
    }
}
zeroslike_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>

/* ZerosLike (fp32 reference): fill the output tensor with zeros.
 *
 * input_tensor  - reference tensor; only its shape/element count is consulted.
 * output_tensor - tensor to be zero-filled (assumed to match input's shape).
 * num_thread    - OpenMP thread count for the per-channel loop.
 * Returns 0 on success, -1 for unsupported ranks (> 4).
 */
int ref_zeroslike_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    /* rank < 4: flat fill over all elements */
    if (input_tensor->dim_num < 4)
    {
        float* out_data = output_tensor->data;
        int total_size = input_tensor->elem_num;

        /* BUG FIX: the original wrote zeros into input_tensor->data, clobbering
         * the input and leaving the output untouched. ZerosLike must write the
         * zeros to the OUTPUT tensor. */
        for (int i = 0; i < total_size; i++)
        {
            out_data[i] = 0.f;
        }

        return 0;
    }
    /* rank == 4: zero channel by channel, channels split across threads */
    else if (input_tensor->dim_num == 4)
    {
        int w = input_tensor->dims[3];
        int h = output_tensor->dims[2]; /* NOTE(review): h read from output dims while w/c come
                                         * from input dims — harmless when shapes match (as
                                         * ZerosLike implies), but worth unifying. */
        int channels = input_tensor->dims[1];
        int size = h * w;
        int c_step = h * w;

        float* out_data = output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* dst = out_data + c_step * q;
            for (int i = 0; i < size; i++)
            {
                dst[i] = 0.f;
            }
        }

        return 0;
    }

    return -1;
}

/* ZerosLike (uint8 reference): same contract as ref_zeroslike_fp32 but for
 * 8-bit data. Returns 0 on success, -1 for unsupported ranks. */
int ref_zeroslike_uint8(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    /* rank < 4: flat fill over all elements */
    if (input_tensor->dim_num < 4)
    {
        uint8_t* out_data = output_tensor->data;
        int total_size = input_tensor->elem_num;

        /* BUG FIX: same defect as the fp32 path — zeros must go to the output,
         * not the input. */
        for (int i = 0; i < total_size; i++)
        {
            out_data[i] = 0;
        }

        return 0;
    }
    /* rank == 4: zero channel by channel, channels split across threads */
    else if (input_tensor->dim_num == 4)
    {
        int w = input_tensor->dims[3];
        int h = output_tensor->dims[2]; /* NOTE(review): see fp32 path — mixed input/output dims. */
        int channels = input_tensor->dims[1];
        int size = h * w;
        int c_step = h * w;

        uint8_t* out_data = output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            uint8_t* dst = out_data + c_step * q;
            for (int i = 0; i < size; i++)
            {
                dst[i] = 0;
            }
        }

        return 0;
    }

    return -1;
}

/* No per-node state to set up for this reference op. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Nothing allocated in init_node, so nothing to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    // exec_node->inplace_map_num = 0;
    return 0;
}

/* No pre-run preparation required. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Dispatch to the fp32 or uint8 kernel based on the input tensor's dtype.
 * Returns the kernel's status, or -1 for unsupported dtypes. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* output_tensor;
    int layout = ir_graph->graph_layout;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int ret = -1;
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_zeroslike_fp32(input_tensor, output_tensor, exec_graph->num_thread);
    else if (input_tensor->data_type == TENGINE_DT_UINT8)
        ret = ref_zeroslike_uint8(input_tensor, output_tensor, exec_graph->num_thread);

    return ret;
}

/* Reference implementation: claims only "can do" priority so optimized
 * backends are preferred when available. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_CANDO;
}

static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

int register_zeroslike_ref_op(void* arg)
{
    return register_builtin_node_ops(OP_ZEROSLIKE, &hcl_node_ops);
}

int unregister_zeroslike_ref_op(void* arg)
{
    return unregister_builtin_node_ops(OP_ZEROSLIKE, &hcl_node_ops);
}
DRB096-doall2-taskloop-collapse-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* Two-dimensional array computation: Two loops are associated with omp taskloop due to collapse(2). Both loop index variables are private. taskloop requires OpenMP 4.5 compilers. */ #include <stdio.h> #include <omp.h> int a[100][100]; int main() { int i; int j; { #pragma omp parallel for private (i,j) for (i = 0; i <= 99; i += 1) { #pragma omp parallel for private (j) for (j = 0; j <= 99; j += 1) { a[i][j] += 1; } } } printf("a[50][50]=%d\n",a[50][50]); return 0; }
Example_SIMD.2.c
/* * @@name: SIMD.2c * @@type: C * @@compilable: yes * @@linkable: yes * @@expect: success * @@version: omp_4.0 */ #include <stdio.h> #pragma omp declare simd uniform(fact) double add1(double a, double b, double fact) { double c; c = a + b + fact; return c; } #pragma omp declare simd uniform(a,b,fact) linear(i:1) double add2(double *a, double *b, int i, double fact) { double c; c = a[i] + b[i] + fact; return c; } #pragma omp declare simd uniform(fact) linear(a,b:1) double add3(double *a, double *b, double fact) { double c; c = *a + *b + fact; return c; } void work( double *a, double *b, int n ) { int i; double tmp; #pragma omp simd private(tmp) for ( i = 0; i < n; i++ ) { tmp = add1( a[i], b[i], 1.0); a[i] = add2( a, b, i, 1.0) + tmp; a[i] = add3(&a[i], &b[i], 1.0); } } int main(){ int i; const int N=32; double a[N], b[N]; for ( i=0; i<N; i++ ) { a[i] = i; b[i] = N-i; } work(a, b, N ); for ( i=0; i<N; i++ ) { printf("%d %f\n", i, a[i]); } return 0; }
3d25pt.lbpar.c
#include <omp.h>
#include <math.h>
/* Integer-safe ceiling/floor division helpers used by the tiled loop bounds. */
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-2, 3D 25 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

#ifndef min
#define min(x,y)    ((x) < (y)? (x) : (y))
#endif

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * NOTE: Y is modified in place while normalizing the microsecond field.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;

    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }

  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;

    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: allocates two time-planes of a (Nz x Ny x Nx) grid plus a
 * coefficient grid roc2, runs a PLUTO/CLooG-generated, time-tiled,
 * OpenMP-parallel version of the 25-point stencil for Nt time steps, and
 * reports per-test wall time.
 *
 * Usage (from the argv parsing): prog Nx Ny Nz [Nt]; each spatial size gets
 * +8 ghost layers (4 on each side for the order-4 halo).
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  /* NOTE(review): Nx/Ny/Nz (and Nt) remain uninitialized when too few
   * arguments are supplied — the code assumes argc > 4 in practice. */
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* A[0]/A[1] are the two ping-pong time planes; roc2 holds coefficients. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  /* NOTE(review): this first allocation of roc2 is immediately overwritten
   * below and therefore leaked. */
  double ***roc2 = (double ***) malloc(sizeof(double**));

  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  roc2 = (double ***) malloc(sizeof(double**)*Nz);

  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);

    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  /* Tile sizes (t, z, y, x) used by the generated loop nest below;
   * -1 terminates the list. */
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 32;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  /* NOTE(review): loops start at index 1, so the i==0 / j==0 / k==0 planes
   * are never initialized; the stencil only reads offsets >= -4 from
   * indices >= 4, so plane 0 is still read — confirm against upstream. */
  //
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* 25-point (order-4 in each axis) stencil coefficients. */
  const double coef0 = -0.28472;
  const double coef1 =  0.16000;
  const double coef2 = -0.02000;
  const double coef3 =  0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
    /* (A stray glibc header comment was embedded here in the original
     * source by the preprocessing pipeline; condensed to this note.) */

  int t1, t2, t3, t4, t5, t6, t7, t8;
  int lb, ub, lbp, ubp, lb2, ub2;
  register int lbv, ubv;
/* Start of CLooG code */
/* Time-tiled loop nest generated by PLUTO/CLooG. t1 walks tile wavefronts in
 * time; t2 (z-tiles) is the parallel dimension; t3/t4 tile y/x; t5 is the
 * time step within a tile; t6/t7/t8 are the intra-tile z/y/x points. */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
  for (t1=-1;t1<=2*Nt-2;t1++) {
    lbp=ceild(t1+2,2);
    ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(ceild(t1-12,16),ceild(4*t2-Nz-19,32));t3<=min(min(floord(4*Nt+Ny-9,32),floord(2*t1+Ny-3,32)),floord(4*t2+Ny-9,32));t3++) {
        for (t4=max(max(ceild(t1-28,32),ceild(4*t2-Nz-51,64)),ceild(32*t3-Ny-51,64));t4<=min(min(min(floord(4*Nt+Nx-9,64),floord(2*t1+Nx-3,64)),floord(4*t2+Nx-9,64)),floord(32*t3+Nx+19,64));t4++) {
          for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(64*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
            for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
              for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) {
                lbv=max(64*t4,4*t5+4);
                ubv=min(64*t4+63,4*t5+Nx-5);
/* Innermost x loop: unit stride, hinted vectorizable. */
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  /* Wave-equation style update: next = 2*cur - prev + roc2 * L(cur),
                   * where L is the 25-point Laplacian (center + 4 rings of +-1..+-4
                   * neighbors along each axis). Time planes alternate via t5 % 2. */
                  A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);

  return 0;
}
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/Specifiers.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class AddrLabelExpr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
/// class alignas(void *) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: friend class ASTStmtReader; friend class ASTStmtWriter; void *operator new(size_t bytes) noexcept { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) noexcept { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } //===--- Statement bitfields classes ---===// class StmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class Stmt; /// The statement class. unsigned sClass : 8; /// This bit is set only for the Stmts that are the structured-block of /// OpenMP executable directives. Directives that have a structured block /// are called "non-standalone" directives. /// I.e. those returned by OMPExecutableDirective::getStructuredBlock(). unsigned IsOMPStructuredBlock : 1; }; enum { NumStmtBits = 9 }; class NullStmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class NullStmt; unsigned : NumStmtBits; /// True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode unsigned HasLeadingEmptyMacro : 1; /// The location of the semi-colon. SourceLocation SemiLoc; }; class CompoundStmtBitfields { friend class ASTStmtReader; friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; /// The location of the opening "{". 
SourceLocation LBraceLoc; }; class LabelStmtBitfields { friend class LabelStmt; unsigned : NumStmtBits; SourceLocation IdentLoc; }; class AttributedStmtBitfields { friend class ASTStmtReader; friend class AttributedStmt; unsigned : NumStmtBits; /// Number of attributes. unsigned NumAttrs : 32 - NumStmtBits; /// The location of the attribute. SourceLocation AttrLoc; }; class IfStmtBitfields { friend class ASTStmtReader; friend class IfStmt; unsigned : NumStmtBits; /// True if this if statement is a constexpr if. unsigned IsConstexpr : 1; /// True if this if statement has storage for an else statement. unsigned HasElse : 1; /// True if this if statement has storage for a variable declaration. unsigned HasVar : 1; /// True if this if statement has storage for an init statement. unsigned HasInit : 1; /// The location of the "if". SourceLocation IfLoc; }; class SwitchStmtBitfields { friend class SwitchStmt; unsigned : NumStmtBits; /// True if the SwitchStmt has storage for an init statement. unsigned HasInit : 1; /// True if the SwitchStmt has storage for a condition variable. unsigned HasVar : 1; /// If the SwitchStmt is a switch on an enum value, records whether all /// the enum values were covered by CaseStmts. The coverage information /// value is meant to be a hint for possible clients. unsigned AllEnumCasesCovered : 1; /// The location of the "switch". SourceLocation SwitchLoc; }; class WhileStmtBitfields { friend class ASTStmtReader; friend class WhileStmt; unsigned : NumStmtBits; /// True if the WhileStmt has storage for a condition variable. unsigned HasVar : 1; /// The location of the "while". SourceLocation WhileLoc; }; class DoStmtBitfields { friend class DoStmt; unsigned : NumStmtBits; /// The location of the "do". SourceLocation DoLoc; }; class ForStmtBitfields { friend class ForStmt; unsigned : NumStmtBits; /// The location of the "for". 
SourceLocation ForLoc;
  };

  class GotoStmtBitfields {
    friend class GotoStmt;
    friend class IndirectGotoStmt;

    unsigned : NumStmtBits;

    /// The location of the "goto".
    SourceLocation GotoLoc;
  };

  class ContinueStmtBitfields {
    friend class ContinueStmt;

    unsigned : NumStmtBits;

    /// The location of the "continue".
    SourceLocation ContinueLoc;
  };

  class BreakStmtBitfields {
    friend class BreakStmt;

    unsigned : NumStmtBits;

    /// The location of the "break".
    SourceLocation BreakLoc;
  };

  class ReturnStmtBitfields {
    friend class ReturnStmt;

    unsigned : NumStmtBits;

    /// True if this ReturnStmt has storage for an NRVO candidate.
    unsigned HasNRVOCandidate : 1;

    /// The location of the "return".
    SourceLocation RetLoc;
  };

  class SwitchCaseBitfields {
    friend class SwitchCase;
    friend class CaseStmt;

    unsigned : NumStmtBits;

    /// Used by CaseStmt to store whether it is a case statement
    /// of the form case LHS ... RHS (a GNU extension).
    unsigned CaseStmtIsGNURange : 1;

    /// The location of the "case" or "default" keyword.
    SourceLocation KeywordLoc;
  };

  //===--- Expression bitfields classes ---===//

  class ExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class AtomicExpr; // ctor
    friend class BlockDeclRefExpr; // ctor
    friend class CallExpr; // ctor
    friend class CXXConstructExpr; // ctor
    friend class CXXDependentScopeMemberExpr; // ctor
    friend class CXXNewExpr; // ctor
    friend class CXXUnresolvedConstructExpr; // ctor
    friend class DeclRefExpr; // computeDependence
    friend class DependentScopeDeclRefExpr; // ctor
    friend class DesignatedInitExpr; // ctor
    friend class Expr;
    friend class InitListExpr; // ctor
    friend class ObjCArrayLiteral; // ctor
    friend class ObjCDictionaryLiteral; // ctor
    friend class ObjCMessageExpr; // ctor
    friend class OffsetOfExpr; // ctor
    friend class OpaqueValueExpr; // ctor
    friend class OverloadExpr; // ctor
    friend class ParenListExpr; // ctor
    friend class PseudoObjectExpr; // ctor
    friend class ShuffleVectorExpr; // ctor

    unsigned : NumStmtBits;

    // These fields occupy 2+3+1+1+1+1 = 9 bits in total; NumExprBits below
    // must stay in sync with that sum.
    unsigned ValueKind : 2;
    unsigned ObjectKind : 3;
    unsigned TypeDependent : 1;
    unsigned ValueDependent : 1;
    unsigned InstantiationDependent : 1;
    unsigned ContainsUnexpandedParameterPack : 1;
  };
  enum { NumExprBits = NumStmtBits + 9 };

  class ConstantExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class ConstantExpr;

    unsigned : NumExprBits;

    /// The kind of result that is trail-allocated.
    unsigned ResultKind : 2;

    /// Kind of Result as defined by APValue::Kind.
    unsigned APValueKind : 4;

    /// When ResultKind == RSK_Int64, whether the trail-allocated integer is
    /// signed.
    unsigned IsUnsigned : 1;

    /// When ResultKind == RSK_Int64, the BitWidth of the trail-allocated
    /// integer. 7 bits because it is the minimal number of bits to represent a
    /// value from 0 to 64 (the size of the trail-allocated number).
    unsigned BitWidth : 7;

    /// When ResultKind == RSK_APValue, whether the ASTContext will cleanup the
    /// destructor on the trail-allocated APValue.
    unsigned HasCleanup : 1;
  };

  class PredefinedExprBitfields {
    friend class ASTStmtReader;
    friend class PredefinedExpr;

    unsigned : NumExprBits;

    /// The kind of this PredefinedExpr. One of the enumeration values
    /// in PredefinedExpr::IdentKind.
    unsigned Kind : 4;

    /// True if this PredefinedExpr has a trailing "StringLiteral *"
    /// for the predefined identifier.
    unsigned HasFunctionName : 1;

    /// The location of this PredefinedExpr.
    SourceLocation Loc;
  };

  class DeclRefExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class DeclRefExpr;

    unsigned : NumExprBits;

    unsigned HasQualifier : 1;
    unsigned HasTemplateKWAndArgsInfo : 1;
    unsigned HasFoundDecl : 1;
    unsigned HadMultipleCandidates : 1;
    unsigned RefersToEnclosingVariableOrCapture : 1;
    unsigned NonOdrUseReason : 2;

    /// The location of the declaration name itself.
    SourceLocation Loc;
  };

  class FloatingLiteralBitfields {
    friend class FloatingLiteral;

    unsigned : NumExprBits;

    unsigned Semantics : 3; // Provides semantics for APFloat construction
    unsigned IsExact : 1;
  };

  class StringLiteralBitfields {
    friend class ASTStmtReader;
    friend class StringLiteral;

    unsigned : NumExprBits;

    /// The kind of this string literal.
    /// One of the enumeration values of StringLiteral::StringKind.
    unsigned Kind : 3;

    /// The width of a single character in bytes. Only values of 1, 2,
    /// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
    /// the target + string kind to the appropriate CharByteWidth.
    unsigned CharByteWidth : 3;

    unsigned IsPascal : 1;

    /// The number of concatenated tokens this string is made of.
    /// This is the number of trailing SourceLocation.
unsigned NumConcatenated;
  };

  class CharacterLiteralBitfields {
    friend class CharacterLiteral;

    unsigned : NumExprBits;

    unsigned Kind : 3;
  };

  enum { NumBoundsCheckKindBits = 2 };

  class UnaryOperatorBitfields {
    friend class UnaryOperator;

    unsigned : NumExprBits;

    unsigned Opc : 5;
    unsigned CanOverflow : 1;
    unsigned BoundsCheckKind : NumBoundsCheckKindBits;

    SourceLocation Loc;
  };

  class UnaryExprOrTypeTraitExprBitfields {
    friend class UnaryExprOrTypeTraitExpr;

    unsigned : NumExprBits;

    unsigned Kind : 3;
    unsigned IsType : 1; // true if operand is a type, false if an expression.
  };

  class ArraySubscriptExprBitfields {
    friend class ArraySubscriptExpr;

    unsigned : NumExprBits;

    unsigned BoundsCheckKind : NumBoundsCheckKindBits;

    SourceLocation RBracketLoc;
  };

  class CallExprBitfields {
    friend class CallExpr;

    unsigned : NumExprBits;

    unsigned NumPreArgs : 1;

    /// True if the callee of the call expression was found using ADL.
    unsigned UsesADL : 1;

    /// Padding used to align OffsetToTrailingObjects to a byte multiple.
    unsigned : 24 - 2 - NumExprBits;

    /// The offset in bytes from the this pointer to the start of the
    /// trailing objects belonging to CallExpr. Intentionally byte sized
    /// for faster access.
    unsigned OffsetToTrailingObjects : 8;
  };
  enum { NumCallExprBits = 32 };

  class MemberExprBitfields {
    friend class ASTStmtReader;
    friend class MemberExpr;

    unsigned : NumExprBits;

    /// IsArrow - True if this is "X->F", false if this is "X.F".
    unsigned IsArrow : 1;

    /// True if this member expression used a nested-name-specifier to
    /// refer to the member, e.g., "x->Base::f", or found its member via
    /// a using declaration. When true, a MemberExprNameQualifier
    /// structure is allocated immediately after the MemberExpr.
    unsigned HasQualifierOrFoundDecl : 1;

    /// True if this member expression specified a template keyword
    /// and/or a template argument list explicitly, e.g., x->f<int>,
    /// x->template f, x->template f<int>.
    /// When true, an ASTTemplateKWAndArgsInfo structure and its
    /// TemplateArguments (if any) are present.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// True if this member expression refers to a method that
    /// was resolved from an overloaded set having size greater than 1.
    unsigned HadMultipleCandidates : 1;

    /// Value of type NonOdrUseReason indicating why this MemberExpr does
    /// not constitute an odr-use of the named declaration. Meaningful only
    /// when naming a static member.
    unsigned NonOdrUseReason : 2;

    /// This is the location of the -> or . in the expression.
    SourceLocation OperatorLoc;
  };

  class CastExprBitfields {
    friend class CastExpr;
    friend class ImplicitCastExpr;

    unsigned : NumExprBits;

    unsigned Kind : 6;
    unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
    unsigned BoundsSafeInterface : 1;

    /// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
    /// here. ([implimits] Direct and indirect base classes [16384]).
    unsigned BasePathSize;
  };

  class BinaryOperatorBitfields {
    friend class BinaryOperator;

    unsigned : NumExprBits;

    unsigned Opc : 6;

    /// This is only meaningful for operations on floating point
    /// types and 0 otherwise.
    unsigned FPFeatures : 3;

    SourceLocation OpLoc;
  };

  class InitListExprBitfields {
    friend class InitListExpr;

    unsigned : NumExprBits;

    /// Whether this initializer list originally had a GNU array-range
    /// designator in it. This is a temporary marker used by CodeGen.
    unsigned HadArrayRangeDesignator : 1;
  };

  class ParenListExprBitfields {
    friend class ASTStmtReader;
    friend class ParenListExpr;

    unsigned : NumExprBits;

    /// The number of expressions in the paren list.
    unsigned NumExprs;
  };

  class GenericSelectionExprBitfields {
    friend class ASTStmtReader;
    friend class GenericSelectionExpr;

    unsigned : NumExprBits;

    /// The location of the "_Generic".
    SourceLocation GenericLoc;
  };

  class PseudoObjectExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class PseudoObjectExpr;

    unsigned : NumExprBits;

    // These don't need to be particularly wide, because they're
    // strictly limited by the forms of expressions we permit.
    unsigned NumSubExprs : 8;
    unsigned ResultIndex : 32 - 8 - NumExprBits;
  };

  class SourceLocExprBitfields {
    friend class ASTStmtReader;
    friend class SourceLocExpr;

    unsigned : NumExprBits;

    /// The kind of source location builtin represented by the SourceLocExpr.
    /// Ex. __builtin_LINE, __builtin_FUNCTION, etc.
    unsigned Kind : 2;
  };

  //===--- C++ Expression bitfields classes ---===//

  class CXXOperatorCallExprBitfields {
    friend class ASTStmtReader;
    friend class CXXOperatorCallExpr;

    unsigned : NumCallExprBits;

    /// The kind of this overloaded operator. One of the enumerator
    /// value of OverloadedOperatorKind.
    unsigned OperatorKind : 6;

    // Only meaningful for floating point types.
    unsigned FPFeatures : 3;
  };

  class CXXBoolLiteralExprBitfields {
    friend class CXXBoolLiteralExpr;

    unsigned : NumExprBits;

    /// The value of the boolean literal.
    unsigned Value : 1;

    /// The location of the boolean literal.
    SourceLocation Loc;
  };

  class CXXNullPtrLiteralExprBitfields {
    friend class CXXNullPtrLiteralExpr;

    unsigned : NumExprBits;

    /// The location of the null pointer literal.
    SourceLocation Loc;
  };

  class CXXThisExprBitfields {
    friend class CXXThisExpr;

    unsigned : NumExprBits;

    /// Whether this is an implicit "this".
    unsigned IsImplicit : 1;

    /// The location of the "this".
    SourceLocation Loc;
  };

  class CXXThrowExprBitfields {
    friend class ASTStmtReader;
    friend class CXXThrowExpr;

    unsigned : NumExprBits;

    /// Whether the thrown variable (if any) is in scope.
    unsigned IsThrownVariableInScope : 1;

    /// The location of the "throw".
SourceLocation ThrowLoc;
  };

  class CXXDefaultArgExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDefaultArgExpr;

    unsigned : NumExprBits;

    /// The location where the default argument expression was used.
    SourceLocation Loc;
  };

  class CXXDefaultInitExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDefaultInitExpr;

    unsigned : NumExprBits;

    /// The location where the default initializer expression was used.
    SourceLocation Loc;
  };

  class CXXScalarValueInitExprBitfields {
    friend class ASTStmtReader;
    friend class CXXScalarValueInitExpr;

    unsigned : NumExprBits;

    SourceLocation RParenLoc;
  };

  class CXXNewExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class CXXNewExpr;

    unsigned : NumExprBits;

    /// Was the usage ::new, i.e. is the global new to be used?
    unsigned IsGlobalNew : 1;

    /// Do we allocate an array? If so, the first trailing "Stmt *" is the
    /// size expression.
    unsigned IsArray : 1;

    /// Should the alignment be passed to the allocation function?
    unsigned ShouldPassAlignment : 1;

    /// If this is an array allocation, does the usual deallocation
    /// function for the allocated type want to know the allocated size?
    unsigned UsualArrayDeleteWantsSize : 1;

    /// What kind of initializer do we have? Could be none, parens, or braces.
    /// In storage, we distinguish between "none, and no initializer expr", and
    /// "none, but an implicit initializer expr".
    unsigned StoredInitializationStyle : 2;

    /// True if the allocated type was expressed as a parenthesized type-id.
    unsigned IsParenTypeId : 1;

    /// The number of placement new arguments.
    unsigned NumPlacementArgs;
  };

  class CXXDeleteExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDeleteExpr;

    unsigned : NumExprBits;

    /// Is this a forced global delete, i.e. "::delete"?
    unsigned GlobalDelete : 1;

    /// Is this the array form of delete, i.e. "delete[]"?
    unsigned ArrayForm : 1;

    /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
    /// applied to pointer-to-array type (ArrayFormAsWritten will be false
    /// while ArrayForm will be true).
    unsigned ArrayFormAsWritten : 1;

    /// Does the usual deallocation function for the element type require
    /// a size_t argument?
    unsigned UsualArrayDeleteWantsSize : 1;

    /// Location of the expression.
    SourceLocation Loc;
  };

  class TypeTraitExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class TypeTraitExpr;

    unsigned : NumExprBits;

    /// The kind of type trait, which is a value of a TypeTrait enumerator.
    unsigned Kind : 8;

    /// If this expression is not value-dependent, this indicates whether
    /// the trait evaluated true or false.
    unsigned Value : 1;

    /// The number of arguments to this type trait.
    unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
  };

  class DependentScopeDeclRefExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class DependentScopeDeclRefExpr;

    unsigned : NumExprBits;

    /// Whether the name includes info for explicit template
    /// keyword and arguments.
    unsigned HasTemplateKWAndArgsInfo : 1;
  };

  class CXXConstructExprBitfields {
    friend class ASTStmtReader;
    friend class CXXConstructExpr;

    unsigned : NumExprBits;

    unsigned Elidable : 1;
    unsigned HadMultipleCandidates : 1;
    unsigned ListInitialization : 1;
    unsigned StdInitListInitialization : 1;
    unsigned ZeroInitialization : 1;
    unsigned ConstructionKind : 3;

    SourceLocation Loc;
  };

  class ExprWithCleanupsBitfields {
    friend class ASTStmtReader; // deserialization
    friend class ExprWithCleanups;

    unsigned : NumExprBits;

    // When false, it must not have side effects.
    unsigned CleanupsHaveSideEffects : 1;

    unsigned NumObjects : 32 - 1 - NumExprBits;
  };

  class CXXUnresolvedConstructExprBitfields {
    friend class ASTStmtReader;
    friend class CXXUnresolvedConstructExpr;

    unsigned : NumExprBits;

    /// The number of arguments used to construct the type.
unsigned NumArgs;
  };

  class CXXDependentScopeMemberExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDependentScopeMemberExpr;

    unsigned : NumExprBits;

    /// Whether this member expression used the '->' operator or
    /// the '.' operator.
    unsigned IsArrow : 1;

    /// Whether this member expression has info for explicit template
    /// keyword and arguments.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// See getFirstQualifierFoundInScope() and the comment listing
    /// the trailing objects.
    unsigned HasFirstQualifierFoundInScope : 1;

    /// The location of the '->' or '.' operator.
    SourceLocation OperatorLoc;
  };

  class OverloadExprBitfields {
    friend class ASTStmtReader;
    friend class OverloadExpr;

    unsigned : NumExprBits;

    /// Whether the name includes info for explicit template
    /// keyword and arguments.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// Padding used by the derived classes to store various bits. If you
    /// need to add some data here, shrink this padding and add your data
    /// above. NumOverloadExprBits also needs to be updated.
    unsigned : 32 - NumExprBits - 1;

    /// The number of results.
    unsigned NumResults;
  };
  enum { NumOverloadExprBits = NumExprBits + 1 };

  class UnresolvedLookupExprBitfields {
    friend class ASTStmtReader;
    friend class UnresolvedLookupExpr;

    unsigned : NumOverloadExprBits;

    /// True if these lookup results should be extended by
    /// argument-dependent lookup if this is the operand of a function call.
    unsigned RequiresADL : 1;

    /// True if these lookup results are overloaded. This is pretty trivially
    /// rederivable if we urgently need to kill this field.
    unsigned Overloaded : 1;
  };
  static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
                "UnresolvedLookupExprBitfields must be <= than 4 bytes to"
                "avoid trashing OverloadExprBitfields::NumResults!");

  class UnresolvedMemberExprBitfields {
    friend class ASTStmtReader;
    friend class UnresolvedMemberExpr;

    unsigned : NumOverloadExprBits;

    /// Whether this member expression used the '->' operator or
    /// the '.' operator.
    unsigned IsArrow : 1;

    /// Whether the lookup results contain an unresolved using declaration.
    unsigned HasUnresolvedUsing : 1;
  };
  static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
                "UnresolvedMemberExprBitfields must be <= than 4 bytes to"
                "avoid trashing OverloadExprBitfields::NumResults!");

  class CXXNoexceptExprBitfields {
    friend class ASTStmtReader;
    friend class CXXNoexceptExpr;

    unsigned : NumExprBits;

    unsigned Value : 1;
  };

  class SubstNonTypeTemplateParmExprBitfields {
    friend class ASTStmtReader;
    friend class SubstNonTypeTemplateParmExpr;

    unsigned : NumExprBits;

    /// The location of the non-type template parameter reference.
    SourceLocation NameLoc;
  };

  //===--- C++ Coroutines TS bitfields classes ---===//

  class CoawaitExprBitfields {
    friend class CoawaitExpr;

    unsigned : NumExprBits;

    unsigned IsImplicit : 1;
  };

  //===--- Obj-C Expression bitfields classes ---===//

  class ObjCIndirectCopyRestoreExprBitfields {
    friend class ObjCIndirectCopyRestoreExpr;

    unsigned : NumExprBits;

    unsigned ShouldCopy : 1;
  };

  //===--- Clang Extensions bitfields classes ---===//

  class OpaqueValueExprBitfields {
    friend class ASTStmtReader;
    friend class OpaqueValueExpr;

    unsigned : NumExprBits;

    /// The OVE is a unique semantic reference to its source expression if this
    /// bit is set to true.
    unsigned IsUnique : 1;

    SourceLocation Loc;
  };

  enum { NumBoundsExprKindBits = 3 };

  class BoundsExprBitfields {
    friend class BoundsExpr;

    unsigned : NumExprBits;

    unsigned Kind : NumBoundsExprKindBits;
    unsigned IsCompilerGenerated : 1;
  };

  enum { NumInteropTypeExprKindBits = 1 };

  class InteropTypeExprBitfields {
    friend class InteropTypeExpr;

    unsigned : NumExprBits;

    unsigned IsCompilerGenerated : 1;
  };

  // All of the *Bitfields classes above start with an anonymous
  // "unsigned : NumStmtBits" (or wider) pad, so in this union every member
  // overlays StmtBits without clobbering the low common bits.
  union {
    // Same order as in StmtNodes.td.

    // Statements
    StmtBitfields StmtBits;
    NullStmtBitfields NullStmtBits;
    CompoundStmtBitfields CompoundStmtBits;
    LabelStmtBitfields LabelStmtBits;
    AttributedStmtBitfields AttributedStmtBits;
    IfStmtBitfields IfStmtBits;
    SwitchStmtBitfields SwitchStmtBits;
    WhileStmtBitfields WhileStmtBits;
    DoStmtBitfields DoStmtBits;
    ForStmtBitfields ForStmtBits;
    GotoStmtBitfields GotoStmtBits;
    ContinueStmtBitfields ContinueStmtBits;
    BreakStmtBitfields BreakStmtBits;
    ReturnStmtBitfields ReturnStmtBits;
    SwitchCaseBitfields SwitchCaseBits;

    // Expressions
    ExprBitfields ExprBits;
    ConstantExprBitfields ConstantExprBits;
    PredefinedExprBitfields PredefinedExprBits;
    DeclRefExprBitfields DeclRefExprBits;
    FloatingLiteralBitfields FloatingLiteralBits;
    StringLiteralBitfields StringLiteralBits;
    CharacterLiteralBitfields CharacterLiteralBits;
    UnaryOperatorBitfields UnaryOperatorBits;
    UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
    ArraySubscriptExprBitfields ArraySubscriptExprBits;
    CallExprBitfields CallExprBits;
    MemberExprBitfields MemberExprBits;
    CastExprBitfields CastExprBits;
    BinaryOperatorBitfields BinaryOperatorBits;
    InitListExprBitfields InitListExprBits;
    ParenListExprBitfields ParenListExprBits;
    GenericSelectionExprBitfields GenericSelectionExprBits;
    PseudoObjectExprBitfields PseudoObjectExprBits;
    SourceLocExprBitfields SourceLocExprBits;

    // C++ Expressions
    CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
    CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
    CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
    CXXThisExprBitfields CXXThisExprBits;
    CXXThrowExprBitfields CXXThrowExprBits;
    CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
    CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
    CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
    CXXNewExprBitfields CXXNewExprBits;
    CXXDeleteExprBitfields CXXDeleteExprBits;
    TypeTraitExprBitfields TypeTraitExprBits;
    DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
    CXXConstructExprBitfields CXXConstructExprBits;
    ExprWithCleanupsBitfields ExprWithCleanupsBits;
    CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
    CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
    OverloadExprBitfields OverloadExprBits;
    UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
    UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
    CXXNoexceptExprBitfields CXXNoexceptExprBits;
    SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;

    // C++ Coroutines TS expressions
    CoawaitExprBitfields CoawaitBits;

    BoundsExprBitfields BoundsExprBits;
    InteropTypeExprBitfields InteropTypeExprBits;

    // Obj-C Expressions
    ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;

    // Clang Extensions
    OpaqueValueExprBitfields OpaqueValueExprBits;
  };

public:
  // Only allow allocation of Stmts using the allocator in ASTContext
  // or by doing a placement new.
  void* operator new(size_t bytes, const ASTContext& C,
                     unsigned alignment = 8);

  void* operator new(size_t bytes, const ASTContext* C,
                     unsigned alignment = 8) {
    return operator new(bytes, *C, alignment);
  }

  void *operator new(size_t bytes, void *mem) noexcept { return mem; }

  void operator delete(void *, const ASTContext &, unsigned) noexcept {}
  void operator delete(void *, const ASTContext *, unsigned) noexcept {}
  void operator delete(void *, size_t) noexcept {}
  void operator delete(void *, void *) noexcept {}

public:
  /// A placeholder type used to construct an empty shell of a
  /// type, that will be filled in later (e.g., by some
  /// de-serialization).
  struct EmptyShell {};

protected:
  /// Iterator for iterating over Stmt * arrays that contain only T *.
  ///
  /// This is needed because AST nodes use Stmt* arrays to store
  /// references to children (to be compatible with StmtIterator).
template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
  struct CastIterator
      : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
                                    std::random_access_iterator_tag, TPtr> {
    using Base = typename CastIterator::iterator_adaptor_base;

    CastIterator() : Base(nullptr) {}
    CastIterator(StmtPtr *I) : Base(I) {}

    // Dereference casts the stored Stmt* down to T* (null stays null).
    typename Base::value_type operator*() const {
      return cast_or_null<T>(*this->I);
    }
  };

  /// Const iterator for iterating over Stmt * arrays that contain only T *.
  template <typename T>
  using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;

  using ExprIterator = CastIterator<Expr>;
  using ConstExprIterator = ConstCastIterator<Expr>;

private:
  /// Whether statistic collection is enabled.
  static bool StatisticsEnabled;

protected:
  /// Construct an empty statement.
  explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  Stmt() = delete;
  Stmt(const Stmt &) = delete;
  Stmt(Stmt &&) = delete;
  Stmt &operator=(const Stmt &) = delete;
  Stmt &operator=(Stmt &&) = delete;

  Stmt(StmtClass SC) {
    static_assert(sizeof(*this) <= 8,
                  "changing bitfields changed sizeof(Stmt)");
    static_assert(sizeof(*this) % alignof(void *) == 0,
                  "Insufficient alignment!");
    StmtBits.sClass = SC;
    StmtBits.IsOMPStructuredBlock = false;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }

  StmtClass getStmtClass() const {
    return static_cast<StmtClass>(StmtBits.sClass);
  }

  const char *getStmtClassName() const;

  bool isOMPStructuredBlock() const { return StmtBits.IsOMPStructuredBlock; }
  void setIsOMPStructuredBlock(bool IsOMPStructuredBlock) {
    StmtBits.IsOMPStructuredBlock = IsOMPStructuredBlock;
  }

  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getBeginLoc() const LLVM_READONLY;
  SourceLocation getEndLoc() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  void dump() const;
  void dump(SourceManager &SM) const;
  void dump(raw_ostream &OS, SourceManager &SM) const;
  void dump(raw_ostream &OS) const;

  /// \return Unique reproducible object identifier
  int64_t getID(const ASTContext &Context) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy, unsigned Indentation = 0,
                   StringRef NewlineSymbol = "\n",
                   const ASTContext *Context = nullptr) const;

  /// Pretty-prints in JSON format.
  void printJson(raw_ostream &Out, PrinterHelper *Helper,
                 const PrintingPolicy &Policy, bool AddQuotes) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip no-op (attributed, compound) container stmts and skip captured
  /// stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);
  const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
    return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
  }

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
        const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpressions of an
  /// AST node. This permits easy iteration over all nodes in the AST.
  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;

  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();

  const_child_range children() const {
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;

  /// Calculate a unique representation for a statement that is
  /// stable across compiler invocations.
///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  /// have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  const_child_range children() const {
    auto Children = const_cast<DeclStmt *>(this)->children();
    return const_child_range(Children);
  }

  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }

  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }

  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }

  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};

/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass) {
    NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
    setSemiLoc(L);
  }

  /// Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
  void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }

  bool hasLeadingEmptyMacro() const {
    return NullStmtBits.HasLeadingEmptyMacro;
  }

  SourceLocation getBeginLoc() const { return getSemiLoc(); }
  SourceLocation getEndLoc() const { return getSemiLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

// The kind of Checked C checking to do in a scope.
enum class CheckedScopeKind {
  // No checking.
  Unchecked = 0x1,
  /// Check properties for bounds safety.
  Bounds = 0x2,
  /// Check properties for bounds safety and preventing type confusion.
  BoundsAndTypes = 0x4
};

/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final
    : public Stmt,
      private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
  SourceLocation RBraceLoc;

  // Written checked scope specifier.
  // 2 bits: holds a CheckedScopeSpecifier value (see the casts in the
  // accessors below).
  unsigned WrittenCSS : 2;
  // Inferred checked scope specifier, using information from parent
  // scope also.
  unsigned CSS : 2;
  // Checked scope keyword (_Checked / _Unchecked) location.
  SourceLocation CSSLoc;
  // Checked scope modifier (_Bounds_only) location.
  SourceLocation CSMLoc;

  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB,
               CheckedScopeSpecifier WrittenCSS = CSS_None,
               CheckedScopeSpecifier CSS = CSS_Unchecked,
               SourceLocation CSSLoc = SourceLocation(),
               SourceLocation CSMLoc = SourceLocation());

  explicit CompoundStmt(EmptyShell Empty)
      : Stmt(CompoundStmtClass, Empty), WrittenCSS(CSS_None),
        CSS(CSS_Unchecked), CSSLoc(), CSMLoc() {}

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt*> Stmts,
                              SourceLocation LB, SourceLocation RB,
                              CheckedScopeSpecifier WrittenCSS = CSS_None,
                              CheckedScopeSpecifier CSS = CSS_Unchecked,
                              SourceLocation CSSLoc = SourceLocation(),
                              SourceLocation CSMLoc = SourceLocation());

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), RBraceLoc(Loc), WrittenCSS(CSS_None),
        CSS(CSS_Unchecked), CSSLoc(Loc), CSMLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
    CompoundStmtBits.LBraceLoc = Loc;
  }

  // Build an empty compound statement.
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  CheckedScopeSpecifier getWrittenCheckedSpecifier() const {
    return (CheckedScopeSpecifier) WrittenCSS;
  }

  CheckedScopeSpecifier getCheckedSpecifier() const {
    return (CheckedScopeSpecifier) CSS;
  }

  void setWrittenCheckedSpecifiers(CheckedScopeSpecifier NS) {
    WrittenCSS = NS;
  }

  void setCheckedSpecifiers(CheckedScopeSpecifier NS) {
    CSS = NS;
  }

  bool isCheckedScope() const { return CSS != CSS_Unchecked; }

  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  // Get the Stmt that StmtExpr would consider to be the result of this
  // compound statement. This is used by StmtExpr to properly emulate the GCC
  // compound expression extension, which ignores trailing NullStmts when
  // getting the result of the expression.
  // i.e. ({ 5;;; })
  //           ^^ ignored
  // If we don't find something that isn't a NullStmt, just return the last
  // Stmt.
  Stmt *getStmtExprResult() {
    for (auto *B : llvm::reverse(body())) {
      if (!isa<NullStmt>(B))
        return B;
    }
    return body_back();
  }

  const Stmt *getStmtExprResult() const {
    return const_cast<CompoundStmt *>(this)->getStmtExprResult();
  }

  SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }
  SourceLocation getCheckedSpecifierLoc() const { return CSSLoc; }
  SourceLocation getSpecifierModifierLoc() const { return CSMLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};

// SwitchCase is the base class for CaseStmt and DefaultStmt.
class SwitchCase : public Stmt {
protected:
  /// The location of the ":".
  SourceLocation ColonLoc;

  // The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
  // SourceLocation KeywordLoc;

  /// A pointer to the following CaseStmt or DefaultStmt class,
  /// used by SwitchStmt.
  /// Intrusive singly-linked list: a SwitchStmt chains all of its cases
  /// through this pointer (see SwitchStmt::addSwitchCase).
  SwitchCase *NextSwitchCase = nullptr;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), ColonLoc(ColonLoc) {
    setKeywordLoc(KWLoc);
  }

  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  // Defined out of line below, once CaseStmt and DefaultStmt are complete.
  inline Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase *>(this)->getSubStmt();
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  inline SourceLocation getEndLoc() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};

/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing objects
  // at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allows ranges in case statements of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case statement
  //   with a range.
  //   Present if and only if caseStmtIsGNURange() is true.

  // Offsets into the trailing Stmt* array. The RHS slot exists only for GNU
  // range cases, so the substatement offset is computed relative to rhsOffset.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);

  /// Build an empty case statement.
  static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);

  /// True if this case statement is of the form case LHS ... RHS, which
  /// is a GNU extension. In this case the RHS can be obtained with getRHS()
  /// and the location of the ellipsis can be obtained with getEllipsisLoc().
  bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }

  SourceLocation getCaseLoc() const { return getKeywordLoc(); }
  void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }

  /// Get the location of the ...
  /// in a case statement of the form LHS ... RHS.
  SourceLocation getEllipsisLoc() const {
    // The SourceLocation trailing object only exists for GNU range cases.
    return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
                                : SourceLocation();
  }

  /// Set the location of the ... in a case statement of the form LHS ... RHS.
  /// Assert that this case statement is of this form.
  void setEllipsisLoc(SourceLocation L) {
    assert(
        caseStmtIsGNURange() &&
        "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
    *getTrailingObjects<SourceLocation>() = L;
  }

  // The operands are stored as Stmt* in the trailing array (so children()
  // can expose a uniform Stmt* range) and cast to Expr* at the accessors.
  Expr *getLHS() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  const Expr *getLHS() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  void setLHS(Expr *Val) {
    getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Expr *getRHS() {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  const Expr *getRHS() const {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  void setRHS(Expr *Val) {
    assert(caseStmtIsGNURange() &&
           "setRHS but this is not a case stmt of the form LHS ... RHS!");
    getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
  const Stmt *getSubStmt() const {
    return getTrailingObjects<Stmt *>()[subStmtOffset()];
  }

  void setSubStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

/// DefaultStmt - Represents the 'default' label of a switch statement.
class DefaultStmt : public SwitchCase {
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }
};

// Out-of-line definitions: dispatch to the derived class, now that both
// CaseStmt and DefaultStmt are complete types.
SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getEndLoc();
  else if (const auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

Stmt *SwitchCase::getSubStmt() {
  if (auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getSubStmt();
  else if (auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
  using Stmt::Stmt;

public:
  // Defined out of line. NOTE(review): presumably returns the underlying
  // expression when this value statement wraps one (or null) — confirm
  // against the definition in Stmt.cpp.
  const Expr *getExprStmt() const;
  Expr *getExprStmt() {
    const ValueStmt *ConstThis = this;
    return const_cast<Expr*>(ConstThis->getExprStmt());
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() >= firstValueStmtConstant &&
           T->getStmtClass() <= lastValueStmtConstant;
  }
};

/// LabelStmt - Represents a label, which has a substatement. For example:
///    foo: return;
class LabelStmt : public ValueStmt {
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  /// Build a label statement.
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    setIdentLoc(IL);
  }

  /// Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}

  // The identifier location lives in the Stmt bit-fields (LabelStmtBits).
  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }

  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }

  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};

/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ...
/// }
class AttributedStmt final
    : public ValueStmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  Stmt *SubStmt;

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : ValueStmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  // The Attr* array lives in the trailing storage allocated by Create().
  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};

/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at the end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact a "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //   Always present.
  //
  // * A "Stmt *" for the else statement.
  //   Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //   Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  // Offsets into the trailing Stmt* array; the optional slots (init, cond
  // var) shift later entries, hence the additive boolean arithmetic.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
         VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        Stmt *Then, SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  // The condition is stored as a Stmt* in the trailing array and cast to
  // Expr* here so children() can expose a uniform Stmt* range.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  // The else slot only exists when hasElseStorage(); otherwise null.
  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
/// \code /// if (int x = foo()) { /// printf("x is %d", x); /// } /// \endcode VarDecl *getConditionVariable(); const VarDecl *getConditionVariable() const { return const_cast<IfStmt *>(this)->getConditionVariable(); } /// Set the condition variable for this if statement. /// The if statement must have storage for the condition variable. void setConditionVariable(const ASTContext &Ctx, VarDecl *V); /// If this IfStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. DeclStmt *getConditionVariableDeclStmt() { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } const DeclStmt *getConditionVariableDeclStmt() const { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } Stmt *getInit() { return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } const Stmt *getInit() const { return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } void setInit(Stmt *Init) { assert(hasInitStorage() && "This if statement has no storage for an init statement!"); getTrailingObjects<Stmt *>()[initOffset()] = Init; } SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; } void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; } SourceLocation getElseLoc() const { return hasElseStorage() ? 
        *getTrailingObjects<SourceLocation>() : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }

  // Iterators over subexpressions. The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Optional slots (init, condition variable) shift the later offsets.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);

  /// Build an empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  // The init slot only exists when hasInitStorage(); otherwise null.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ?
        getTrailingObjects<Stmt *>()[initOffset()] : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  /// case 0: break;
  /// // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  // Prepends SC to the intrusive singly-linked case list, so the list ends
  // up in reverse order of insertion.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // A switch with a null body ends at its condition expression.
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};

/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // The optional condition-variable slot shifts the later offsets.
  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  // The condition is stored as a Stmt* in the trailing array and cast to
  // Expr* at the accessor boundary.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ?
        static_cast<DeclStmt *>(
            getTrailingObjects<Stmt *>()[varOffset()])
        : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  // The "while" keyword location lives in the Stmt bit-fields.
  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setCond(Cond);
    setBody(Body);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }

  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  // The "do" keyword location lives in the Stmt bit-fields.
  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};

/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
/// \code /// for (int x = random(); int y = mangle(x); ++x) { /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this ForStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getInit() const { return SubExprs[INIT]; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); } const Stmt *getBody() const { return SubExprs[BODY]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getForLoc() const { return ForStmtBits.ForLoc; } void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getBeginLoc() const { return getForLoc(); } SourceLocation getEndLoc() const { return getBody()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ForStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } const_child_range children() const { return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR); } }; /// GotoStmt - This represents a direct goto. 
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation LabelLoc;

public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    setGotoLoc(GL);
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators.  A direct goto is a leaf statement: it has no sub-statements,
  // so both ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc;
  Stmt *Target;

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setTarget(target);
    setGotoLoc(gotoLoc);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators.  The single child is the target address expression.
  child_range children() { return child_range(&Target, &Target + 1); }

  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};

/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    setContinueLoc(CL);
  }

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }

  // A continue statement occupies exactly the 'continue' keyword.
  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
    setBreakLoc(BL);
  }

  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }

  // A break statement occupies exactly the 'break' keyword.
  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final : public Stmt, private llvm::TrailingObjects<ReturnStmt, const VarDecl *> { friend TrailingObjects; /// The return expression. Stmt *RetExpr; // ReturnStmt is followed optionally by a trailing "const VarDecl *" // for the NRVO candidate. Present if and only if hasNRVOCandidate(). /// True if this ReturnStmt has storage for an NRVO candidate. bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; } unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const { return hasNRVOCandidate(); } /// Build a return statement. ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate); /// Build an empty return statement. explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate); public: /// Create a return statement. static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate); /// Create an empty return statement, optionally with /// storage for an NRVO candidate. static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate); Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); } const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); } void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); } /// Retrieve the variable that might be used for the named return /// value optimization. /// /// The optimization itself can only be performed if the variable is /// also marked as an NRVO object. const VarDecl *getNRVOCandidate() const { return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>() : nullptr; } /// Set the variable that might be used for the named return value /// optimization. The return statement must have storage for it, /// which is the case if and only if hasNRVOCandidate() is true. 
void setNRVOCandidate(const VarDecl *Var) { assert(hasNRVOCandidate() && "This return statement has no storage for an NRVO candidate!"); *getTrailingObjects<const VarDecl *>() = Var; } SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; } void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; } SourceLocation getBeginLoc() const { return getReturnLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return RetExpr ? RetExpr->getEndLoc() : getReturnLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ReturnStmtClass; } // Iterators child_range children() { if (RetExpr) return child_range(&RetExpr, &RetExpr + 1); return child_range(child_iterator(), child_iterator()); } const_child_range children() const { if (RetExpr) return const_child_range(&RetExpr, &RetExpr + 1); return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt. class AsmStmt : public Stmt { protected: friend class ASTStmtReader; SourceLocation AsmLoc; /// True if the assembly statement does not have any input or output /// operands. bool IsSimple; /// If true, treat this inline assembly as having side effects. /// This assembly statement should not be optimized, deleted or moved. bool IsVolatile; unsigned NumOutputs; unsigned NumInputs; unsigned NumClobbers; Stmt **Exprs = nullptr; AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, unsigned numclobbers) : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile), NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) {} public: /// Build an empty inline-assembly statement. 
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {} SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getBeginLoc() const LLVM_READONLY { return {}; } SourceLocation getEndLoc() const LLVM_READONLY { return {}; } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } /// getInputConstraint - Return the specified input constraint. Unlike output /// constraints, these can be empty. StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. 
using inputs_iterator = ExprIterator; using const_inputs_iterator = ConstExprIterator; using inputs_range = llvm::iterator_range<inputs_iterator>; using inputs_const_range = llvm::iterator_range<const_inputs_iterator>; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. using outputs_iterator = ExprIterator; using const_outputs_iterator = ConstExprIterator; using outputs_range = llvm::iterator_range<outputs_iterator>; using outputs_const_range = llvm::iterator_range<const_outputs_iterator>; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } const_child_range children() const { return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. class GCCAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. 
StringLiteral **Constraints = nullptr; StringLiteral **Clobbers = nullptr; IdentifierInfo **Names = nullptr; unsigned NumLabels = 0; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, unsigned numlabels, SourceLocation rparenloc); /// Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {} SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. 
CharSourceRange Range; public: AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {} AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin, SourceLocation End) : MyKind(Operand), Str(S), OperandNo(OpNo), Range(CharSourceRange::getCharRange(Begin, End)) {} bool isString() const { return MyKind == String; } bool isOperand() const { return MyKind == Operand; } const std::string &getString() const { return Str; } unsigned getOperandNo() const { assert(isOperand()); return OperandNo; } CharSourceRange getRange() const { assert(isOperand() && "Range is currently used only for Operands."); return Range; } /// getModifier - Get the modifier for this operand, if present. This /// returns '\0' if there was no modifier. char getModifier() const; }; /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing /// it into pieces. If the asm string is erroneous, emit errors and return /// true, otherwise return false. This handles canonicalization and /// translation of strings from GCC syntax to LLVM IR syntax, and handles //// flattening of named references like %[foo] to Operand AsmStringPiece's. unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces, const ASTContext &C, unsigned &DiagOffs) const; /// Assemble final IR asm string. 
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return {}; } StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return {}; } StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } //===--- Labels ---===// bool isAsmGoto() const { return NumLabels > 0; } unsigned getNumLabels() const { return NumLabels; } IdentifierInfo *getLabelIdentifier(unsigned i) const { return Names[i + NumInputs]; } AddrLabelExpr *getLabelExpr(unsigned i) const; StringRef getLabelName(unsigned i) const; using labels_iterator = CastIterator<AddrLabelExpr>; using const_labels_iterator = ConstCastIterator<AddrLabelExpr>; using labels_range = llvm::iterator_range<labels_iterator>; using labels_const_range = llvm::iterator_range<const_labels_iterator>; labels_iterator begin_labels() { return &Exprs[0] + NumInputs; } labels_iterator end_labels() { return &Exprs[0] + NumInputs + 
NumLabels; } labels_range labels() { return labels_range(begin_labels(), end_labels()); } const_labels_iterator begin_labels() const { return &Exprs[0] + NumInputs; } const_labels_iterator end_labels() const { return &Exprs[0] + NumInputs + NumLabels; } labels_const_range labels() const { return labels_const_range(begin_labels(), end_labels()); } private: void setOutputsAndInputsAndClobbers(const ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, unsigned NumLabels, StringLiteral **Clobbers, unsigned NumClobbers); public: //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. class MSAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks = 0; Token *AsmToks = nullptr; StringRef *Constraints = nullptr; StringRef *Clobbers = nullptr; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// Build an empty MS-style inline-assembly statement. 
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {} SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return llvm::makeArrayRef(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return 
child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } const_child_range children() const { return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {} public: static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); } SourceLocation getExceptLoc() const { return Loc; } SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); } Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Children[BLOCK]); } child_range children() { return child_range(Children, Children+2); } const_child_range children() const { return const_child_range(Children, Children + 2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHExceptStmtClass; } }; class SEHFinallyStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Block; SEHFinallyStmt(SourceLocation Loc, Stmt *Block); explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {} public: static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc, Stmt *Block); SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getEndLoc(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } const_child_range children() const { return const_child_range(&Block, &Block + 1); } static bool classof(const Stmt *T) { return 
T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {} public: static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children, Children+2); } const_child_range children() const { return const_child_range(Children, Children + 2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } }; /// Represents a __leave statement. class SEHLeaveStmt : public Stmt { SourceLocation LeaveLoc; public: explicit SEHLeaveStmt(SourceLocation LL) : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {} /// Build an empty __leave statement. 
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {} SourceLocation getLeaveLoc() const { return LeaveLoc; } void setLeaveLoc(SourceLocation L) { LeaveLoc = L; } SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; } SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHLeaveStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// This captures a statement into a function. For example, the following /// pragma annotated compound statement can be represented as a CapturedStmt, /// and this compound statement is the body of an anonymous outlined function. /// @code /// #pragma omp parallel /// { /// compute(); /// } /// @endcode class CapturedStmt : public Stmt { public: /// The different capture forms: by 'this', by reference, capture for /// variable-length array type etc. enum VariableCaptureKind { VCK_This, VCK_ByRef, VCK_ByCopy, VCK_VLAType, }; /// Describes the capture of either a variable, or 'this', or /// variable-length array type. class Capture { llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: friend class ASTStmtReader; /// Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr); /// Determine the kind of capture. VariableCaptureKind getCaptureKind() const; /// Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// Determine whether this capture handles the C++ 'this' pointer. 
bool capturesThis() const { return getCaptureKind() == VCK_This; } /// Determine whether this capture handles a variable (by reference). bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; } /// Determine whether this capture handles a variable by copy. bool capturesVariableByCopy() const { return getCaptureKind() == VCK_ByCopy; } /// Determine whether this capture handles a variable-length array /// type. bool capturesVariableArrayType() const { return getCaptureKind() == VCK_VLAType; } /// Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture captures a variable. VarDecl *getCapturedVar() const; }; private: /// The number of variable captured, including 'this'. unsigned NumCaptures; /// The pointer part is the implicit the outlined function and the /// int part is the captured region kind, 'CR_Default' etc. llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind; /// The record for captured variables, a RecordDecl or CXXRecordDecl. RecordDecl *TheRecordDecl = nullptr; /// Construct a captured statement. CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// Construct an empty captured statement. CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); } Stmt *const *getStoredStmts() const { return reinterpret_cast<Stmt *const *>(this + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: friend class ASTStmtReader; static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// Retrieve the statement being captured. 
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; } /// Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl(); const CapturedDecl *getCapturedDecl() const; /// Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D); /// Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const; /// Set the captured region kind. void setCapturedRegionKind(CapturedRegionKind Kind); /// Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// An iterator that walks over the captures. using capture_iterator = Capture *; using const_capture_iterator = const Capture *; using capture_range = llvm::iterator_range<capture_iterator>; using capture_const_range = llvm::iterator_range<const_capture_iterator>; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// Iterator that walks over the capture initialization arguments. 
using capture_init_iterator = Expr **; using capture_init_range = llvm::iterator_range<capture_init_iterator>; /// Const iterator that walks over the capture initialization /// arguments. using const_capture_init_iterator = Expr *const *; using const_capture_init_range = llvm::iterator_range<const_capture_init_iterator>; capture_init_range capture_inits() { return capture_init_range(capture_init_begin(), capture_init_end()); } const_capture_init_range capture_inits() const { return const_capture_init_range(capture_init_begin(), capture_init_end()); } /// Retrieve the first initialization argument. capture_init_iterator capture_init_begin() { return reinterpret_cast<Expr **>(getStoredStmts()); } const_capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr *const *>(getStoredStmts()); } /// Retrieve the iterator pointing one past the last initialization /// argument. capture_init_iterator capture_init_end() { return capture_init_begin() + NumCaptures; } const_capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getBeginLoc() const LLVM_READONLY { return getCapturedStmt()->getBeginLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getCapturedStmt()->getEndLoc(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); const_child_range children() const; }; } // namespace clang #endif // LLVM_CLANG_AST_STMT_H
/* gradbm_mex.c */
#include <inttypes.h> #include <omp.h> #include "mex.h" void gradbmf(float *dx, float *dy, float *dz, const float *u, const uint8_t *G, const double *h, const size_t *sz); void gradbmd(double *dx, double *dy, double *dz, const double *u, const uint8_t *G, const double *h, const size_t *sz); void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { if ((nrhs != 6) || (nlhs > 1)) { mexErrMsgTxt("Usage: gradbm_mex(dx, dy, dz, u, G, h);"); return; } const uint8_t *G = (const uint8_t *)mxGetData(prhs[4]); const double *h = (const double *)mxGetData(prhs[5]); const size_t *sz = (const size_t *)mxGetDimensions(prhs[0]); if (mxIsSingle(prhs[0])) { float *dx = (float *)mxGetData(prhs[0]); float *dy = (float *)mxGetData(prhs[1]); float *dz = (float *)mxGetData(prhs[2]); const float *u = (const float *)mxGetData(prhs[3]); gradbmf(dx, dy, dz, u, G, h, sz); } else { double *dx = (double *)mxGetData(prhs[0]); double *dy = (double *)mxGetData(prhs[1]); double *dz = (double *)mxGetData(prhs[2]); const double *u = (const double *)mxGetData(prhs[3]); gradbmd(dx, dy, dz, u, G, h, sz); } if (nlhs == 1) { plhs[0] = mxCreateDoubleScalar(1.0); } return; } void gradbmf(float *dx, float *dy, float *dz, const float *u, const uint8_t *G, const double *h, const size_t *sz) { size_t i, j, k; size_t l; const size_t nx = sz[0]; const size_t ny = sz[1]; const size_t nz = sz[2]; const size_t nxny = nx*ny; const size_t nxnynz = nx*ny*nz; const size_t NX = nx-1; const size_t NY = nx*(ny-1); const size_t NZ = nxny*(nz-1); const float hx = (float)(1.0/h[0]); const float hy = (float)(1.0/h[1]); const float hz = (float)(1.0/h[2]); #pragma omp parallel for private(i,j,k,l) schedule(static) \ if(nxnynz > 16*16*16) for(k = 0; k < nxnynz; k += nxny) { for(j = 0; j < nxny; j += nx) { l = j + k; for(i = 0; i < nx; ++i, ++l) { if (G[l]) { dz[l] = (k > 0) && G[l-nxny] ? hz*(u[l]-u[l-nxny]) : (k < NZ) && G[l+nxny] ? hz*(u[l+nxny]-u[l]) : 0.0f; dy[l] = (j > 0) && G[l-nx] ? 
hy*(u[l]-u[l-nx]) : (j < NY) && G[l+nx] ? hy*(u[l+nx]-u[l]) : 0.0f; dx[l] = (i > 0) && G[l-1] ? hx*(u[l]-u[l-1]) : (i < NX) && G[l+1] ? hx*(u[l+1]-u[l]) : 0.0f; } } } } return; } void gradbmd(double *dx, double *dy, double *dz, const double *u, const uint8_t *G, const double *h, const size_t *sz) { size_t i, j, k; size_t l; const size_t nx = sz[0]; const size_t ny = sz[1]; const size_t nz = sz[2]; const size_t nxny = nx*ny; const size_t nxnynz = nx*ny*nz; const size_t NX = nx-1; const size_t NY = nx*(ny-1); const size_t NZ = nxny*(nz-1); const double hx = 1.0/h[0]; const double hy = 1.0/h[1]; const double hz = 1.0/h[2]; #pragma omp parallel for private(i,j,k,l) schedule(static) \ if(nxnynz > 16*16*16) for(k = 0; k < nxnynz; k += nxny) { for(j = 0; j < nxny; j += nx) { l = j + k; for(i = 0; i < nx; ++i, ++l) { if (G[l]) { dz[l] = (k > 0) && G[l-nxny] ? hz*(u[l]-u[l-nxny]) : (k < NZ) && G[l+nxny] ? hz*(u[l+nxny]-u[l]) : 0.0; dy[l] = (j > 0) && G[l-nx] ? hy*(u[l]-u[l-nx]) : (j < NY) && G[l+nx] ? hy*(u[l+nx]-u[l]) : 0.0; dx[l] = (i > 0) && G[l-1] ? hx*(u[l]-u[l-1]) : (i < NX) && G[l+1] ? hx*(u[l+1]-u[l]) : 0.0; } } } } return; }
MultiHashFunction.h
/*
 * MultiHashFunction.h
 *
 *  Created on: 10/feb/2017
 *      Author: samuele
 */

#ifndef HASH_MULTIHASHFUNCTION_H_
#define HASH_MULTIHASHFUNCTION_H_

#include "HashFunction.h"

// Compute, for every spaced q-mer in spaced_qmers, the hash (with error
// positions) of each window of s_Str, writing the results into vHashes.
//
// Strategy visible in the code: the hashes of the shared "units" (runs of
// ones common to the seeds, from spaced_qmers.getMapUnit()) are computed once
// via GetHashes_speedup_previous, then each seed's hash is assembled by
// OR-ing the unit hashes shifted to the unit's bit offset.
//
// fConvertion maps a character to its numeric code for hashing.
//
// NOTE(review): the inner "#pragma omp parallel for" loops sit inside an
// outer parallel-for; unless nested parallelism is enabled they execute with
// a single-thread team — confirm this nesting is intentional.
inline static void GetHashes_speedup_multi_unit(const string& s_Str, const SpacedQmer_Multi& spaced_qmers,
		Hash_Err_V_V& vHashes, hash_type (*fConvertion)(char)) {
	//Initialize the vector for hashes and errors (kept for reference):
//	if(vHashes.size() != v_spaced.size())
//	{
//		vHashes.resize(v_spaced.size());
//		vHashes.shrink_to_fit();
//	}

	//Get hash v for all unit present
	const MapUnit& map_unit = spaced_qmers.getMapUnit();
	Hash_Err_V_V hash_v(map_unit.n_one.size());
	#pragma omp parallel for
	for(size_t i = 0; i < map_unit.n_one.size(); ++i)//parallel computation of the per-unit hashes
		GetHashes_speedup_previous(s_Str, map_unit.n_one[i], hash_v[i], fConvertion);

	#pragma omp parallel for
	for(size_t s = 0; s < spaced_qmers.size(); ++s)
	{
		const SpacedQmer& spaced = spaced_qmers[s];
		// Number of q-mer windows that fit in the string.
		// NOTE(review): computed in size_t then stored in long — if
		// GetQ() exceeds the string length the unsigned subtraction
		// wraps; the n_hashes>0 guard below relies on the conversion
		// producing a non-positive value. Verify on target platforms.
		long n_hashes = s_Str.size()-spaced.GetQ()+1;
		Hash_Err_V& hashes = vHashes[s];
		hashes.clear();
		if(n_hashes>0)
		{
			hashes.resize(n_hashes); //Create the output vector
			//Combine the unit hashes into this seed's hash
			const V_Pos_Ones& v_pos = map_unit.v_v_pos[s];
			#pragma omp parallel for
			for(size_t i = 0; i < hashes.size(); ++i)
			{
				Hash_Err& curr_hash = hashes[i];
				for(const Pos_Ones& unit_pos : v_pos)
				{
					Hash_Err_V& hash_unit_v = hash_v[unit_pos.index_one];
					// Unit hash for the window shifted by the unit's start position.
					Hash_Err& hash_unit = hash_unit_v[i+unit_pos.pos_start];
					// 2 bits per symbol: place the unit at its bit offset.
					curr_hash.hash |= (hash_unit.hash << unit_pos.n_one_before*2);
					if(!hash_unit.isCorrect())
						curr_hash.add_pos_err(unit_pos.n_one_before, hash_unit);//record error at the corrected position
				}
			}
		}
	}
}

#endif /* HASH_MULTIHASHFUNCTION_H_ */
mm_gomp.c
/******************************************************************************
* FILE: omp_mm.c
* DESCRIPTION:
*   OpenMp Example - Matrix Multiply - C Version
*   Demonstrates a matrix multiply using OpenMP. Threads share row iterations
*   according to a predefined chunk size.
* AUTHOR: Blaise Barney
* LAST REVISED: 06/28/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#define NRA 62                 /* number of rows in matrix A */
#define NCA 15                 /* number of columns in matrix A */
#define NCB 7                  /* number of columns in matrix B */

/* Multiplies a[NRA][NCA] by b[NCA][NCB] into c[NRA][NCB], distributing
 * rows across OpenMP threads in chunks, then prints the result.
 * Returns 0 on success (the return statement was previously missing,
 * which is undefined behavior for a non-void main before C99). */
int main (int argc, char *argv[])
{
int	tid, nthreads, i, j, k, chunk;
double	a[NRA][NCA],           /* matrix A to be multiplied */
	b[NCA][NCB],           /* matrix B to be multiplied */
	c[NRA][NCB];           /* result matrix C */

chunk = 10;                    /* set loop iteration chunk size */

/*** Spawn a parallel region explicitly scoping all variables ***/
#pragma omp parallel shared(a,b,c,nthreads,chunk) private(tid,i,j,k)
  {
  tid = omp_get_thread_num();
  if (tid == 0)
    {
    /* Only the master thread reports the team size. */
    nthreads = omp_get_num_threads();
    printf("Starting matrix multiple example with %d threads\n",nthreads);
    printf("Initializing matrices...\n");
    }
  /*** Initialize matrices (each omp-for ends with an implicit barrier,
   *** so a, b and c are fully initialized before the multiply) ***/
  #pragma omp for schedule (static, chunk)
  for (i=0; i<NRA; i++)
    for (j=0; j<NCA; j++)
      a[i][j]= i+j;
  #pragma omp for schedule (static, chunk)
  for (i=0; i<NCA; i++)
    for (j=0; j<NCB; j++)
      b[i][j]= i*j;
  #pragma omp for schedule (static, chunk)
  for (i=0; i<NRA; i++)
    for (j=0; j<NCB; j++)
      c[i][j]= 0;

  /*** Do matrix multiply sharing iterations on outer loop ***/
  /*** Display who does which iterations for demonstration purposes ***/
  printf("Thread %d starting matrix multiply...\n",tid);
  #pragma omp for schedule (static, chunk)
  for (i=0; i<NRA; i++)
    {
    printf("Thread=%d did row=%d\n",tid,i);
    for(j=0; j<NCB; j++)
      for (k=0; k<NCA; k++)
        c[i][j] += a[i][k] * b[k][j];
    }
  }   /*** End of parallel region ***/

/*** Print results ***/
printf("******************************************************\n");
printf("Result Matrix:\n");
for (i=0; i<NRA; i++)
  {
  for (j=0; j<NCB; j++)
    printf("%6.2f ", c[i][j]);
  printf("\n");
  }
printf("******************************************************\n");
printf ("Done.\n");

return 0;
}
14_omp_nested.c
// clang-format off // RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | %filecheck %s // RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | %filecheck %s // RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S | %filecheck %s --check-prefix=check-inst // RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S | %filecheck %s --check-prefix=check-inst // REQUIRES: openmp // clang-format on extern void MPI_call(void*); void func(int* x) { #pragma omp parallel { MPI_call(x); } } void foo() { // check-inst: define {{.*}} @foo // check-inst: %x = alloca // check-inst: %0 = bitcast i32* %x to i8* // check-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1) // check-inst-not: __typeart_alloc_stack_omp int x; #pragma omp parallel { func(&x); } } // CHECK: TypeArtPass [Heap & Stack] // CHECK-NEXT: Malloc : 0 // CHECK-NEXT: Free : 0 // CHECK-NEXT: Alloca : 1 // CHECK-NEXT: Global : 0
papi.c
#include "XSbench_header.h"

/* Creates a per-thread PAPI event set, registers the hardware events of
 * interest, and starts counting. Exits the program on any PAPI failure.
 *   eventset        - out: handle of the created event set
 *   num_papi_events - out: number of events registered
 * Intended to be called from within an OpenMP parallel region (uses
 * omp_get_thread_num both for the banner and as the PAPI thread-id hook). */
void counter_init( int *eventset, int *num_papi_events )
{
	// int events[] = {PAPI_TOT_INS,PAPI_BR_INS,PAPI_SR_INS};
	int events[] = {PAPI_TOT_CYC,PAPI_L3_TCM};
	int stat;

	int thread = omp_get_thread_num();
	if( thread == 0 )
		printf("Initializing PAPI counters...\n");

	*num_papi_events = sizeof(events) / sizeof(int);

	/* Tell PAPI how to identify threads before any per-thread calls. */
	if ((stat = PAPI_thread_init((long unsigned int (*)(void)) omp_get_thread_num)) != PAPI_OK){
		PAPI_perror("PAPI_thread_init");
		exit(1);
	}

	if ( (stat= PAPI_create_eventset(eventset)) != PAPI_OK){
		PAPI_perror("PAPI_create_eventset");
		exit(1);
	}

	for( int i = 0; i < *num_papi_events; i++ ){
		if ((stat=PAPI_add_event(*eventset,events[i])) != PAPI_OK){
			PAPI_perror("PAPI_add_event");
			exit(1);
		}
	}

	if ((stat=PAPI_start(*eventset)) != PAPI_OK){
		PAPI_perror("PAPI_start");
		exit(1);
	}
}

/* Stops the papi counters and prints results.
 * One line per event, serialized across threads by the named critical
 * section so concurrent reports do not interleave. */
void counter_stop( int * eventset, int num_papi_events )
{
	int * events = malloc(num_papi_events * sizeof(int));
	long_long * values = malloc( num_papi_events * sizeof(long_long));
	/* malloc results were previously used unchecked. */
	if ( events == NULL || values == NULL ){
		fprintf(stderr, "counter_stop: out of memory\n");
		exit(1);
	}
	int n = num_papi_events;
	PAPI_list_events( *eventset, events, &n );
	PAPI_event_info_t info;
	PAPI_stop(*eventset, values);
	int thread = omp_get_thread_num();

	#pragma omp critical (papi)
	{
		printf("Thread %d\n", thread);
		for( int i = 0; i < num_papi_events; i++ )
		{
			PAPI_get_event_info(events[i], &info);
			printf("%-15lld\t%s\t%s\n", values[i],info.symbol,info.long_descr);
		}
	}
	/* Frees moved out of the critical section: only the printing needs to
	 * be serialized, and the buffers are thread-local. */
	free(events);
	free(values);
}
triplet_iw.c
/* Copyright (C) 2016 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/ #include "triplet_iw.h" #include <math.h> #include "grgrid.h" #include "phonoc_utils.h" #include "tetrahedron_method.h" #include "triplet.h" static void set_freq_vertices(double freq_vertices[3][24][4], const double *frequencies1, const double *frequencies2, const long vertices[2][24][4], const long num_band1, const long num_band2, const long b1, const long b2, const long tp_type); static long set_g(double g[3], const double f0, const double freq_vertices[3][24][4], const long max_i); static void get_triplet_tetrahedra_vertices( long vertices[2][24][4], const long tp_relative_grid_address[2][24][4][3], const long triplet[3], const ConstBZGrid *bzgrid); static void get_neighboring_grid_points_type1( long *neighboring_grid_points, const long grid_point, const long (*relative_grid_address)[3], const long num_relative_grid_address, const ConstBZGrid *bzgrid); static void get_neighboring_grid_points_type2( long *neighboring_grid_points, const long grid_point, const long (*relative_grid_address)[3], const long num_relative_grid_address, const ConstBZGrid *bzgrid); void tpi_get_integration_weight( double *iw, char *iw_zero, const double *frequency_points, const long num_band0, const long tp_relative_grid_address[2][24][4][3], const long triplets[3], const long num_triplets, const ConstBZGrid *bzgrid, const double *frequencies1, const long num_band1, const double *frequencies2, const long num_band2, const long tp_type, const long openmp_per_bands) { long max_i, j, b1, b2, b12, num_band_prod, adrs_shift; long vertices[2][24][4]; double g[3]; double freq_vertices[3][24][4]; get_triplet_tetrahedra_vertices(vertices, tp_relative_grid_address, triplets, bzgrid); num_band_prod = num_triplets * num_band0 * num_band1 * num_band2; /* tp_type: Type of integration weights stored */ /* */ /* g0 -> \delta(f0 - (-f1 + f2)) */ /* g1 -> \delta(f0 - (f1 - f2)) */ /* g2 -> \delta(f0 - (f1 + f2)) */ /* */ /* tp_type = 2: (g[2], g[0] - g[1]) mainly for ph-ph */ /* tp_type = 3: (g[2], g[0] 
- g[1], g[0] + g[1] + g[2]) mainly for ph-ph */ /* tp_type = 4: (g[0]) mainly for el-ph phonon decay, */ /* f0: ph, f1: el_i, f2: el_f */ if ((tp_type == 2) || (tp_type == 3)) { max_i = 3; } if (tp_type == 4) { max_i = 1; } #ifdef PHPYOPENMP #pragma omp parallel for private(j, b1, b2, adrs_shift, g, \ freq_vertices) if (openmp_per_bands) #endif for (b12 = 0; b12 < num_band1 * num_band2; b12++) { b1 = b12 / num_band2; b2 = b12 % num_band2; set_freq_vertices(freq_vertices, frequencies1, frequencies2, vertices, num_band1, num_band2, b1, b2, tp_type); for (j = 0; j < num_band0; j++) { adrs_shift = j * num_band1 * num_band2 + b1 * num_band2 + b2; iw_zero[adrs_shift] = set_g(g, frequency_points[j], freq_vertices, max_i); if (tp_type == 2) { iw[adrs_shift] = g[2]; adrs_shift += num_band_prod; iw[adrs_shift] = g[0] - g[1]; } if (tp_type == 3) { iw[adrs_shift] = g[2]; adrs_shift += num_band_prod; iw[adrs_shift] = g[0] - g[1]; adrs_shift += num_band_prod; iw[adrs_shift] = g[0] + g[1] + g[2]; } if (tp_type == 4) { iw[adrs_shift] = g[0]; } } } } void tpi_get_integration_weight_with_sigma( double *iw, char *iw_zero, const double sigma, const double cutoff, const double *frequency_points, const long num_band0, const long triplet[3], const long const_adrs_shift, const double *frequencies, const long num_band, const long tp_type, const long openmp_per_bands) { long j, b12, b1, b2, adrs_shift; double f0, f1, f2, g0, g1, g2; #ifdef PHPYOPENMP #pragma omp parallel for private(j, b1, b2, f0, f1, f2, g0, g1, g2, \ adrs_shift) if (openmp_per_bands) #endif for (b12 = 0; b12 < num_band * num_band; b12++) { b1 = b12 / num_band; b2 = b12 % num_band; f1 = frequencies[triplet[1] * num_band + b1]; f2 = frequencies[triplet[2] * num_band + b2]; for (j = 0; j < num_band0; j++) { f0 = frequency_points[j]; adrs_shift = j * num_band * num_band + b1 * num_band + b2; if ((tp_type == 2) || (tp_type == 3)) { if (cutoff > 0 && fabs(f0 + f1 - f2) > cutoff && fabs(f0 - f1 + f2) > cutoff && fabs(f0 - f1 - 
f2) > cutoff) { iw_zero[adrs_shift] = 1; g0 = 0; g1 = 0; g2 = 0; } else { iw_zero[adrs_shift] = 0; g0 = phonoc_gaussian(f0 + f1 - f2, sigma); g1 = phonoc_gaussian(f0 - f1 + f2, sigma); g2 = phonoc_gaussian(f0 - f1 - f2, sigma); } if (tp_type == 2) { iw[adrs_shift] = g2; adrs_shift += const_adrs_shift; iw[adrs_shift] = g0 - g1; } if (tp_type == 3) { iw[adrs_shift] = g2; adrs_shift += const_adrs_shift; iw[adrs_shift] = g0 - g1; adrs_shift += const_adrs_shift; iw[adrs_shift] = g0 + g1 + g2; } } if (tp_type == 4) { if (cutoff > 0 && fabs(f0 + f1 - f2) > cutoff) { iw_zero[adrs_shift] = 1; iw[adrs_shift] = 0; } else { iw_zero[adrs_shift] = 0; iw[adrs_shift] = phonoc_gaussian(f0 + f1 - f2, sigma); } } } } } void tpi_get_neighboring_grid_points(long *neighboring_grid_points, const long grid_point, const long (*relative_grid_address)[3], const long num_relative_grid_address, const ConstBZGrid *bzgrid) { if (bzgrid->type == 1) { get_neighboring_grid_points_type1(neighboring_grid_points, grid_point, relative_grid_address, num_relative_grid_address, bzgrid); } else { get_neighboring_grid_points_type2(neighboring_grid_points, grid_point, relative_grid_address, num_relative_grid_address, bzgrid); } } static void set_freq_vertices(double freq_vertices[3][24][4], const double *frequencies1, const double *frequencies2, const long vertices[2][24][4], const long num_band1, const long num_band2, const long b1, const long b2, const long tp_type) { long i, j; double f1, f2; for (i = 0; i < 24; i++) { for (j = 0; j < 4; j++) { f1 = frequencies1[vertices[0][i][j] * num_band1 + b1]; f2 = frequencies2[vertices[1][i][j] * num_band2 + b2]; if ((tp_type == 2) || (tp_type == 3)) { if (f1 < 0) { f1 = 0; } if (f2 < 0) { f2 = 0; } freq_vertices[0][i][j] = -f1 + f2; freq_vertices[1][i][j] = f1 - f2; freq_vertices[2][i][j] = f1 + f2; } else { freq_vertices[0][i][j] = -f1 + f2; } } } } /* Integration weight g is calculated. */ /* iw_zero = 1 means g[0] to g[max_i - 1] are all zero. 
*/
/* max_i depends on what we compute, e.g., ph-ph lifetime, */
/* ph-ph collision matrix, and el-ph relaxation time. */
/* iw_zero is definitely determined by in_tetrahedra in case that */
/* f0 is out of the tetrahedra. */
/* iw_zero=1 information can be used to omit to compute particles */
/* interaction strength that is often heaviest part in throughout */
/* calculation. */
static long set_g(double g[3], const double f0,
                  const double freq_vertices[3][24][4], const long max_i) {
    long i, iw_zero;

    iw_zero = 1;

    for (i = 0; i < max_i; i++) {
        if (thm_in_tetrahedra(f0, freq_vertices[i])) {
            /* 'I' selects the imaginary-part (delta-function) weight in
               the tetrahedron-method backend. */
            g[i] = thm_get_integration_weight(f0, freq_vertices[i], 'I');
            iw_zero = 0;
        } else {
            g[i] = 0;
        }
    }

    return iw_zero;
}

/* Fills vertices[i] with the 24 tetrahedra (4 grid points each) around the
   second and third members of the triplet (triplet[1], triplet[2]). */
static void get_triplet_tetrahedra_vertices(
    long vertices[2][24][4],
    const long tp_relative_grid_address[2][24][4][3], const long triplet[3],
    const ConstBZGrid *bzgrid) {
    long i, j;

    for (i = 0; i < 2; i++) {
        for (j = 0; j < 24; j++) {
            tpi_get_neighboring_grid_points(vertices[i][j], triplet[i + 1],
                                            tp_relative_grid_address[i][j], 4,
                                            bzgrid);
        }
    }
}

/* Neighbor lookup for type-1 BZ-grid storage: addresses indexed on a
   doubled mesh via gp_map; an entry equal to prod_bz_mesh marks a point
   absent from the BZ map, which falls back to the plain grid index. */
static void get_neighboring_grid_points_type1(
    long *neighboring_grid_points, const long grid_point,
    const long (*relative_grid_address)[3],
    const long num_relative_grid_address, const ConstBZGrid *bzgrid) {
    long bzmesh[3], bz_address[3];
    long i, j, bz_gp, prod_bz_mesh;

    for (i = 0; i < 3; i++) {
        bzmesh[i] = bzgrid->D_diag[i] * 2;
    }
    prod_bz_mesh = bzmesh[0] * bzmesh[1] * bzmesh[2];
    for (i = 0; i < num_relative_grid_address; i++) {
        for (j = 0; j < 3; j++) {
            bz_address[j] = bzgrid->addresses[grid_point][j] +
                            relative_grid_address[i][j];
        }
        bz_gp = bzgrid->gp_map[grg_get_grid_index(bz_address, bzmesh)];
        if (bz_gp == prod_bz_mesh) {
            neighboring_grid_points[i] =
                grg_get_grid_index(bz_address, bzgrid->D_diag);
        } else {
            neighboring_grid_points[i] = bz_gp;
        }
    }
}

/* Neighbor lookup for type-2 BZ-grid storage: gp_map holds ranges into
   addresses; when several BZ images share a grid index, the exact image
   matching bz_address is searched within the range. */
static void get_neighboring_grid_points_type2(
    long *neighboring_grid_points, const long grid_point,
    const long (*relative_grid_address)[3],
    const long num_relative_grid_address, const ConstBZGrid *bzgrid) {
    long bz_address[3];
    long i, j, gp;

    for (i = 0; i < num_relative_grid_address; i++) {
        for (j = 0; j < 3; j++) {
            bz_address[j] = bzgrid->addresses[grid_point][j] +
                            relative_grid_address[i][j];
        }
        gp = grg_get_grid_index(bz_address, bzgrid->D_diag);
        neighboring_grid_points[i] = bzgrid->gp_map[gp];
        if (bzgrid->gp_map[gp + 1] - bzgrid->gp_map[gp] > 1) {
            for (j = bzgrid->gp_map[gp]; j < bzgrid->gp_map[gp + 1]; j++) {
                if (bz_address[0] == bzgrid->addresses[j][0] &&
                    bz_address[1] == bzgrid->addresses[j][1] &&
                    bz_address[2] == bzgrid->addresses[j][2]) {
                    neighboring_grid_points[i] = j;
                    break;
                }
            }
        }
    }
}
edge_vol_int.c
/****************************************************************************** ** Copyright (c) 2016-2017, Intel Corporation ** ** All rights reserved. ** ** ** ** Redistribution and use in source and binary forms, with or without ** ** modification, are permitted provided that the following conditions ** ** are met: ** ** 1. Redistributions of source code must retain the above copyright ** ** notice, this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright ** ** notice, this list of conditions and the following disclaimer in the ** ** documentation and/or other materials provided with the distribution. ** ** 3. Neither the name of the copyright holder nor the names of its ** ** contributors may be used to endorse or promote products derived ** ** from this software without specific prior written permission. ** ** ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) 
******************************************************************************/ #include <libxsmm.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #if defined(_OPENMP) # include <omp.h> #endif #if defined(_WIN32) || defined(__CYGWIN__) /* note: later on, this leads to (correct but) different than expected norm-values */ # define drand48() ((double)rand() / RAND_MAX) # define srand48 srand #endif static double sec(struct timeval start, struct timeval end) { return ((double)(((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)))) / 1.0e6; } void edge_sparse_csr_reader( const char* i_csr_file_in, unsigned int** o_row_idx, unsigned int** o_column_idx, double** o_values, unsigned int* o_row_count, unsigned int* o_column_count, unsigned int* o_element_count ) { FILE *l_csr_file_handle; const unsigned int l_line_length = 512; char l_line[512/*l_line_length*/+1]; unsigned int l_header_read = 0; unsigned int* l_row_idx_id = NULL; unsigned int l_i = 0; l_csr_file_handle = fopen( i_csr_file_in, "r" ); if ( l_csr_file_handle == NULL ) { fprintf( stderr, "cannot open CSR file!\n" ); return; } while (fgets(l_line, l_line_length, l_csr_file_handle) != NULL) { if ( strlen(l_line) == l_line_length ) { fprintf( stderr, "could not read file length!\n" ); return; } /* check if we are still reading comments header */ if ( l_line[0] == '%' ) { continue; } else { /* if we are the first line after comment header, we allocate our data structures */ if ( l_header_read == 0 ) { if ( sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) == 3 ) { /* allocate CSC datastructue matching mtx file */ *o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_element_count)); *o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count + 1)); *o_values = (double*) malloc(sizeof(double) * (*o_element_count)); l_row_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count)); /* check if mallocs were successful */ 
if ( ( *o_row_idx == NULL ) || ( *o_column_idx == NULL ) || ( *o_values == NULL ) || ( l_row_idx_id == NULL ) ) { fprintf( stderr, "could not allocate sp data!\n" ); return; } /* set everything to zero for init */ memset(*o_row_idx, 0, sizeof(unsigned int)*(*o_row_count + 1)); memset(*o_column_idx, 0, sizeof(unsigned int)*(*o_element_count)); memset(*o_values, 0, sizeof(double)*(*o_element_count)); memset(l_row_idx_id, 0, sizeof(unsigned int)*(*o_row_count)); /* init column idx */ for ( l_i = 0; l_i < (*o_row_count + 1); l_i++) (*o_row_idx)[l_i] = (*o_element_count); /* init */ (*o_row_idx)[0] = 0; l_i = 0; l_header_read = 1; } else { fprintf( stderr, "could not csr descripton!\n" ); return; } /* now we read the actual content */ } else { unsigned int l_row, l_column; double l_value; /* read a line of content */ if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) { fprintf( stderr, "could not read element!\n" ); return; } /* adjust numbers to zero termination */ l_row--; l_column--; /* add these values to row and value strucuture */ (*o_column_idx)[l_i] = l_column; (*o_values)[l_i] = l_value; l_i++; /* handle columns, set id to onw for this column, yeah we need to hanle empty columns */ l_row_idx_id[l_row] = 1; (*o_row_idx)[l_row+1] = l_i; } } } /* close mtx file */ fclose( l_csr_file_handle ); /* check if we read a file which was consitent */ if ( l_i != (*o_element_count) ) { fprintf( stderr, "we were not able to read all elements!\n" ); return; } /* let's handle empty rows */ for ( l_i = 0; l_i < (*o_row_count); l_i++) { if ( l_row_idx_id[l_i] == 0 ) { (*o_row_idx)[l_i+1] = (*o_row_idx)[l_i]; } } /* free helper data structure */ if ( l_row_idx_id != NULL ) { free( l_row_idx_id ); } } int main(int argc, char* argv[]) { char* mat_a; unsigned int *mat_a_rowptr, *mat_a_colidx; unsigned int mat_a_rowcount, mat_a_colcount, mat_a_nnz; double* mat_a_values; libxsmm_dmmfunction a_kernel; char* mat_b; unsigned int *mat_b_rowptr, *mat_b_colidx; unsigned 
int mat_b_rowcount, mat_b_colcount, mat_b_nnz; double* mat_b_values; libxsmm_dmmfunction b_kernel; char* mat_c; unsigned int *mat_c_rowptr, *mat_c_colidx; unsigned int mat_c_rowcount, mat_c_colcount, mat_c_nnz; double* mat_c_values; libxsmm_dmmfunction c_kernel; char* mat_st; unsigned int *mat_st_rowptr, *mat_st_colidx; unsigned int mat_st_rowcount, mat_st_colcount, mat_st_nnz; double* mat_st_values; libxsmm_dmmfunction st_kernel; size_t num_elems; size_t num_modes; size_t num_quants = 9; size_t num_cfr = 8; size_t num_reps; size_t elem_size; size_t i, j; libxsmm_gemm_descriptor l_xgemm_desc_stiff; libxsmm_gemm_descriptor l_xgemm_desc_star; double* q; double* qt; double* t; double* tp; struct timeval l_start, l_end; double l_total; /* read cmd */ if ((argc > 1 && !strncmp(argv[1], "-h", 3)) || (argc != 8)) { printf("Usage: %s stif1 stif2 stif3 star nModes nElems nReps\n", argv[0]); return 0; } srand48(1); /* some empty lines at the beginning */ printf("\n"); i = 1; if (argc > i) mat_a = argv[i++]; if (argc > i) mat_b = argv[i++]; if (argc > i) mat_c = argv[i++]; if (argc > i) mat_st = argv[i++]; if (argc > i) num_modes = atoi(argv[i++]); if (argc > i) num_elems = atoi(argv[i++]); if (argc > i) num_reps = atoi(argv[i++]); elem_size = num_modes*num_quants*num_cfr; /* read matrices */ printf("reading sparse matrices... "); edge_sparse_csr_reader( mat_a, &mat_a_rowptr, &mat_a_colidx, &mat_a_values, &mat_a_rowcount, &mat_a_colcount, &mat_a_nnz ); edge_sparse_csr_reader( mat_b, &mat_b_rowptr, &mat_b_colidx, &mat_b_values, &mat_b_rowcount, &mat_b_colcount, &mat_b_nnz ); edge_sparse_csr_reader( mat_c, &mat_c_rowptr, &mat_c_colidx, &mat_c_values, &mat_c_rowcount, &mat_c_colcount, &mat_c_nnz ); edge_sparse_csr_reader( mat_st, &mat_st_rowptr, &mat_st_colidx, &mat_st_values, &mat_st_rowcount, &mat_st_colcount, &mat_st_nnz ); printf("done!\n\n"); /* generate kernels */ printf("generating code... 
"); LIBXSMM_GEMM_DESCRIPTOR(l_xgemm_desc_stiff, 1, 0, num_quants, num_modes, num_modes, num_modes, 0, num_modes, 1.0, 1.0, LIBXSMM_PREFETCH_NONE); LIBXSMM_GEMM_DESCRIPTOR(l_xgemm_desc_star, 1, 0, num_quants, num_modes, num_quants, 0, num_modes, num_modes, 1.0, 1.0, LIBXSMM_PREFETCH_NONE); a_kernel = libxsmm_create_dcsr_soa( &l_xgemm_desc_stiff, mat_a_rowptr, mat_a_colidx, mat_a_values ).dmm; b_kernel = libxsmm_create_dcsr_soa( &l_xgemm_desc_stiff, mat_b_rowptr, mat_b_colidx, mat_b_values ).dmm; c_kernel = libxsmm_create_dcsr_soa( &l_xgemm_desc_stiff, mat_c_rowptr, mat_c_colidx, mat_c_values ).dmm; st_kernel = libxsmm_create_dcsr_soa( &l_xgemm_desc_star, mat_st_rowptr, mat_st_colidx, mat_st_values ).dmm; if ( a_kernel == 0 || b_kernel == 0 || c_kernel == 0 || st_kernel == 0 ) { printf("one of the kernels could not be built -> exit!"); exit(-1); } printf("done!\n\n"); /* create unkowns and tunkowns */ printf("allocating and initializing fake data... \n"); printf(" q: %f MiB\n", ((double)(num_elems*num_modes*num_quants*num_cfr*sizeof(double)))/ ( 1024.0*1024.0) ); printf(" qt: %f MiB\n", ((double)(num_elems*num_modes*num_quants*num_cfr*sizeof(double)))/ ( 1024.0*1024.0) ); #ifdef _OPENMP printf(" t: %f MiB\n", ((double)(omp_get_max_threads()*num_modes*num_quants*num_cfr*sizeof(double)))/ ( 1024.0*1024.0) ); #else printf(" t: %f MiB\n", ((double)(num_modes*num_quants*num_cfr*sizeof(double)))/ ( 1024.0*1024.0) ); #endif q = (double*)libxsmm_aligned_malloc( num_elems*num_modes*num_quants*num_cfr*sizeof(double), 2097152); qt = (double*)libxsmm_aligned_malloc( num_elems*num_modes*num_quants*num_cfr*sizeof(double), 2097152); #pragma omp parallel for private(i,j) for ( i = 0; i < num_elems; i++ ) { for ( j = 0; j < elem_size; j++) { q[i*elem_size + j] = drand48(); } } #pragma omp parallel for private(i,j) for ( i = 0; i < num_elems; i++ ) { for ( j = 0; j < elem_size; j++) { qt[i*elem_size + j] = drand48(); } } printf("done!\n\n"); /* benchmark single core all kernels */ 
printf("benchmarking kernels... \n"); gettimeofday(&l_start, NULL); for ( i = 0; i < num_reps; i++) { a_kernel( qt, mat_a_values, q ); } gettimeofday(&l_end, NULL); l_total = sec(l_start, l_end); printf("%fs for stiff1 (asm)\n", l_total); printf("%f GFLOPS for stiff1 (asm)\n", ((double)((double)num_reps * (double)num_quants * (double)mat_a_nnz * (double)num_cfr) * 2.0) / (l_total * 1.0e9)); gettimeofday(&l_start, NULL); for ( i = 0; i < num_reps; i++) { b_kernel( qt, mat_b_values, q ); } gettimeofday(&l_end, NULL); l_total = sec(l_start, l_end); printf("%fs for stiff2 (asm)\n", l_total); printf("%f GFLOPS for stiff2 (asm)\n", ((double)((double)num_reps * (double)num_quants * (double)mat_b_nnz * (double)num_cfr) * 2.0) / (l_total * 1.0e9)); gettimeofday(&l_start, NULL); for ( i = 0; i < num_reps; i++) { c_kernel( qt, mat_c_values, q ); } gettimeofday(&l_end, NULL); l_total = sec(l_start, l_end); printf("%fs for stiff3 (asm)\n", l_total); printf("%f GFLOPS for stiff3 (asm)\n", ((double)((double)num_reps * (double)num_quants * (double)mat_c_nnz * (double)num_cfr) * 2.0) / (l_total * 1.0e9)); gettimeofday(&l_start, NULL); for ( i = 0; i < num_reps; i++) { st_kernel( mat_st_values, qt, q ); } gettimeofday(&l_end, NULL); l_total = sec(l_start, l_end); printf("%fs for star (asm)\n", l_total); printf("%f GFLOPS for star (asm)\n", ((double)((double)num_reps * (double)num_modes * (double)mat_st_nnz * (double)num_cfr) * 2.0) / (l_total * 1.0e9)); printf("done!\n\n"); /* benchmark volumne integration */ #pragma omp parallel for private(i,j) for ( i = 0; i < num_elems; i++ ) { for ( j = 0; j < elem_size; j++) { q[i*elem_size + j] = drand48(); } } #pragma omp parallel for private(i,j) for ( i = 0; i < num_elems; i++ ) { for ( j = 0; j < elem_size; j++) { qt[i*elem_size + j] = drand48(); } } gettimeofday(&l_start, NULL); for ( i = 0; i < num_reps; i++) { #pragma omp parallel private(i, j) { __attribute__((aligned(64))) double tp[20*8*9]; #pragma omp for private(j) for ( j = 0; j 
< num_elems; j++ ) { st_kernel( mat_st_values, qt+(j*elem_size), tp ); a_kernel( tp, mat_a_values, q+(j*elem_size) ); st_kernel( mat_st_values, qt+(j*elem_size), tp ); b_kernel( tp, mat_b_values, q+(j*elem_size) ); st_kernel( mat_st_values, qt+(j*elem_size), tp ); c_kernel( tp, mat_c_values, q+(j*elem_size) ); } } } gettimeofday(&l_end, NULL); l_total = sec(l_start, l_end); printf("%fs for vol (asm)\n", l_total); printf("%f GFLOPS for vol (asm)\n", ((double)((double)num_elems * (double)num_reps * 3.0 * ((double)num_quants + (double)num_modes) * (double)mat_st_nnz * (double)num_cfr) * 2.0) / (l_total * 1.0e9)); printf("%f GiB/s for vol (asm)\n", (double)((double)num_elems * (double)elem_size * 8.0 * 3.0 * (double)num_reps) / (l_total * 1024.0*1024.0*1024.0) ); printf("done!\n\n"); /* some empty lines at the end */ printf("\n\n"); return 0; }
userom.c
/*-
 * Copyright 2013-2015 Alexander Peslyak
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* yescrypt benchmark that hashes against a shared read-only ROM, either
 * mmap()'d from a file or attached from a SysV shared-memory segment,
 * then measures single-threaded and (with OpenMP) multi-threaded hash
 * throughput in hashes per second. */

#define YESCRYPT_FLAGS YESCRYPT_RW
//#define YESCRYPT_FLAGS YESCRYPT_WORM

#define ROM_SHM_KEY 0x524f4d0a

//#define DISABLE_ROM
//#define DUMP_LOCAL

#include <stdio.h>
#include <stdlib.h> /* for atoi() */
#include <string.h>
#include <unistd.h>
#include <sys/shm.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/times.h>

#ifdef _OPENMP
#include <omp.h>
/* number of single-thread hashes kept for cross-checking the threaded run */
#define NSAVE 1000
#endif

#include "yescrypt.h"

int main(int argc, const char * const *argv)
{
/* defaults: argv[1] = ROM size in GiB, argv[2] = RAM per hash in MiB */
#if 0
	uint64_t rom_bytes = 112 * (1024ULL*1024*1024);
	uint64_t ram_bytes = 1 * (1024ULL*1024);
#else
	uint64_t rom_bytes = 3 * (1024ULL*1024*1024);
	uint64_t ram_bytes = 2 * (1024ULL*1024);
#endif
	uint32_t r, min_r;
	uint64_t NROM_log2, N_log2;
	yescrypt_shared_t shared_s;
	yescrypt_shared_t * shared = NULL;
#ifndef DISABLE_ROM
	int shmid;
#endif
	const char * rom_filename = NULL;
	int rom_fd;

	/* NOTE(review): atoi() silently truncates/overflows on bad or huge
	 * values; inputs are assumed to be small positive integers. */
	if (argc > 1)
		rom_bytes = atoi(argv[1]) * (1024ULL*1024*1024);
	if (argc > 2)
		ram_bytes = atoi(argv[2]) * (1024ULL*1024);
	if (argc > 3 && rom_bytes)
		rom_filename = argv[3];

	/* Derive the yescrypt block size r and the ROM size NROM = 2^NROM_log2
	 * blocks so that r * 2^(7+NROM_log2) == rom_bytes (the requested size is
	 * rounded to the nearest representable value). */
	r = 8;
	min_r = 5;
	if (rom_filename)
		min_r = 8 * 64;
	NROM_log2 = 0;
	if (rom_bytes) {
		while (((rom_bytes >> NROM_log2) & 0xff) == 0)
			NROM_log2++;
		r = rom_bytes >> (7 + NROM_log2);
		while (r < min_r && NROM_log2 > 0) {
			r <<= 1;
			NROM_log2--;
		}
		rom_bytes = (uint64_t)r << (7 + NROM_log2);
	}

	/* Smallest N = 2^N_log2 such that r * 2^(7+N_log2) >= ram_bytes. */
	N_log2 = 0;
	while (((uint64_t)r << (7 + N_log2)) < ram_bytes)
		N_log2++;
	ram_bytes = (uint64_t)r << (7 + N_log2);

	printf("r=%u N=2^%u NROM=2^%u\n", r, (unsigned int)N_log2, (unsigned int)NROM_log2);

#ifdef DISABLE_ROM
	rom_bytes = 0;
#endif

	printf("Will use %.2f KiB ROM\n", rom_bytes / 1024.0);
	printf(" %.2f KiB RAM\n", ram_bytes / 1024.0);

#ifndef DISABLE_ROM
	if (rom_filename) {
		/* Map the ROM from a file, read-only; try huge pages first. */
		rom_fd = open(rom_filename, O_RDONLY);
		if (rom_fd < 0) {
			perror("open");
			return 1;
		}

		int flags =
#ifdef MAP_NOCORE
		    MAP_NOCORE |
#endif
#ifdef MAP_HUGETLB
		    MAP_HUGETLB |
#endif
		    MAP_SHARED;
		void * p = mmap(NULL, rom_bytes, PROT_READ, flags, rom_fd, 0);
#ifdef MAP_HUGETLB
		/* fall back to regular pages if the hugetlb mapping failed */
		if (p == MAP_FAILED)
			p = mmap(NULL, rom_bytes, PROT_READ, flags & ~MAP_HUGETLB, rom_fd, 0);
#endif
		if (p == MAP_FAILED) {
			perror("mmap");
			close(rom_fd);
			return 1;
		}
		close(rom_fd);

		shared = &shared_s;
		shared->base = shared->aligned = p;
		shared->aligned_size = rom_bytes;
	} else if (rom_bytes) {
		/* Attach a pre-initialized SysV shared-memory ROM segment
		 * (created by a companion "initrom" tool, presumably —
		 * key 0x524f4d0a spells "ROM\n").  NOTE(review): the segment is
		 * never shmdt()'d; it is released implicitly at process exit. */
		shared = &shared_s;
		shared->aligned_size = rom_bytes;
		shmid = shmget(ROM_SHM_KEY, shared->aligned_size, 0);
		if (shmid == -1) {
			perror("shmget");
			return 1;
		}
		shared->base = shared->aligned = shmat(shmid, NULL, SHM_RDONLY);
		if (shared->base == (void *)-1) {
			perror("shmat");
			return 1;
		}
	}
#endif

	{
		yescrypt_local_t local;
		const uint8_t *setting;
		if (yescrypt_init_local(&local)) {
			puts("yescrypt_init_local() FAILED");
			return 1;
		}
		setting = yescrypt_gensalt(
		    N_log2, r, 1, YESCRYPT_FLAGS,
		    (const uint8_t *)"binary data", 12);

		/* One reference hash printed so runs can be compared by eye. */
		{
			uint8_t hash[128];
			printf("'%s'\n", (char *)yescrypt_r(shared, &local,
			    (const uint8_t *)"pleaseletmein", 13,
			    setting, hash, sizeof(hash)));
		}

#ifdef DUMP_LOCAL
#if 0
		fwrite(local.aligned, local.aligned_size, 1, stderr);
#else
		/* Skip B, dump only V */
		if (local.aligned_size >= ram_bytes + 128 * r)
			fwrite((char *)local.aligned + 128 * r, ram_bytes, 1, stderr);
#endif
#endif

		puts("Benchmarking 1 thread ...");

		clock_t clk_tck = sysconf(_SC_CLK_TCK);
		struct tms start_tms, end_tms;
		clock_t start = times(&start_tms), end;
		unsigned int i, n;
		unsigned long long count;
#ifdef _OPENMP
		char save[NSAVE][128];
		unsigned int nsave = 0;
#endif
		/* deterministic-ish password stream derived from the start time */
		unsigned int seed = start * 1812433253U;

		/* Doubling batches until at least one clock tick has elapsed. */
		n = 1;
		count = 0;
		do {
			for (i = 0; i < n; i++) {
				unsigned int j = count + i;
				char p[32];
				uint8_t hash[128];
				snprintf(p, sizeof(p), "%u", seed + j);
#ifdef _OPENMP
				const uint8_t *h =
#endif
				yescrypt_r(shared, &local,
				    (const uint8_t *)p, strlen(p),
				    setting, hash, sizeof(hash));
#ifdef _OPENMP
				/* remember early hashes to verify the threaded run */
				if (j < NSAVE) {
					save[j][0] = 0;
					strncat(save[j], (char *)h, sizeof(save[j]) - 1);
					/* NOTE(review): this records the last saved
					 * index, not the count (looks like an
					 * off-by-one: j+1 would be the count);
					 * confirm against upstream yescrypt. */
					nsave = j;
				}
#endif
			}
			count += n;
			end = times(&end_tms);
			n <<= 1;
		} while (end - start < clk_tck);

		/* CPU ("virtual") time including children, in clock ticks. */
		clock_t start_v = start_tms.tms_utime + start_tms.tms_stime +
		    start_tms.tms_cutime + start_tms.tms_cstime;
		clock_t end_v = end_tms.tms_utime + end_tms.tms_stime +
		    end_tms.tms_cutime + end_tms.tms_cstime;

		printf("%llu c/s real, %llu c/s virtual "
		    "(%llu hashes in %.2f seconds)\n",
		    count * clk_tck / (end - start),
		    count * clk_tck / (end_v - start_v),
		    count, (double)(end - start) / clk_tck);

#ifdef _OPENMP
		unsigned int nt = omp_get_max_threads();
		printf("Benchmarking %u threads ...\n", nt);

		/* one scratch area per thread (VLA) */
		yescrypt_local_t locals[nt];
		unsigned int t;
		for (t = 0; t < nt; t++) {
			if (yescrypt_init_local(&locals[t])) {
				puts("yescrypt_init_local() FAILED");
				return 1;
			}
		}

		unsigned long long count1 = count, count_restart = 0;

		start = times(&start_tms);

		/* Start at the single-thread batch size; the timer is restarted
		 * once the warm-up (count1 hashes) has been passed. */
		n = count;
		count = 0;
		do {
#pragma omp parallel for default(none) private(i) shared(n, shared, locals, setting, seed, count, save, nsave)
			for (i = 0; i < n; i++) {
				unsigned int j = count + i;
				char p[32];
				uint8_t hash[128];
				snprintf(p, sizeof(p), "%u", seed + j);
#if 1
				const uint8_t *h = yescrypt_r(
				    shared, &locals[omp_get_thread_num()],
				    (const uint8_t *)p, strlen(p),
				    setting, hash, sizeof(hash));
#else
				yescrypt_local_t local;
				yescrypt_init_local(&local);
				const uint8_t *h = yescrypt_r(
				    shared, &local,
				    (const uint8_t *)p, strlen(p),
				    setting, hash, sizeof(hash));
				yescrypt_free_local(&local);
#endif
				/* cross-check against the single-thread hashes */
				if (j < nsave && strcmp(save[j], (char *)h)) {
#pragma omp critical
					printf("Mismatch at %u, %s != %s\n",
					    j, save[j], (char *)h);
				}
			}
			count += n;
			/* restart timing once the warm-up threshold is crossed */
			if ((count - n) < count1 && count >= count1) {
				start = times(&start_tms);
				count_restart = count;
			} else
				n <<= 1;
			end = times(&end_tms);
		} while (end - start < clk_tck);

		if (!count_restart)
			puts("Didn't reach single-thread's hash count");
		count -= count_restart;

		start_v = start_tms.tms_utime + start_tms.tms_stime +
		    start_tms.tms_cutime + start_tms.tms_cstime;
		end_v = end_tms.tms_utime + end_tms.tms_stime +
		    end_tms.tms_cutime + end_tms.tms_cstime;

		printf("%llu c/s real, %llu c/s virtual "
		    "(%llu hashes in %.2f seconds)\n",
		    count * clk_tck / (end - start),
		    count * clk_tck / (end_v - start_v),
		    count, (double)(end - start) / clk_tck);
#endif
	}

	if (rom_filename && munmap(shared->base, rom_bytes)) {
		perror("munmap");
		return 1;
	}

	return 0;
}
dataracetest4.c
/* Auto-generated polyhedral OpenMP data-race test kernel.
 * Fills parts of A and B in two parallelized loop nests whose bounds are
 * derived from the fixed sizes ni..nm.  With ni=3, nj=5, nk=2, nl=4, nm=3
 * neither guarded region actually iterates (upper bounds < lower bounds),
 * so the kernel is effectively a structural test case.
 * Returns 0 (fix: the function was declared int but returned nothing).
 * Note: unused 128x128 arrays C and D from the generated code were removed. */
int kernel()
{
  int ni; int nj; int nk; int nl; int nm;
  double A[128 + 0][128 + 0];
  double B[128 + 0][128 + 0];
  int c2;
  int c1;
  ni = 3; nj = 5; nk = 2; nl = 4; nm = 3; c2 = 0; c1 = 0;

  if (nj >= 1 && nl <= 0) {
#pragma omp parallel for private(c2)
    for (c1 = nj; c1 <= ((((ni + -1 < nk + -1?ni + -1 : nk + -1)) < nm + -1?((ni + -1 < nk + -1?ni + -1 : nk + -1)) : nm + -1)); c1++) {
      for (c2 = 0; c2 <= nj + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
      for (c2 = nj; c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
      }
    }
  }

  if (nj >= 1) {
#pragma omp parallel for private(c2)
    for (c1 = (nj > nm?nj : nm); c1 <= ((ni + -1 < nk + -1?ni + -1 : nk + -1)); c1++) {
      for (c2 = 0; c2 <= nj + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
        B[c1][c2] = ((double )c1) * (c2 + 1) / nj;
      }
      for (c2 = nj; c2 <= nk + -1; c2++) {
        A[c1][c2] = ((double )c1) * c2 / ni;
      }
    }
  }

  return 0;
}

int main()
{
  /* fix: the original called function(), which is not defined anywhere
   * in this file and fails to link; kernel() is clearly intended. */
  kernel();
  return 0;
}
fig3.12-conditional-comp.c
/* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. Copyright 2009 Sun Microsystems, Inc. All rights reserved. The contents of this file are subject to the terms of the BSD License("BSD")(the "License"). You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt The BSD License Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistribution of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistribution in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Sun Microsystems, Inc. or the names of contributors may be used to endorse or promote products derived from this software without specific prior written permission. This software is provided "AS IS," without a warranty of any kind. ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. You acknowledge that this software is not designed, licensed or intended for use in the design, construction, operation or maintenance of any nuclear facility. 
*/

#include <stdio.h>
#include <stdlib.h>

#ifdef _OPENMP
#include <omp.h>
#define TRUE 1
#define FALSE 0
#else
/* Stub so the program also builds without OpenMP support. */
#define omp_get_thread_num() 0
#endif

/*
 * Demonstrates conditional compilation on _OPENMP: the master thread
 * reports its ID, dynamic thread adjustment is disabled, the team size
 * is fixed at four, and each thread in the parallel region prints its ID.
 */
int main(int argc, char *argv[])
{
    int TID = omp_get_thread_num();

    printf("Thread ID of the master thread is %d\n", TID);

#ifdef _OPENMP
    /* Pin the team size: turn off dynamic adjustment, then request 4 threads. */
    (void) omp_set_dynamic(FALSE);
    if (omp_get_dynamic()) {
        printf("Warning: dynamic adjustment of threads has been set\n");
    }
    (void) omp_set_num_threads(4);
#endif

#pragma omp parallel
    {
        int worker_id = omp_get_thread_num();
        printf("In parallel region - Thread ID is %d\n", worker_id);
    } /*-- End of parallel region --*/

    return 0;
}
SpatialFullConvolutionMap.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialFullConvolutionMap.c"
#else

/* Forward pass of a full ("transposed") 2-D convolution with an explicit
 * input-plane -> output-plane connection table.  For each table entry k,
 * input plane connTable[k][0] is fully convolved with weight slice k and
 * accumulated into output plane connTable[k][1]; each output plane is first
 * filled with its bias.  Output size is (in - 1) * stride + kernel per
 * spatial dimension.  output_ is resized in place. */
void THNN_(SpatialFullConvolutionMap_updateOutput)(
  THNNState *state, THTensor *input, THTensor *output_, THTensor *weight,
  THTensor *bias, THTensor *connTable,
  int nInputPlane, int nOutputPlane, int dW, int dH)
{
  THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous");
  THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous");
  /* weight must be a non-empty 3D tensor with one kH x kW slice per
   * connection-table row */
  THArgCheck(
    weight != NULL && !weight->is_empty() && weight->dim() == 3
    && connTable != NULL && connTable->size(0) == weight->size(0), 4,
    "non-empty 3D weight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE
  );

  const int kH = (int)weight->size(1);
  const int kW = (int)weight->size(2);

  THArgCheck(input != NULL && !input->is_empty() && input->dim() == 3, 2, "non-empty 3D tensor expected");
  THArgCheck(input->size(0) >= nInputPlane, 2, "invalid number of input planes");

  THTensor_(resize3d)(
    output_, nOutputPlane,
    (input->size(1) - 1) * dH + kH,
    (input->size(2) - 1) * dW + kW
  );

  /* contiguous */
  input = THTensor_(newContiguous)(input);
  THTensor* output = THTensor_(newContiguous)(output_);

  /* get raw pointers */
  real *input_data = THTensor_(data)(input);
  real *output_data = THTensor_(data)(output);
  real *weight_data = THTensor_(data)(weight);
  /* NOTE(review): the THArgCheck above allows bias == NULL, but bias_data[p]
   * is unconditionally read below -- presumably callers always pass a bias;
   * confirm against the callers of this THNN function. */
  real *bias_data = THTensor_(data)(bias);
  real *connTable_data = THTensor_(data)(connTable);

  /* and dims */
  const int64_t input_h = input->size(1);
  const int64_t input_w = input->size(2);
  const int64_t output_h = output->size(1);
  const int64_t output_w = output->size(2);
  const int64_t weight_h = weight->size(1);
  const int64_t weight_w = weight->size(2);

  int64_t p;
  /* parallel over output planes; the `if (o == p)` filter below makes each
   * thread write only its own plane, so iterations do not conflict */
#pragma omp parallel for private(p)
  for (p = 0; p < nOutputPlane; p++) {
    /* add bias */
    real *ptr_output = output_data + p*output_w*output_h;
    int64_t j;
    int nweight;
    int64_t k;
    for (j = 0; j < output_h*output_w; j++)
      ptr_output[j] = bias_data[p];

    /* convolve all maps */
    nweight = connTable->size(0);
    for (k = 0; k < nweight; k++) {
      /* get offsets for input/output; connTable entries are
       * TH_INDEX_BASE-based (input plane, output plane) pairs */
      int o = (int)connTable_data[k*2+1] - TH_INDEX_BASE;
      int i = (int)connTable_data[k*2+0] - TH_INDEX_BASE;
      if (o == p)
      {
        THTensor_(fullConv2Dptr)(
          output_data + o*output_w*output_h,
          1.0,
          input_data + i*input_w*input_h, input_h, input_w,
          weight_data + k*weight_w*weight_h, weight_h, weight_w,
          dH, dW
        );
      }
    }
  }

  /* clean up */
  THTensor_(free)(input);
  THTensor_(freeCopyTo)(output, output_);
}

/* Backward pass w.r.t. the input: for each connection k targeting input
 * plane p, cross-correlates gradOutput plane connTable[k][1] with weight
 * slice k and accumulates into gradInput plane p.  gradInput_ is resized
 * to match input and zeroed first. */
void THNN_(SpatialFullConvolutionMap_updateGradInput)(
  THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput_,
  THTensor *weight, THTensor *bias, THTensor *connTable,
  int nInputPlane, int nOutputPlane, int dW, int dH)
{
  THArgCheck(
    weight != NULL && !weight->is_empty() && weight->dim() == 3
    && connTable != NULL && connTable->size(0) == weight->size(0), 5,
    "non-empty 3D weight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE
  );

  /* contiguous */
  THTensor* gradInput = THTensor_(newContiguous)(gradInput_);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* Resize/Zero */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  /* get raw pointers */
  real *gradInput_data = THTensor_(data)(gradInput);
  real *gradOutput_data = THTensor_(data)(gradOutput);
  real *weight_data = THTensor_(data)(weight);
  real *connTable_data = THTensor_(data)(connTable);

  /* and dims */
  const int64_t input_h = input->size(1);
  const int64_t input_w = input->size(2);
  const int64_t output_h = gradOutput->size(1);
  const int64_t output_w = gradOutput->size(2);
  const int64_t kH = weight->size(1);
  const int64_t kW = weight->size(2);

  int64_t p;
  /* parallel over input planes; the `if (i == p)` filter keeps writes
   * confined to the thread's own gradInput plane */
#pragma omp parallel for private(p)
  for (p = 0; p < nInputPlane; p++) {
    int64_t k;
    /* backward all */
    int nkernel = connTable->size(0);
    for (k = 0; k < nkernel; k++) {
      int o = (int)connTable_data[k*2+1] - TH_INDEX_BASE;
      int i = (int)connTable_data[k*2+0] - TH_INDEX_BASE;
      if (i == p)
      {
        /* gradient to input */
        THTensor_(validXCorr2Dptr)(
          gradInput_data + i*input_w*input_h,
          1.0,
          gradOutput_data + o*output_w*output_h, output_h, output_w,
          weight_data + k*kW*kH, kH, kW,
          dH, dW
        );
      }
    }
  }

  /* clean up */
  THTensor_(freeCopyTo)(gradInput, gradInput_);
  THTensor_(free)(gradOutput);
}

/* Accumulates parameter gradients, scaled by `scale`:
 *  - gradBias[k] += scale * sum of gradOutput plane k
 *  - gradWeight slice k += scale * reverse cross-correlation of gradOutput
 *    plane connTable[k][1] with input plane connTable[k][0].
 * Each parallel iteration writes a distinct gradBias entry / gradWeight
 * slice, so the omp loops are conflict-free. */
void THNN_(SpatialFullConvolutionMap_accGradParameters)(
  THNNState *state, THTensor *input, THTensor *gradOutput,
  THTensor *gradWeight, THTensor *gradBias, THTensor *connTable,
  int nInputPlane, int nOutputPlane, int dW, int dH, accreal scale_)
{
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  THArgCheck(
    gradWeight != NULL && !gradWeight->is_empty() && gradWeight->dim() == 3
    && connTable != NULL && connTable->size(0) == gradWeight->size(0), 5,
    "non-empty 3D gradWeight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE
  );

  /* contiguous */
  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* get raw pointers */
  real *input_data = THTensor_(data)(input);
  real *gradOutput_data = THTensor_(data)(gradOutput);
  real *gradWeight_data = THTensor_(data)(gradWeight);
  real *gradBias_data = THTensor_(data)(gradBias);

  /* and dims */
  const int64_t input_h = input->size(1);
  const int64_t input_w = input->size(2);
  const int64_t output_h = gradOutput->size(1);
  const int64_t output_w = gradOutput->size(2);
  const int64_t weight_h = gradWeight->size(1);
  const int64_t weight_w = gradWeight->size(2);

  /* gradients wrt bias */
  int64_t k;
#pragma omp parallel for private(k)
  for (k = 0; k < nOutputPlane; k++) {
    real *ptr_gradOutput = gradOutput_data + k*output_w*output_h;
    int64_t l;
    for (l = 0; l < output_h*output_w; l++)
      gradBias_data[k] += scale*ptr_gradOutput[l];
  }

  /* gradients wrt weight */
  int nkernel = connTable->size(0);
#pragma omp parallel for private(k)
  for (k = 0; k < nkernel; k++) {
    int o = (int)THTensor_(get2d)(connTable,k,1) - TH_INDEX_BASE;
    int i = (int)THTensor_(get2d)(connTable,k,0) - TH_INDEX_BASE;

    /* gradient to kernel */
    THTensor_(validXCorr2DRevptr)(
      gradWeight_data + k*weight_w*weight_h,
      scale,
      gradOutput_data + o*output_w*output_h, output_h, output_w,
      input_data + i*input_w*input_h, input_h, input_w,
      dH, dW
    );
  }

  /* clean up */
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
}

#endif
main.c
#include <stdio.h>
#include <omp.h>
#include <math.h>
#include <stdlib.h>

/*
 * 2-D lid-driven cavity flow: explicit finite-difference Navier-Stokes
 * solver with a pseudo-time Poisson iteration for pressure.  The same
 * computation is run once with OpenMP worksharing and once serially, and
 * the wall-clock times and speedup are reported.
 *
 * Fixes vs. the original:
 *  - the pressure-interior loop carried `nowait`, but the boundary loop
 *    that follows READS interior p_new values (p_new[i][1], p_new[1][j],
 *    ...) -- a data race; the implicit barrier is required.
 *  - the velocity boundary-row loop carried `nowait`, yet it and the
 *    boundary-column loop both write the corner cells with DIFFERENT
 *    values (0 vs 1.0 at the lid corners), so the final corner values
 *    depended on thread timing; dropping the nowait restores the serial
 *    ordering (column BCs win, as in the serial version).
 */
int main()
{
    /* Domain: 2 x 2 square on a 151 x 151 grid. */
    double x_len = 2.0;
    double y_len = 2.0;
    int x_points = 151;
    int y_points = 151;
    double del_x = x_len/(x_points-1);
    double del_y = y_len/(y_points-1);

    double x[x_points], y[y_points];

    #pragma omp parallel
    {
        /* x and y are independent arrays, so no barrier is needed between
         * these two worksharing loops (nowait is safe here). */
        #pragma omp for nowait
        for(int i = 0; i < x_points; i++){
            x[i] = i * del_x;
        }
        #pragma omp for
        for(int j = 0; j < y_points; j++){
            y[j] = j * del_y;
        }
    }

    /* Numerical parameters. */
    int num_time_itrs = 100; /* outer (time) iterations */
    int num_pres_itrs = 50;  /* pseudo-time Poisson iterations per step */
    double rho = 1.0;        /* density */
    double nu = 0.1;         /* kinematic viscosity */
    double del_t = 0.001;    /* time step */

    double u[y_points][x_points], v[y_points][x_points], p[y_points][x_points];
    double u_new[y_points][x_points], v_new[y_points][x_points], p_new[y_points][x_points];

    /* Zero all fields. */
    #pragma omp parallel for
    for(int i = 0; i < y_points; i++){
        for(int j = 0; j < x_points; j++){
            u[i][j] = 0.0;
            v[i][j] = 0.0;
            p[i][j] = 0.0;
            u_new[i][j] = 0.0;
            v_new[i][j] = 0.0;
            p_new[i][j] = 0.0;
        }
    }

    /* ---------------- Parallel run ---------------- */
    double par_start_time = omp_get_wtime();
    #pragma omp parallel
    {
        for(int it1 = 0; it1 < num_time_itrs; it1++){
            /* Interior velocity update.  Writes only interior cells of
             * u_new/v_new and reads only the old arrays, so it may overlap
             * the boundary loops below (nowait is safe). */
            #pragma omp for nowait
            for(int i = 1; i < y_points-1; i++){
                for(int j = 1; j < x_points-1; j++){
                    u_new[i][j] = u[i][j] - u[i][j]*(del_t/del_x)*(u[i][j] - u[i][j-1]) - v[i][j]*(del_t/del_y)*(u[i][j] - u[i-1][j]) - (del_t/(2*rho*del_x))*(p[i][j+1] - p[i][j-1]) + nu*( ((del_t/(del_x*del_x)) * (u[i][j+1] + u[i][j-1] - 2*u[i][j])) + ((del_t/(del_y*del_y))*(u[i+1][j] + u[i-1][j] - 2*u[i][j])) );
                    v_new[i][j] = v[i][j] - u[i][j]*(del_t/del_x)*(v[i][j] - v[i][j-1]) - v[i][j]*(del_t/del_y)*(v[i][j] - v[i-1][j]) - (del_t/(2*rho*del_y))*(p[i+1][j] - p[i-1][j]) + nu*( ((del_t/(del_x*del_x)) * (v[i][j+1] + v[i][j-1] - 2*v[i][j])) + ((del_t/(del_y*del_y))*(v[i+1][j] + v[i-1][j] - 2*v[i][j])) );
                }
            }

            /* Left/right boundary conditions.  BUGFIX: no `nowait` here --
             * this loop and the next both write the corner cells with
             * different values, so the serial ordering must be enforced. */
            #pragma omp for
            for(int i = 0; i < y_points; i++){
                u_new[i][0] = 0;            /* u = 0 at x = 0 */
                u_new[i][x_points-1] = 0;   /* u = 0 at x = 2 */
                v_new[i][0] = 0;            /* v = 0 at x = 0 */
                v_new[i][x_points-1] = 0;   /* v = 0 at x = 2 */
            }
            /* Bottom/top (lid) boundary conditions; runs after the barrier
             * above, so the corners deterministically take these values. */
            #pragma omp for
            for(int j = 0; j < x_points; j++){
                u_new[0][j] = 0;            /* u = 0 at y = 0 */
                u_new[y_points-1][j] = 1.0; /* moving lid: u = 1 at y = 2 */
                v_new[0][j] = 0;            /* v = 0 at y = 0 */
                v_new[y_points-1][j] = 0;   /* v = 0 at y = 2 */
            }

            /* Commit the new velocities (implicit barrier protects readers
             * in the next phase). */
            #pragma omp for
            for(int i = 0; i < y_points; i++){
                for(int j = 0; j < x_points; j++){
                    u[i][j] = u_new[i][j];
                    v[i][j] = v_new[i][j];
                }
            }

            /* Pressure Poisson pseudo-time iterations. */
            for(int it2 = 0; it2 < num_pres_itrs; it2++){
                /* BUGFIX: no `nowait` here -- the boundary loop below reads
                 * interior p_new values, so all interior writes must be
                 * complete first. */
                #pragma omp for
                for(int i = 1; i < y_points-1; i++){
                    for(int j = 1; j < x_points-1; j++){
                        p_new[i][j] = ( ( (del_y*del_y*(p[i][j+1] + p[i][j-1])) + (del_x*del_x*(p[i+1][j] + p[i-1][j])) ) / (2 * ((del_x*del_x)+(del_y*del_y))) ) - ( ((rho*del_x*del_x*del_y*del_y) / (2 * ((del_x*del_x)+(del_y*del_y)))) * ( ( (1/del_t)*( ( (u[i][j+1] - u[i][j-1])/(2*del_x) ) + ( (v[i+1][j] - v[i-1][j])/(2*del_y) ) ) ) - ( ( (u[i][j+1] - u[i][j-1])/(2*del_x) ) * ( (u[i][j+1] - u[i][j-1])/(2*del_x) ) ) - 2.0*( ((u[i+1][j] - u[i-1][j])/(2*del_y)) * ((v[i][j+1] - v[i][j-1])/(2*del_x)) ) - (( (v[i+1][j] - v[i-1][j])/(2*del_y) ) * ( (v[i+1][j] - v[i-1][j])/(2*del_y) )) ));
                    }
                }

                /* Neumann conditions on the side walls (reads interior). */
                #pragma omp for
                for(int i = 0; i < y_points; i++){
                    p_new[i][0] = p_new[i][1];                   /* dp/dx = 0 at x = 0 */
                    p_new[i][x_points-1] = p_new[i][x_points-2]; /* dp/dx = 0 at x = 2 */
                }
                /* Bottom Neumann, top Dirichlet. */
                #pragma omp for
                for(int j = 0; j < x_points; j++){
                    p_new[0][j] = p_new[1][j]; /* dp/dy = 0 at y = 0 */
                    p_new[y_points-1][j] = 0.0; /* p = 0 at y = 2 */
                }

                /* Commit the new pressure. */
                #pragma omp for
                for(int i = 0; i < y_points; i++){
                    for(int j = 0; j < x_points; j++){
                        p[i][j] = p_new[i][j];
                    }
                }
            } /* end pressure iterations */
        }
    }
    double par_end_time = omp_get_wtime();

    printf("\n Parallel execution time taken is : %f \n", par_end_time - par_start_time);

    /* ---------------- Serial run (for comparison) ---------------- */
    /* Reset all fields to zero. */
    for(int i = 0; i < y_points; i++){
        for(int j = 0; j < x_points; j++){
            u[i][j] = 0.0;
            v[i][j] = 0.0;
            p[i][j] = 0.0;
            u_new[i][j] = 0.0;
            v_new[i][j] = 0.0;
            p_new[i][j] = 0.0;
        }
    }

    double ser_start_time = omp_get_wtime();
    for(int it1 = 0; it1 < num_time_itrs; it1++){
        /* Interior velocity update. */
        for(int i = 1; i < y_points-1; i++){
            for(int j = 1; j < x_points-1; j++){
                u_new[i][j] = u[i][j] - u[i][j]*(del_t/del_x)*(u[i][j] - u[i][j-1]) - v[i][j]*(del_t/del_y)*(u[i][j] - u[i-1][j]) - (del_t/(2*rho*del_x))*(p[i][j+1] - p[i][j-1]) + nu*( ((del_t/(del_x*del_x)) * (u[i][j+1] + u[i][j-1] - 2*u[i][j])) + ((del_t/(del_y*del_y))*(u[i+1][j] + u[i-1][j] - 2*u[i][j])) );
                v_new[i][j] = v[i][j] - u[i][j]*(del_t/del_x)*(v[i][j] - v[i][j-1]) - v[i][j]*(del_t/del_y)*(v[i][j] - v[i-1][j]) - (del_t/(2*rho*del_y))*(p[i+1][j] - p[i-1][j]) + nu*( ((del_t/(del_x*del_x)) * (v[i][j+1] + v[i][j-1] - 2*v[i][j])) + ((del_t/(del_y*del_y))*(v[i+1][j] + v[i-1][j] - 2*v[i][j])) );
            }
        }
        /* Velocity boundary conditions. */
        for(int i = 0; i < y_points; i++){
            u_new[i][0] = 0;
            u_new[i][x_points-1] = 0;
            v_new[i][0] = 0;
            v_new[i][x_points-1] = 0;
        }
        for(int j = 0; j < x_points; j++){
            u_new[0][j] = 0;
            u_new[y_points-1][j] = 1.0;
            v_new[0][j] = 0;
            v_new[y_points-1][j] = 0;
        }
        /* Commit velocities. */
        for(int i = 0; i < y_points; i++){
            for(int j = 0; j < x_points; j++){
                u[i][j] = u_new[i][j];
                v[i][j] = v_new[i][j];
            }
        }

        /* Pressure Poisson iterations. */
        for(int it2 = 0; it2 < num_pres_itrs; it2++){
            for(int i = 1; i < y_points-1; i++){
                for(int j = 1; j < x_points-1; j++){
                    p_new[i][j] = ( ( (del_y*del_y*(p[i][j+1] + p[i][j-1])) + (del_x*del_x*(p[i+1][j] + p[i-1][j])) ) / (2 * ((del_x*del_x)+(del_y*del_y))) ) - ( ((rho*del_x*del_x*del_y*del_y) / (2 * ((del_x*del_x)+(del_y*del_y)))) * ( ( (1/del_t)*( ( (u[i][j+1] - u[i][j-1])/(2*del_x) ) + ( (v[i+1][j] - v[i-1][j])/(2*del_y) ) ) ) - ( ( (u[i][j+1] - u[i][j-1])/(2*del_x) ) * ( (u[i][j+1] - u[i][j-1])/(2*del_x) ) ) - 2.0*( ((u[i+1][j] - u[i-1][j])/(2*del_y)) * ((v[i][j+1] - v[i][j-1])/(2*del_x)) ) - (( (v[i+1][j] - v[i-1][j])/(2*del_y) ) * ( (v[i+1][j] - v[i-1][j])/(2*del_y) )) ));
                }
            }
            /* Pressure boundary conditions. */
            for(int i = 0; i < y_points; i++){
                p_new[i][0] = p_new[i][1];
                p_new[i][x_points-1] = p_new[i][x_points-2];
            }
            for(int j = 0; j < x_points; j++){
                p_new[0][j] = p_new[1][j];
                p_new[y_points-1][j] = 0.0;
            }
            /* Commit pressure. */
            for(int i = 0; i < y_points; i++){
                for(int j = 0; j < x_points; j++){
                    p[i][j] = p_new[i][j];
                }
            }
        }
    }
    double ser_end_time = omp_get_wtime();

    printf("\n Serial execution time taken is : %f \n", ser_end_time - ser_start_time);
    printf("\n Speedup is : %f \n", (ser_end_time - ser_start_time)/(par_end_time - par_start_time));

    return 0;
}
nanort.h
// // NanoRT, single header only modern ray tracing kernel. // // // Notes : The number of primitives are up to 2G. If you want to render large // data, please split data into chunks(~ 2G prims) and use NanoSG scene graph // library(`${nanort}/examples/nanosg`). // /* The MIT License (MIT) Copyright (c) 2015 - 2019 Light Transport Entertainment, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef NANORT_H_ #define NANORT_H_ #include <algorithm> #include <cassert> #include <chrono> #include <cmath> #include <cstdio> #include <cstdlib> #include <cstring> #include <functional> #include <iostream> #include <limits> #include <memory> #include <queue> #include <string> #include <vector> // #include "Vintersector.h" // #include "verilated.h" #include "simulator.hpp" // compiler macros // // NANORT_USE_CPP11_FEATURE : Enable C++11 feature // NANORT_ENABLE_PARALLEL_BUILD : Enable parallel BVH build. // NANORT_ENABLE_SERIALIZATION : Enable serialization feature for built BVH. 
// // Parallelized BVH build is supported on C++11 thread version. // OpenMP version is not fully tested. // thus turn off if you face a problem when building BVH in parallel. // #define NANORT_ENABLE_PARALLEL_BUILD // Some constants #define kNANORT_MAX_STACK_DEPTH (512) #define kNANORT_MIN_PRIMITIVES_FOR_PARALLEL_BUILD (1024 * 8) #define kNANORT_SHALLOW_DEPTH (4) // will create 2**N subtrees #ifdef NANORT_USE_CPP11_FEATURE // Assume C++11 compiler has thread support. // In some situation (e.g. embedded system, JIT compilation), thread feature // may not be available though... #include <atomic> #include <mutex> #include <thread> #define kNANORT_MAX_THREADS (256) // Parallel build should work well for C++11 version, thus force enable it. #ifndef NANORT_ENABLE_PARALLEL_BUILD #define NANORT_ENABLE_PARALLEL_BUILD #endif #endif namespace nanort { // RayType typedef enum { RAY_TYPE_NONE = 0x0, RAY_TYPE_PRIMARY = 0x1, RAY_TYPE_SECONDARY = 0x2, RAY_TYPE_DIFFUSE = 0x4, RAY_TYPE_REFLECTION = 0x8, RAY_TYPE_REFRACTION = 0x10 } RayType; #ifdef __clang__ #pragma clang diagnostic push #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #endif // ---------------------------------------------------------------------------- // Small vector class useful for multi-threaded environment. // // stack_container.h // // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This allocator can be used with STL containers to provide a stack buffer // from which to allocate memory and overflows onto the heap. This stack buffer // would be allocated on the stack and allows us to avoid heap operations in // some situations. // // STL likes to make copies of allocators, so the allocator itself can't hold // the data. 
// Instead, we make the creator responsible for creating a
// StackAllocator::Source which contains the data.  Copying the allocator
// merely copies the pointer to this shared source, so all allocators created
// based on our allocator will share the same stack buffer.
//
// This stack buffer implementation is very simple. The first allocation that
// fits in the stack buffer will use the stack buffer. Any subsequent
// allocations will not use the stack buffer, even if there is unused room.
// This makes it appropriate for array-like containers, but the caller should
// be sure to reserve() in the container up to the stack buffer size. Otherwise
// the container will allocate a small array which will "use up" the stack
// buffer.
template <typename T, size_t stack_capacity>
class StackAllocator : public std::allocator<T> {
 public:
  typedef typename std::allocator<T>::pointer pointer;
  typedef typename std::allocator<T>::size_type size_type;

  // Backing store for the allocator. The container owner is responsible for
  // maintaining this for as long as any containers using this allocator are
  // live.
  struct Source {
    Source() : used_stack_buffer_(false) {}

    // Casts the buffer in its right type.
    T *stack_buffer() { return reinterpret_cast<T *>(stack_buffer_); }
    const T *stack_buffer() const {
      return reinterpret_cast<const T *>(stack_buffer_);
    }

    //
    // IMPORTANT: Take care to ensure that stack_buffer_ is aligned
    // since it is used to mimic an array of T.
    // Be careful while declaring any unaligned types (like bool)
    // before stack_buffer_.
    //

    // The buffer itself. It is not of type T because we don't want the
    // constructors and destructors to be automatically called. Define a POD
    // buffer of the right size instead.
    char stack_buffer_[sizeof(T[stack_capacity])];

    // Set when the stack buffer is used for an allocation. We do not track
    // how much of the buffer is used, only that somebody is using it.
    bool used_stack_buffer_;
  };

  // Used by containers when they want to refer to an allocator of type U.
  template <typename U>
  struct rebind {
    typedef StackAllocator<U, stack_capacity> other;
  };

  // For the straight up copy c-tor, we can share storage.
  StackAllocator(const StackAllocator<T, stack_capacity> &rhs)
      : source_(rhs.source_) {}

  // ISO C++ requires the following constructor to be defined,
  // and std::vector in VC++2008SP1 Release fails with an error
  // in the class _Container_base_aux_alloc_real (from <xutility>)
  // if the constructor does not exist.
  // For this constructor, we cannot share storage; there's
  // no guarantee that the Source buffer of Ts is large enough
  // for Us.
  // TODO(Google): If we were fancy pants, perhaps we could share storage
  // iff sizeof(T) == sizeof(U).
  template <typename U, size_t other_capacity>
  StackAllocator(const StackAllocator<U, other_capacity> &other)
      : source_(NULL) {
    (void)other;
  }

  // Wrap the given backing store; `source` may outlive this allocator.
  explicit StackAllocator(Source *source) : source_(source) {}

  // Actually do the allocation. Use the stack buffer if nobody has used it yet
  // and the size requested fits. Otherwise, fall through to the standard
  // allocator.
  pointer allocate(size_type n, void *hint = 0) {
    if (source_ != NULL && !source_->used_stack_buffer_ &&
        n <= stack_capacity) {
      source_->used_stack_buffer_ = true;
      return source_->stack_buffer();
    } else {
      return std::allocator<T>::allocate(n, hint);
    }
  }

  // Free: when trying to free the stack buffer, just mark it as free. For
  // non-stack-buffer pointers, just fall though to the standard allocator.
  void deallocate(pointer p, size_type n) {
    if (source_ != NULL && p == source_->stack_buffer())
      source_->used_stack_buffer_ = false;
    else
      std::allocator<T>::deallocate(p, n);
  }

 private:
  // Shared backing store; NULL when this allocator has no stack buffer.
  Source *source_;
};

// A wrapper around STL containers that maintains a stack-sized buffer that the
// initial capacity of the vector is based on. Growing the container beyond the
// stack capacity will transparently overflow onto the heap.
// The container must
// support reserve().
//
// WATCH OUT: the ContainerType MUST use the proper StackAllocator for this
// type. This object is really intended to be used only internally. You'll want
// to use the wrappers below for different types.
template <typename TContainerType, int stack_capacity>
class StackContainer {
 public:
  typedef TContainerType ContainerType;
  typedef typename ContainerType::value_type ContainedType;
  typedef StackAllocator<ContainedType, stack_capacity> Allocator;

  // Allocator must be constructed before the container!
  StackContainer() : allocator_(&stack_data_), container_(allocator_) {
    // Make the container use the stack allocation by reserving our buffer size
    // before doing anything else.
    container_.reserve(stack_capacity);
  }

  // Getters for the actual container.
  //
  // Danger: any copies of this made using the copy constructor must have
  // shorter lifetimes than the source. The copy will share the same allocator
  // and therefore the same stack buffer as the original. Use std::copy to
  // copy into a "real" container for longer-lived objects.
  ContainerType &container() { return container_; }
  const ContainerType &container() const { return container_; }

  // Support operator-> to get to the container. This allows nicer syntax like:
  //   StackContainer<...> foo;
  //   std::sort(foo->begin(), foo->end());
  ContainerType *operator->() { return &container_; }
  const ContainerType *operator->() const { return &container_; }

#ifdef UNIT_TEST
  // Retrieves the stack source so that unit tests can verify that the
  // buffer is being used properly.
  const typename Allocator::Source &stack_data() const { return stack_data_; }
#endif

 protected:
  // Backing store; must be declared before allocator_ (initialization order).
  typename Allocator::Source stack_data_;
  unsigned char pad_[7];  // padding; keeps stack_data_'s buffer alignment
  Allocator allocator_;
  ContainerType container_;

  // DISALLOW_EVIL_CONSTRUCTORS(StackContainer);
  // Copying is intentionally disabled (declared, never defined).
  StackContainer(const StackContainer &);
  void operator=(const StackContainer &);
};

// StackVector
//
// Example:
//   StackVector<int, 16> foo;
//   foo->push_back(22);  // we have overloaded operator->
//   foo[0] = 10;         // as well as operator[]
template <typename T, size_t stack_capacity>
class StackVector
    : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
                            stack_capacity> {
 public:
  StackVector()
      : StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
                       stack_capacity>() {}

  // We need to put this in STL containers sometimes, which requires a copy
  // constructor. We can't call the regular copy constructor because that will
  // take the stack buffer from the original. Here, we create an empty object
  // and make a stack buffer of its own.
  StackVector(const StackVector<T, stack_capacity> &other)
      : StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
                       stack_capacity>() {
    this->container().assign(other->begin(), other->end());
  }

  StackVector<T, stack_capacity> &operator=(
      const StackVector<T, stack_capacity> &other) {
    this->container().assign(other->begin(), other->end());
    return *this;
  }

  // Vectors are commonly indexed, which isn't very convenient even with
  // operator-> (using "->at()" does exception stuff we don't want).
  T &operator[](size_t i) { return this->container().operator[](i); }
  const T &operator[](size_t i) const {
    return this->container().operator[](i);
  }
};

// ----------------------------------------------------------------------------

// Minimal 3-component vector type used throughout NanoRT (T = float/double).
template <typename T = float>
class real3 {
 public:
  real3() {}
  // Splat constructor: all three components set to x.
  real3(T x) {
    v[0] = x;
    v[1] = x;
    v[2] = x;
  }
  real3(T xx, T yy, T zz) {
    v[0] = xx;
    v[1] = yy;
    v[2] = zz;
  }
  // Load from a raw pointer (reads p[0..2]).
  explicit real3(const T *p) {
    v[0] = p[0];
    v[1] = p[1];
    v[2] = p[2];
  }

  inline T x() const { return v[0]; }
  inline T y() const { return v[1]; }
  inline T z() const { return v[2]; }

  real3 operator*(T f) const { return real3(x() * f, y() * f, z() * f); }
  real3 operator-(const real3 &f2) const {
    return real3(x() - f2.x(), y() - f2.y(), z() - f2.z());
  }
  // Component-wise product (not dot product; see vdot()).
  real3 operator*(const real3 &f2) const {
    return real3(x() * f2.x(), y() * f2.y(), z() * f2.z());
  }
  real3 operator+(const real3 &f2) const {
    return real3(x() + f2.x(), y() + f2.y(), z() + f2.z());
  }
  real3 &operator+=(const real3 &f2) {
    v[0] += f2.x();
    v[1] += f2.y();
    v[2] += f2.z();
    return (*this);
  }
  // Component-wise division; no zero check (see vsafe_inverse()).
  real3 operator/(const real3 &f2) const {
    return real3(x() / f2.x(), y() / f2.y(), z() / f2.z());
  }
  real3 operator-() const { return real3(-x(), -y(), -z()); }
  T operator[](int i) const { return v[i]; }
  T &operator[](int i) { return v[i]; }

  T v[3];
  // T pad;  // for alignment (when T = float)
};

template <typename T>
inline real3<T> operator*(T f, const real3<T> &v) {
  return real3<T>(v.x() * f, v.y() * f, v.z() * f);
}

// Component-wise negation.
template <typename T>
inline real3<T> vneg(const real3<T> &rhs) {
  return real3<T>(-rhs.x(), -rhs.y(), -rhs.z());
}

// Euclidean length.
template <typename T>
inline T vlength(const real3<T> &rhs) {
  return std::sqrt(rhs.x() * rhs.x() + rhs.y() * rhs.y() + rhs.z() * rhs.z());
}

// Returns rhs normalized to unit length; near-zero vectors are returned
// unchanged (guards against division by ~0).
template <typename T>
inline real3<T> vnormalize(const real3<T> &rhs) {
  real3<T> v = rhs;
  T len = vlength(rhs);
  if (std::fabs(len) > std::numeric_limits<T>::epsilon()) {
    T inv_len = static_cast<T>(1.0) / len;
    v.v[0] *= inv_len;
    v.v[1] *= inv_len;
    v.v[2] *= inv_len;
  }
  return v;
}

// Cross product a x b.
template <typename T>
inline real3<T> vcross(const real3<T> a, const real3<T> b) {
  real3<T> c;
  c[0] = a[1] * b[2] - a[2] * b[1];
  c[1] = a[2] * b[0] - a[0] * b[2];
  c[2] = a[0] * b[1] - a[1] * b[0];
  return c;
}

// Dot product.
template <typename T>
inline T vdot(const real3<T> a, const real3<T> b) {
  return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}

// Component-wise reciprocal that maps near-zero components to +/-infinity
// (sign-preserving) instead of producing NaN/Inf by accident. Used for
// inverse ray directions in slab tests.
template <typename T>
inline real3<T> vsafe_inverse(const real3<T> v) {
  real3<T> r;

#ifdef NANORT_USE_CPP11_FEATURE
  // C++11 path: std::copysign preserves the sign of +/-0.0 as well.
  if (std::fabs(v[0]) < std::numeric_limits<T>::epsilon()) {
    r[0] = std::numeric_limits<T>::infinity() *
           std::copysign(static_cast<T>(1), v[0]);
  } else {
    r[0] = static_cast<T>(1.0) / v[0];
  }

  if (std::fabs(v[1]) < std::numeric_limits<T>::epsilon()) {
    r[1] = std::numeric_limits<T>::infinity() *
           std::copysign(static_cast<T>(1), v[1]);
  } else {
    r[1] = static_cast<T>(1.0) / v[1];
  }

  if (std::fabs(v[2]) < std::numeric_limits<T>::epsilon()) {
    r[2] = std::numeric_limits<T>::infinity() *
           std::copysign(static_cast<T>(1), v[2]);
  } else {
    r[2] = static_cast<T>(1.0) / v[2];
  }
#else
  // Pre-C++11 path: comparison-based sign (note: treats -0.0 as positive).
  if (std::fabs(v[0]) < std::numeric_limits<T>::epsilon()) {
    T sgn = (v[0] < static_cast<T>(0)) ? static_cast<T>(-1) : static_cast<T>(1);
    r[0] = std::numeric_limits<T>::infinity() * sgn;
  } else {
    r[0] = static_cast<T>(1.0) / v[0];
  }

  if (std::fabs(v[1]) < std::numeric_limits<T>::epsilon()) {
    T sgn = (v[1] < static_cast<T>(0)) ? static_cast<T>(-1) : static_cast<T>(1);
    r[1] = std::numeric_limits<T>::infinity() * sgn;
  } else {
    r[1] = static_cast<T>(1.0) / v[1];
  }

  if (std::fabs(v[2]) < std::numeric_limits<T>::epsilon()) {
    T sgn = (v[2] < static_cast<T>(0)) ?
        static_cast<T>(-1) : static_cast<T>(1);
    r[2] = std::numeric_limits<T>::infinity() * sgn;
  } else {
    r[2] = static_cast<T>(1.0) / v[2];
  }
#endif

  return r;
}

// Returns the address of the idx-th vertex given a byte stride between
// consecutive vertices (supports interleaved vertex buffers).
template <typename real>
inline const real *get_vertex_addr(const real *p, const size_t idx,
                                   const size_t stride_bytes) {
  return reinterpret_cast<const real *>(
      reinterpret_cast<const unsigned char *>(p) + idx * stride_bytes);
}

// Ray description: origin + direction with a [min_t, max_t] valid interval.
// Defaults: origin at 0, direction -Z, full positive t range.
template <typename T = float>
class Ray {
 public:
  Ray()
      : min_t(static_cast<T>(0.0)),
        max_t(std::numeric_limits<T>::max()),
        type(RAY_TYPE_NONE) {
    org[0] = static_cast<T>(0.0);
    org[1] = static_cast<T>(0.0);
    org[2] = static_cast<T>(0.0);
    dir[0] = static_cast<T>(0.0);
    dir[1] = static_cast<T>(0.0);
    dir[2] = static_cast<T>(-1.0);
  }

  T org[3];  // must set
  T dir[3];  // must set
  T min_t;   // minimum ray hit distance.
  T max_t;   // maximum ray hit distance.

  unsigned int type;  // ray type (RayType bit flags)

  // TODO(LTE): Align sizeof(Ray)
};

// One BVH node (either a leaf or a branch; see `flag`).
template <typename T = float>
class BVHNode {
 public:
  BVHNode() {}
  BVHNode(const BVHNode &rhs) {
    bmin[0] = rhs.bmin[0];
    bmin[1] = rhs.bmin[1];
    bmin[2] = rhs.bmin[2];
    flag = rhs.flag;

    bmax[0] = rhs.bmax[0];
    bmax[1] = rhs.bmax[1];
    bmax[2] = rhs.bmax[2];
    axis = rhs.axis;

    data[0] = rhs.data[0];
    data[1] = rhs.data[1];
  }

  BVHNode &operator=(const BVHNode &rhs) {
    bmin[0] = rhs.bmin[0];
    bmin[1] = rhs.bmin[1];
    bmin[2] = rhs.bmin[2];
    flag = rhs.flag;

    bmax[0] = rhs.bmax[0];
    bmax[1] = rhs.bmax[1];
    bmax[2] = rhs.bmax[2];
    axis = rhs.axis;

    data[0] = rhs.data[0];
    data[1] = rhs.data[1];

    return (*this);
  }

  ~BVHNode() {}

  T bmin[3];
  T bmax[3];

  int flag;  // 1 = leaf node, 0 = branch node
  int axis;  // split axis (branch nodes)

  // leaf
  //   data[0] = npoints
  //   data[1] = index
  //
  // branch
  //   data[0] = child[0]
  //   data[1] = child[1]
  unsigned int data[2];
};

// Orders hits by distance `t` (nearest first).
template <class H>
class IntersectComparator {
 public:
  bool operator()(const H &a, const H &b) const { return a.t < b.t; }
};

/// BVH build option.
template <typename T = float>
struct BVHBuildOptions {
  T cost_t_aabb;                    // SAH AABB-test cost constant (Taabb)
  unsigned int min_leaf_primitives; // stop splitting below this many prims
  unsigned int max_tree_depth;
  unsigned int bin_size;            // number of SAH bins
  unsigned int shallow_depth;       // depth of shallow tree for parallel build
  unsigned int min_primitives_for_parallel_build;

  // Cache bounding box computation.
  // Requires more memory, but BVHbuild can be faster.
  bool cache_bbox;
  unsigned char pad[3];

  // Set default value: Taabb = 0.2
  BVHBuildOptions()
      : cost_t_aabb(static_cast<T>(0.2)),
        min_leaf_primitives(4),
        max_tree_depth(256),
        bin_size(64),
        shallow_depth(kNANORT_SHALLOW_DEPTH),
        min_primitives_for_parallel_build(
            kNANORT_MIN_PRIMITIVES_FOR_PARALLEL_BUILD),
        cache_bbox(false) {}
};

/// BVH build statistics.
class BVHBuildStatistics {
 public:
  unsigned int max_tree_depth;
  unsigned int num_leaf_nodes;
  unsigned int num_branch_nodes;
  float build_secs;

  // Default-initialize all counters to zero.
  BVHBuildStatistics()
      : max_tree_depth(0),
        num_leaf_nodes(0),
        num_branch_nodes(0),
        build_secs(0.0f) {}
};

///
/// @brief BVH trace option.
///
class BVHTraceOptions {
 public:
  // Hit only for face IDs in indexRange.
  // This feature is good to mimic something like glDrawArrays()
  unsigned int prim_ids_range[2];

  // Prim ID to skip for avoiding self-intersection
  // -1 = no skipping
  unsigned int skip_prim_id;

  bool cull_back_face;
  unsigned char pad[3];  ///< Padding (not used)

  BVHTraceOptions() {
    prim_ids_range[0] = 0;
    prim_ids_range[1] = 0x7FFFFFFF;  // Up to 2G face IDs.

    skip_prim_id = static_cast<unsigned int>(-1);
    cull_back_face = false;
  }
};

///
/// @brief Bounding box.
///
template <typename T>
class BBox {
 public:
  real3<T> bmin;
  real3<T> bmax;

  // Starts inverted (min = +max, max = -max) so that the first point
  // expanded into it defines both corners.
  BBox() {
    bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max();
    bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max();
  }
};

///
/// @brief Hit class for traversing nodes.
///
/// Stores hit information of node traversal.
/// Node traversal is used for two-level ray tracing(efficient ray traversal of /// a scene hierarchy) /// template <typename T> class NodeHit { public: NodeHit() : t_min(std::numeric_limits<T>::max()), t_max(-std::numeric_limits<T>::max()), node_id(static_cast<unsigned int>(-1)) {} NodeHit(const NodeHit<T> &rhs) { t_min = rhs.t_min; t_max = rhs.t_max; node_id = rhs.node_id; } NodeHit &operator=(const NodeHit<T> &rhs) { t_min = rhs.t_min; t_max = rhs.t_max; node_id = rhs.node_id; return (*this); } ~NodeHit() {} T t_min; T t_max; unsigned int node_id; }; /// /// @brief Comparator object for NodeHit. /// /// Comparator object for finding nearest hit point in node traversal. /// template <typename T> class NodeHitComparator { public: inline bool operator()(const NodeHit<T> &a, const NodeHit<T> &b) { return a.t_min < b.t_min; } }; /// /// @brief Bounding Volume Hierarchy acceleration. /// /// BVHAccel is central part of ray tracing(ray traversal). /// BVHAccel takes an input geometry(primitive) information and build a data /// structure for efficient ray tracing(`O(log2 N)` in theory, where N is the /// number of primitive in the scene). /// /// @tparam T real value type(float or double). /// template <typename T> class BVHAccel { public: BVHAccel() : pad0_(0) { (void)pad0_; simulator = new Simulator(); } ~BVHAccel() {} Simulator *simulator; /// /// Build BVH for input primitives. /// /// @tparam Prim Primitive(e.g. Triangle) accessor class. /// @tparam Pred Predicator(comparator class object for `Prim` class to find /// nearest hit point) /// /// @param[in] num_primitives The number of primitive. /// @param[in] p Primitive accessor class object. /// @param[in] pred Predicator object. /// /// @return true upon success. /// template <class Prim, class Pred> bool Build(const unsigned int num_primitives, const Prim &p, const Pred &pred, const BVHBuildOptions<T> &options = BVHBuildOptions<T>()); /// /// Get statistics of built BVH tree. 
  /// Valid after `Build()`
  ///
  /// @return BVH build statistics.
  ///
  BVHBuildStatistics GetStatistics() const { return stats_; }

#if defined(NANORT_ENABLE_SERIALIZATION)
  ///
  /// Dump built BVH to the file.
  ///
  bool Dump(const char *filename) const;
  bool Dump(FILE *fp) const;

  ///
  /// Load BVH binary
  ///
  bool Load(const char *filename);
  bool Load(FILE *fp);
#endif

  // Debug helper; definition lives outside this header chunk.
  void Debug();

  ///
  /// @brief Traverse into BVH along ray and find closest hit point & primitive
  /// if found
  ///
  /// @tparam I Intersector class
  /// @tparam H Hit class
  ///
  /// @param[in] ray Input ray
  /// @param[in] intersector Intersector object. This object is called for each
  /// possible intersection of ray and BVH during traversal.
  /// @param[out] isect Intersection point information(filled when closest hit
  /// point was found)
  /// @param[in] options Traversal options.
  ///
  /// @return true if the closest hit point found.
  ///
  template <class I, class H>
  bool Traverse(const Ray<T> &ray, const I &intersector, H *isect,
                const BVHTraceOptions &options = BVHTraceOptions()) const;

#if 0
  /// Multi-hit ray traversal
  /// Returns `max_intersections` frontmost intersections
  template<class I, class H, class Comp>
  bool MultiHitTraverse(const Ray<T> &ray,
                        int max_intersections,
                        const I &intersector,
                        StackVector<H, 128> *isects,
                        const BVHTraceOptions &options = BVHTraceOptions()) const;
#endif

  ///
  /// List up nodes which intersects along the ray.
  /// This function is useful for two-level BVH traversal.
  /// See `examples/nanosg` for example.
  ///
  /// @tparam I Intersection class
  ///
  ///
  ///
  template <class I>
  bool ListNodeIntersections(const Ray<T> &ray, int max_intersections,
                             const I &intersector,
                             StackVector<NodeHit<T>, 128> *hits) const;

  const std::vector<BVHNode<T> > &GetNodes() const { return nodes_; }
  const std::vector<unsigned int> &GetIndices() const { return indices_; }

  ///
  /// Returns bounding box of built BVH.
/// void BoundingBox(T bmin[3], T bmax[3]) const { if (nodes_.empty()) { bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max(); bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max(); } else { bmin[0] = nodes_[0].bmin[0]; bmin[1] = nodes_[0].bmin[1]; bmin[2] = nodes_[0].bmin[2]; bmax[0] = nodes_[0].bmax[0]; bmax[1] = nodes_[0].bmax[1]; bmax[2] = nodes_[0].bmax[2]; } } bool IsValid() const { return nodes_.size() > 0; } private: #if defined(NANORT_ENABLE_PARALLEL_BUILD) typedef struct { unsigned int left_idx; unsigned int right_idx; unsigned int offset; } ShallowNodeInfo; // Used only during BVH construction std::vector<ShallowNodeInfo> shallow_node_infos_; /// Builds shallow BVH tree recursively. template <class P, class Pred> unsigned int BuildShallowTree(std::vector<BVHNode<T> > *out_nodes, unsigned int left_idx, unsigned int right_idx, unsigned int depth, unsigned int max_shallow_depth, const P &p, const Pred &pred); #endif /// Builds BVH tree recursively. template <class P, class Pred> unsigned int BuildTree(BVHBuildStatistics *out_stat, std::vector<BVHNode<T> > *out_nodes, unsigned int left_idx, unsigned int right_idx, unsigned int depth, const P &p, const Pred &pred); template <class I> bool TestLeafNode(const BVHNode<T> &node, const Ray<T> &ray, const I &intersector) const; template <class I> bool TestLeafNodeIntersections( const BVHNode<T> &node, const Ray<T> &ray, const int max_intersections, const I &intersector, std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >, NodeHitComparator<T> > *isect_pq) const; #if 0 template<class I, class H, class Comp> bool MultiHitTestLeafNode(std::priority_queue<H, std::vector<H>, Comp> *isect_pq, int max_intersections, const BVHNode<T> &node, const Ray<T> &ray, const I &intersector) const; #endif std::vector<BVHNode<T> > nodes_; std::vector<unsigned int> indices_; // max 4G triangles. 
  std::vector<BBox<T> > bboxes_;  // per-primitive bbox cache (cache_bbox)
  BVHBuildOptions<T> options_;
  BVHBuildStatistics stats_;
  unsigned int pad0_;
};

// Predefined SAH predicator for triangle.
// Partitions triangles against a split plane: Set() fixes the axis/position,
// operator() tests whether triangle i's centroid lies below the plane.
template <typename T = float>
class TriangleSAHPred {
 public:
  TriangleSAHPred(
      const T *vertices, const unsigned int *faces,
      size_t vertex_stride_bytes)  // e.g. 12 for sizeof(float) * XYZ
      : axis_(0),
        pos_(static_cast<T>(0.0)),
        vertices_(vertices),
        faces_(faces),
        vertex_stride_bytes_(vertex_stride_bytes) {}

  TriangleSAHPred(const TriangleSAHPred<T> &rhs)
      : axis_(rhs.axis_),
        pos_(rhs.pos_),
        vertices_(rhs.vertices_),
        faces_(rhs.faces_),
        vertex_stride_bytes_(rhs.vertex_stride_bytes_) {}

  // NOTE(review): assigns the const member vertex_stride_bytes_; this only
  // compiles because the template member is never instantiated — confirm no
  // caller ever assigns a TriangleSAHPred.
  TriangleSAHPred<T> &operator=(const TriangleSAHPred<T> &rhs) {
    axis_ = rhs.axis_;
    pos_ = rhs.pos_;
    vertices_ = rhs.vertices_;
    faces_ = rhs.faces_;
    vertex_stride_bytes_ = rhs.vertex_stride_bytes_;

    return (*this);
  }

  // Set split plane (axis index 0/1/2 and position along that axis).
  void Set(int axis, T pos) const {
    axis_ = axis;
    pos_ = pos;
  }

  // True when triangle i's centroid is below the split plane.
  // Compares the vertex-sum against 3*pos to avoid a division.
  bool operator()(unsigned int i) const {
    int axis = axis_;
    T pos = pos_;

    unsigned int i0 = faces_[3 * i + 0];
    unsigned int i1 = faces_[3 * i + 1];
    unsigned int i2 = faces_[3 * i + 2];

    real3<T> p0(get_vertex_addr<T>(vertices_, i0, vertex_stride_bytes_));
    real3<T> p1(get_vertex_addr<T>(vertices_, i1, vertex_stride_bytes_));
    real3<T> p2(get_vertex_addr<T>(vertices_, i2, vertex_stride_bytes_));

    T center = p0[axis] + p1[axis] + p2[axis];

    return (center < pos * static_cast<T>(3.0));
  }

 private:
  // axis_/pos_ are mutable so Set() can be const (predicate is passed const).
  mutable int axis_;
  mutable T pos_;
  const T *vertices_;
  const unsigned int *faces_;
  const size_t vertex_stride_bytes_;
};

// Predefined Triangle mesh geometry.
template <typename T = float>
class TriangleMesh {
 public:
  TriangleMesh(
      const T *vertices, const unsigned int *faces,
      const size_t vertex_stride_bytes)  // e.g. 12 for sizeof(float) * XYZ
      : vertices_(vertices),
        faces_(faces),
        vertex_stride_bytes_(vertex_stride_bytes) {}

  /// Compute bounding box for `prim_index`th triangle.
  /// This function is called for each primitive in BVH build.
  void BoundingBox(real3<T> *bmin, real3<T> *bmax,
                   unsigned int prim_index) const {
    // Seed both corners from the triangle's first vertex.
    unsigned vertex = faces_[3 * prim_index + 0];
    (*bmin)[0] = get_vertex_addr(vertices_, vertex, vertex_stride_bytes_)[0];
    (*bmin)[1] = get_vertex_addr(vertices_, vertex, vertex_stride_bytes_)[1];
    (*bmin)[2] = get_vertex_addr(vertices_, vertex, vertex_stride_bytes_)[2];
    (*bmax)[0] = get_vertex_addr(vertices_, vertex, vertex_stride_bytes_)[0];
    (*bmax)[1] = get_vertex_addr(vertices_, vertex, vertex_stride_bytes_)[1];
    (*bmax)[2] = get_vertex_addr(vertices_, vertex, vertex_stride_bytes_)[2];

    // remaining two vertices of the primitive
    for (unsigned int i = 1; i < 3; i++) {
      // xyz
      for (int k = 0; k < 3; k++) {
        T coord = get_vertex_addr<T>(vertices_, faces_[3 * prim_index + i],
                                     vertex_stride_bytes_)[k];

        (*bmin)[k] = std::min((*bmin)[k], coord);
        (*bmax)[k] = std::max((*bmax)[k], coord);
      }
    }
  }

  const T *vertices_;
  const unsigned int *faces_;
  const size_t vertex_stride_bytes_;

  //
  // Accessors
  //
  const T *GetVertices() const { return vertices_; }
  const unsigned int *GetFaces() const { return faces_; }
  size_t GetVertexStrideBytes() const { return vertex_stride_bytes_; }
};

///
/// Stores intersection point information for triangle geometry.
///
template <typename T = float>
class TriangleIntersection {
 public:
  T u;  // barycentric coordinate
  T v;  // barycentric coordinate

  // Required member variables.
  T t;  // hit distance along the ray
  unsigned int prim_id;
};

///
/// Intersector is a template class which implements intersection method and
/// stores intesection point information(`H`)
///
/// @tparam T Precision(float or double)
/// @tparam H Intersection point information struct
///
template <typename T = float, class H = TriangleIntersection<T> >
class TriangleIntersector {
 public:
  // Initialize from mesh object.
// M: mesh class template <class M> TriangleIntersector(const M &m) : vertices_(m.GetVertices()), faces_(m.GetFaces()), vertex_stride_bytes_(m.GetVertexStrideBytes()) {} template <class M> TriangleIntersector(const M *m) : vertices_(m->GetVertices()), faces_(m->GetFaces()), vertex_stride_bytes_(m->GetVertexStrideBytes()) {} TriangleIntersector(const T *vertices, const unsigned int *faces, const size_t vertex_stride_bytes) // e.g. // vertex_stride_bytes // = 12 = sizeof(float) // * 3 : vertices_(vertices), faces_(faces), vertex_stride_bytes_(vertex_stride_bytes) {} // For Watertight Ray/Triangle Intersection. typedef struct { T Sx; T Sy; T Sz; int kx; int ky; int kz; } RayCoeff; /// Do ray intersection stuff for `prim_index` th primitive and return hit /// distance `t`, barycentric coordinate `u` and `v`. /// Returns true if there's intersection. bool Intersect(T *t_inout, const unsigned int prim_index) const { if ((prim_index < trace_options_.prim_ids_range[0]) || (prim_index >= trace_options_.prim_ids_range[1])) { return false; } // Self-intersection test. 
    if (prim_index == trace_options_.skip_prim_id) {
      return false;
    }

    const unsigned int f0 = faces_[3 * prim_index + 0];
    const unsigned int f1 = faces_[3 * prim_index + 1];
    const unsigned int f2 = faces_[3 * prim_index + 2];

    const real3<T> p0(get_vertex_addr(vertices_, f0 + 0, vertex_stride_bytes_));
    const real3<T> p1(get_vertex_addr(vertices_, f1 + 0, vertex_stride_bytes_));
    const real3<T> p2(get_vertex_addr(vertices_, f2 + 0, vertex_stride_bytes_));

    // Translate vertices so the ray origin is at (0,0,0).
    const real3<T> A = p0 - ray_org_;
    const real3<T> B = p1 - ray_org_;
    const real3<T> C = p2 - ray_org_;

    // Shear & scale the vertices using the per-ray coefficients so the ray
    // becomes the +z axis (watertight intersection setup).
    const T Ax = A[ray_coeff_.kx] - ray_coeff_.Sx * A[ray_coeff_.kz];
    const T Ay = A[ray_coeff_.ky] - ray_coeff_.Sy * A[ray_coeff_.kz];
    const T Bx = B[ray_coeff_.kx] - ray_coeff_.Sx * B[ray_coeff_.kz];
    const T By = B[ray_coeff_.ky] - ray_coeff_.Sy * B[ray_coeff_.kz];
    const T Cx = C[ray_coeff_.kx] - ray_coeff_.Sx * C[ray_coeff_.kz];
    const T Cy = C[ray_coeff_.ky] - ray_coeff_.Sy * C[ray_coeff_.kz];

    // Scaled barycentric coordinates (signed edge functions).
    T U = Cx * By - Cy * Bx;
    T V = Ax * Cy - Ay * Cx;
    T W = Bx * Ay - By * Ax;

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wfloat-equal"
#endif

    // Fall back to test against edges using double precision.
    if (U == static_cast<T>(0.0) || V == static_cast<T>(0.0) ||
        W == static_cast<T>(0.0)) {
      double CxBy = static_cast<double>(Cx) * static_cast<double>(By);
      double CyBx = static_cast<double>(Cy) * static_cast<double>(Bx);
      U = static_cast<T>(CxBy - CyBx);

      double AxCy = static_cast<double>(Ax) * static_cast<double>(Cy);
      double AyCx = static_cast<double>(Ay) * static_cast<double>(Cx);
      V = static_cast<T>(AxCy - AyCx);

      double BxAy = static_cast<double>(Bx) * static_cast<double>(Ay);
      double ByAx = static_cast<double>(By) * static_cast<double>(Ax);
      W = static_cast<T>(BxAy - ByAx);
    }

    // Reject when the edge signs are mixed (ray misses the triangle).
    // With back-face culling off, an all-negative triple (back face) is kept.
    if (U < static_cast<T>(0.0) || V < static_cast<T>(0.0) ||
        W < static_cast<T>(0.0)) {
      if (trace_options_.cull_back_face ||
          (U > static_cast<T>(0.0) || V > static_cast<T>(0.0) ||
           W > static_cast<T>(0.0))) {
        return false;
      }
    }

    // Degenerate triangle or ray parallel to its plane.
    T det = U + V + W;
    if (det == static_cast<T>(0.0)) return false;

#ifdef __clang__
#pragma clang diagnostic pop
#endif

    const T Az = ray_coeff_.Sz * A[ray_coeff_.kz];
    const T Bz = ray_coeff_.Sz * B[ray_coeff_.kz];
    const T Cz = ray_coeff_.Sz * C[ray_coeff_.kz];

    const T D = U * Az + V * Bz + W * Cz;

    const T rcpDet = static_cast<T>(1.0) / det;
    T tt = D * rcpDet;

    // Keep only hits inside the current [t_min_, *t_inout] interval.
    if (tt > (*t_inout)) {
      return false;
    }

    if (tt < t_min_) {
      return false;
    }

    (*t_inout) = tt;
    // Use Möller-Trumbore style barycentric coordinates
    // U + V + W = 1.0 and interp(p) = U * p0 + V * p1 + W * p2
    // We want interp(p) = (1 - u - v) * p0 + u * v1 + v * p2;
    // => u = V, v = W.
    u_ = V * rcpDet;
    v_ = W * rcpDet;

    return true;
  }

  /// Returns the nearest hit distance.
  T GetT() const { return t_; }

  /// Update is called when initializing intersection and nearest hit is found.
  void Update(T t, unsigned int prim_idx) const {
    t_ = t;
    prim_id_ = prim_idx;
  }

  /// Prepare BVH traversal (e.g. compute inverse ray direction)
  /// This function is called only once in BVH traversal.
void PrepareTraversal(const Ray<T> &ray,
                      const BVHTraceOptions &trace_options) const {
  ray_org_[0] = ray.org[0];
  ray_org_[1] = ray.org[1];
  ray_org_[2] = ray.org[2];

  // Calculate dimension where the ray direction is maximal.
  ray_coeff_.kz = 0;
  T absDir = std::fabs(ray.dir[0]);
  if (absDir < std::fabs(ray.dir[1])) {
    ray_coeff_.kz = 1;
    absDir = std::fabs(ray.dir[1]);
  }
  if (absDir < std::fabs(ray.dir[2])) {
    ray_coeff_.kz = 2;
    absDir = std::fabs(ray.dir[2]);
  }

  // kx/ky are the remaining two axes (cyclic order after kz).
  ray_coeff_.kx = ray_coeff_.kz + 1;
  if (ray_coeff_.kx == 3) ray_coeff_.kx = 0;
  ray_coeff_.ky = ray_coeff_.kx + 1;
  if (ray_coeff_.ky == 3) ray_coeff_.ky = 0;

  // Swap kx and ky dimension to preserve winding direction of triangles.
  if (ray.dir[ray_coeff_.kz] < static_cast<T>(0.0))
    std::swap(ray_coeff_.kx, ray_coeff_.ky);

  // Calculate shear constants.
  ray_coeff_.Sx = ray.dir[ray_coeff_.kx] / ray.dir[ray_coeff_.kz];
  ray_coeff_.Sy = ray.dir[ray_coeff_.ky] / ray.dir[ray_coeff_.kz];
  ray_coeff_.Sz = static_cast<T>(1.0) / ray.dir[ray_coeff_.kz];

  trace_options_ = trace_options;

  t_min_ = ray.min_t;

  u_ = static_cast<T>(0.0);
  v_ = static_cast<T>(0.0);
}

/// Post BVH traversal stuff.
/// Fill `isect` if there is a hit.
void PostTraversal(const Ray<T> &ray, bool hit, H *isect) const {
  if (hit && isect) {
    (*isect).t = t_;
    (*isect).u = u_;
    (*isect).v = v_;
    (*isect).prim_id = prim_id_;
  }
  (void)ray;
}

private:
// Geometry (not owned); vertex_stride_bytes_ supports interleaved buffers.
const T *vertices_;
const unsigned int *faces_;
const size_t vertex_stride_bytes_;

// Per-ray state.  `mutable` because the intersector is passed by const
// reference into traversal yet must accumulate the nearest hit.
mutable real3<T> ray_org_;
mutable RayCoeff ray_coeff_;
mutable BVHTraceOptions trace_options_;
mutable T t_min_;

mutable T t_;
mutable T u_;
mutable T v_;
mutable unsigned int prim_id_;
};

//
// Robust BVH Ray Traversal : http://jcgt.org/published/0002/02/02/paper.pdf
//

// NaN-safe min and max function.
template <class T>
const T &safemin(const T &a, const T &b) {
  return (a < b) ? a : b;
}
template <class T>
const T &safemax(const T &a, const T &b) {
  return (a > b) ? a : b;
}

//
// SAH functions
//

// Bin counters for the SAH sweep: (min, max) * xyz * bin_size entries.
struct BinBuffer {
  explicit BinBuffer(unsigned int size) {
    bin_size = size;
    bin.resize(2 * 3 * size);
    clear();
  }

  void clear() { memset(&bin[0], 0, sizeof(size_t) * 2 * 3 * bin_size); }

  std::vector<size_t> bin;  // (min, max) * xyz * binsize
  unsigned int bin_size;
  unsigned int pad0;  // padding (unused)
};

// Surface area of the AABB [min, max]: 2 * (xy + yz + zx face areas).
template <typename T>
inline T CalculateSurfaceArea(const real3<T> &min, const real3<T> &max) {
  real3<T> box = max - min;
  return static_cast<T>(2.0) *
         (box[0] * box[1] + box[1] * box[2] + box[2] * box[0]);
}

// AABB of triangle `index` from tightly-packed vertex/face arrays.
template <typename T>
inline void GetBoundingBoxOfTriangle(real3<T> *bmin, real3<T> *bmax,
                                     const T *vertices,
                                     const unsigned int *faces,
                                     unsigned int index) {
  unsigned int f0 = faces[3 * index + 0];
  unsigned int f1 = faces[3 * index + 1];
  unsigned int f2 = faces[3 * index + 2];

  real3<T> p[3];

  p[0] = real3<T>(&vertices[3 * f0]);
  p[1] = real3<T>(&vertices[3 * f1]);
  p[2] = real3<T>(&vertices[3 * f2]);

  (*bmin) = p[0];
  (*bmax) = p[0];

  for (int i = 1; i < 3; i++) {
    (*bmin)[0] = std::min((*bmin)[0], p[i][0]);
    (*bmin)[1] = std::min((*bmin)[1], p[i][1]);
    (*bmin)[2] = std::min((*bmin)[2], p[i][2]);

    (*bmax)[0] = std::max((*bmax)[0], p[i][0]);
    (*bmax)[1] = std::max((*bmax)[1], p[i][1]);
    (*bmax)[2] = std::max((*bmax)[2], p[i][2]);
  }
}

// Accumulates each primitive's AABB endpoints into the SAH bin counters.
template <typename T, class P>
inline void ContributeBinBuffer(BinBuffer *bins,  // [out]
                                const real3<T> &scene_min,
                                const real3<T> &scene_max,
                                unsigned int *indices, unsigned int left_idx,
                                unsigned int right_idx, const P &p) {
  T bin_size = static_cast<T>(bins->bin_size);

  // Calculate extent
  real3<T> scene_size, scene_inv_size;
  scene_size = scene_max - scene_min;

  for (int i = 0; i < 3; ++i) {
    assert(scene_size[i] >= static_cast<T>(0.0));

    if (scene_size[i] > static_cast<T>(0.0)) {
      scene_inv_size[i] = bin_size / scene_size[i];
    } else {
      // Flat axis: everything quantizes to bin 0.
      scene_inv_size[i] = static_cast<T>(0.0);
    }
  }

  // Clear bin data
  std::fill(bins->bin.begin(), bins->bin.end(), 0);
  // memset(&bins->bin[0], 0, sizeof(2 * 3 * bins->bin_size));

  size_t idx_bmin[3];
  size_t
idx_bmax[3];

for (size_t i = left_idx; i < right_idx; i++) {
  //
  // Quantize the position into [0, BIN_SIZE)
  //
  // q[i] = (int)(p[i] - scene_bmin) / scene_size
  //
  real3<T> bmin;
  real3<T> bmax;

  p.BoundingBox(&bmin, &bmax, indices[i]);
  // GetBoundingBoxOfTriangle(&bmin, &bmax, vertices, faces, indices[i]);

  real3<T> quantized_bmin = (bmin - scene_min) * scene_inv_size;
  real3<T> quantized_bmax = (bmax - scene_min) * scene_inv_size;

  // idx is now in [0, BIN_SIZE)
  for (int j = 0; j < 3; ++j) {
    int q0 = static_cast<int>(quantized_bmin[j]);
    if (q0 < 0) q0 = 0;
    int q1 = static_cast<int>(quantized_bmax[j]);
    if (q1 < 0) q1 = 0;

    idx_bmin[j] = static_cast<unsigned int>(q0);
    idx_bmax[j] = static_cast<unsigned int>(q1);

    // NOTE(review): `bin_size` here is the floating-point local declared
    // above, so these clamps compare size_t against T via implicit
    // conversion -- works for sane bin counts, but confirm intent.
    if (idx_bmin[j] >= bin_size)
      idx_bmin[j] = static_cast<unsigned int>(bin_size) - 1;
    if (idx_bmax[j] >= bin_size)
      idx_bmax[j] = static_cast<unsigned int>(bin_size) - 1;

    // Increment bin counter
    bins->bin[0 * (bins->bin_size * 3) +
              static_cast<size_t>(j) * bins->bin_size + idx_bmin[j]] += 1;
    bins->bin[1 * (bins->bin_size * 3) +
              static_cast<size_t>(j) * bins->bin_size + idx_bmax[j]] += 1;
  }
}
}

// SAH cost: 2*Taabb (both child box tests) plus each child's triangle cost
// weighted by its surface-area fraction (probability of being entered).
template <typename T>
inline T SAH(size_t ns1, T leftArea, size_t ns2, T rightArea, T invS, T Taabb,
             T Ttri) {
  T sah;

  sah = static_cast<T>(2.0) * Taabb +
        (leftArea * invS) * static_cast<T>(ns1) * Ttri +
        (rightArea * invS) * static_cast<T>(ns2) * Ttri;

  return sah;
}

// Sweeps the bin counters on all three axes; writes the best split position
// per axis into cut_pos[] and the overall cheapest axis into *minCostAxis.
template <typename T>
inline bool FindCutFromBinBuffer(T *cut_pos,        // [out] xyz
                                 int *minCostAxis,  // [out]
                                 const BinBuffer *bins, const real3<T> &bmin,
                                 const real3<T> &bmax, size_t num_primitives,
                                 T costTaabb) {  // should be in [0.0, 1.0]
  const T kEPS = std::numeric_limits<T>::epsilon();  // * epsScale;

  size_t left, right;
  real3<T> bsize, bstep;
  real3<T> bminLeft, bmaxLeft;
  real3<T> bminRight, bmaxRight;
  T saLeft, saRight, saTotal;
  T pos;
  T minCost[3];

  T costTtri = static_cast<T>(1.0) - costTaabb;

  (*minCostAxis) = 0;

  bsize = bmax - bmin;
  bstep = bsize * (static_cast<T>(1.0) / bins->bin_size);
  saTotal = CalculateSurfaceArea(bmin, bmax);

  T invSaTotal = static_cast<T>(0.0);
  if (saTotal > kEPS) {
    invSaTotal = static_cast<T>(1.0) / saTotal;
  }

  for (int j = 0; j < 3; ++j) {
    //
    // Compute SAH cost for the right side of each cell of the bbox.
    // Exclude both extreme sides of the bbox.
    //
    //  i:      0    1    2    3
    //     +----+----+----+----+----+
    //     |    |    |    |    |    |
    //     +----+----+----+----+----+
    //

    T minCostPos = bmin[j] + static_cast<T>(1.0) * bstep[j];
    minCost[j] = std::numeric_limits<T>::max();

    left = 0;
    right = num_primitives;
    bminLeft = bminRight = bmin;
    bmaxLeft = bmaxRight = bmax;

    // Incremental left/right counts from the bin counters.
    for (int i = 0; i < static_cast<int>(bins->bin_size) - 1; ++i) {
      left += bins->bin[0 * (3 * bins->bin_size) +
                        static_cast<size_t>(j) * bins->bin_size +
                        static_cast<size_t>(i)];
      right -= bins->bin[1 * (3 * bins->bin_size) +
                         static_cast<size_t>(j) * bins->bin_size +
                         static_cast<size_t>(i)];

      assert(left <= num_primitives);
      assert(right <= num_primitives);

      //
      // Split pos bmin + (i + 1) * (bsize / BIN_SIZE)
      // +1 for i since we want a position on right side of the cell.
// pos = bmin[j] + (i + static_cast<T>(1.0)) * bstep[j]; bmaxLeft[j] = pos; bminRight[j] = pos; saLeft = CalculateSurfaceArea(bminLeft, bmaxLeft); saRight = CalculateSurfaceArea(bminRight, bmaxRight); T cost = SAH(left, saLeft, right, saRight, invSaTotal, costTaabb, costTtri); if (cost < minCost[j]) { // // Update the min cost // minCost[j] = cost; minCostPos = pos; // minCostAxis = j; } } cut_pos[j] = minCostPos; } // cut_axis = minCostAxis; // cut_pos = minCostPos; // Find min cost axis T cost = minCost[0]; (*minCostAxis) = 0; if (cost > minCost[1]) { (*minCostAxis) = 1; cost = minCost[1]; } if (cost > minCost[2]) { (*minCostAxis) = 2; cost = minCost[2]; } return true; } #ifdef _OPENMP template <typename T, class P> void ComputeBoundingBoxOMP(real3<T> *bmin, real3<T> *bmax, const unsigned int *indices, unsigned int left_index, unsigned int right_index, const P &p) { { p.BoundingBox(bmin, bmax, indices[left_index]); } T local_bmin[3] = {(*bmin)[0], (*bmin)[1], (*bmin)[2]}; T local_bmax[3] = {(*bmax)[0], (*bmax)[1], (*bmax)[2]}; unsigned int n = right_index - left_index; #pragma omp parallel firstprivate(local_bmin, local_bmax) if (n > (1024 * 128)) { #pragma omp parallel for // for each face for (int i = int(left_index); i < int(right_index); i++) { unsigned int idx = indices[i]; real3<T> bbox_min, bbox_max; p.BoundingBox(&bbox_min, &bbox_max, idx); // xyz for (int k = 0; k < 3; k++) { (*bmin)[k] = std::min((*bmin)[k], bbox_min[k]); (*bmax)[k] = std::max((*bmax)[k], bbox_max[k]); } } #pragma omp critical { for (int k = 0; k < 3; k++) { (*bmin)[k] = std::min((*bmin)[k], local_bmin[k]); (*bmax)[k] = std::max((*bmax)[k], local_bmax[k]); } } } } #endif #ifdef NANORT_USE_CPP11_FEATURE template <typename T, class P> inline void ComputeBoundingBoxThreaded(real3<T> *bmin, real3<T> *bmax, const unsigned int *indices, unsigned int left_index, unsigned int right_index, const P &p) { unsigned int n = right_index - left_index; size_t num_threads = std::min( 
size_t(kNANORT_MAX_THREADS),
      std::max(size_t(1), size_t(std::thread::hardware_concurrency())));

  if (n < num_threads) {
    num_threads = n;
  }
  // NOTE(review): when n == 0 (left_index == right_index) num_threads
  // becomes 0 and `n / num_threads` below divides by zero -- confirm all
  // callers pass a non-empty range.

  std::vector<std::thread> workers;
  size_t ndiv = n / num_threads;

  // Per-thread partial bounds, merged after join.
  std::vector<T> local_bmins(3 * num_threads);  // 3 = xyz
  std::vector<T> local_bmaxs(3 * num_threads);  // 3 = xyz

  for (size_t t = 0; t < num_threads; t++) {
    workers.emplace_back(std::thread([&, t]() {
      // This thread handles indices [si, ei); the last thread takes the
      // remainder.
      size_t si = left_index + t * ndiv;
      size_t ei = (t == (num_threads - 1))
                      ? size_t(right_index)
                      : std::min(left_index + (t + 1) * ndiv,
                                 size_t(right_index));

      // Seed with +/-infinity so the first min/max always takes the data.
      local_bmins[3 * t + 0] = std::numeric_limits<T>::infinity();
      local_bmins[3 * t + 1] = std::numeric_limits<T>::infinity();
      local_bmins[3 * t + 2] = std::numeric_limits<T>::infinity();
      local_bmaxs[3 * t + 0] = -std::numeric_limits<T>::infinity();
      local_bmaxs[3 * t + 1] = -std::numeric_limits<T>::infinity();
      local_bmaxs[3 * t + 2] = -std::numeric_limits<T>::infinity();

      // for each face
      for (size_t i = si; i < ei; i++) {
        unsigned int idx = indices[i];

        real3<T> bbox_min, bbox_max;
        p.BoundingBox(&bbox_min, &bbox_max, idx);

        // xyz
        for (size_t k = 0; k < 3; k++) {
          local_bmins[3 * t + k] =
              std::min(local_bmins[3 * t + k], bbox_min[int(k)]);
          local_bmaxs[3 * t + k] =
              std::max(local_bmaxs[3 * t + k], bbox_max[int(k)]);
        }
      }
    }));
  }

  for (auto &t : workers) {
    t.join();
  }

  // merge bbox: start from thread 0's result, fold in the rest.
  for (size_t k = 0; k < 3; k++) {
    (*bmin)[int(k)] = local_bmins[k];
    (*bmax)[int(k)] = local_bmaxs[k];
  }
  for (size_t t = 1; t < num_threads; t++) {
    for (size_t k = 0; k < 3; k++) {
      (*bmin)[int(k)] = std::min((*bmin)[int(k)], local_bmins[3 * t + k]);
      (*bmax)[int(k)] = std::max((*bmax)[int(k)], local_bmaxs[3 * t + k]);
    }
  }
}
#endif

/// Serial bounding box of primitives indices[left_index, right_index).
template <typename T, class P>
inline void ComputeBoundingBox(real3<T> *bmin, real3<T> *bmax,
                               const unsigned int *indices,
                               unsigned int left_index,
                               unsigned int right_index, const P &p) {
  unsigned int idx = indices[left_index];
  p.BoundingBox(bmin, bmax, idx);

  {
    // for each primitive
    for (unsigned int i = left_index + 1; i < right_index; i++) {
      idx = indices[i];
      real3<T> bbox_min, bbox_max;
      p.BoundingBox(&bbox_min, &bbox_max, idx);

      // xyz
      for (int k = 0; k < 3; k++) {
        (*bmin)[k] = std::min((*bmin)[k], bbox_min[k]);
        (*bmax)[k] = std::max((*bmax)[k], bbox_max[k]);
      }
    }
  }
}

/// Bounding box computed from precomputed (cached) per-primitive BBoxes.
template <typename T>
inline void GetBoundingBox(real3<T> *bmin, real3<T> *bmax,
                           const std::vector<BBox<T> > &bboxes,
                           unsigned int *indices, unsigned int left_index,
                           unsigned int right_index) {
  unsigned int i = left_index;
  unsigned int idx = indices[i];

  (*bmin)[0] = bboxes[idx].bmin[0];
  (*bmin)[1] = bboxes[idx].bmin[1];
  (*bmin)[2] = bboxes[idx].bmin[2];
  (*bmax)[0] = bboxes[idx].bmax[0];
  (*bmax)[1] = bboxes[idx].bmax[1];
  (*bmax)[2] = bboxes[idx].bmax[2];

  // for each face
  for (i = left_index + 1; i < right_index; i++) {
    idx = indices[i];

    // xyz
    for (int k = 0; k < 3; k++) {
      (*bmin)[k] = std::min((*bmin)[k], bboxes[idx].bmin[k]);
      (*bmax)[k] = std::max((*bmax)[k], bboxes[idx].bmax[k]);
    }
  }
}

//
// --
//

#if defined(NANORT_ENABLE_PARALLEL_BUILD)
/// Builds only the top `max_shallow_depth` levels of the BVH.  Subtrees
/// below that depth are recorded in shallow_node_infos_ (with a dummy
/// placeholder node) to be built in parallel later.  Returns this node's
/// index in *out_nodes.
template <typename T>
template <class P, class Pred>
unsigned int BVHAccel<T>::BuildShallowTree(std::vector<BVHNode<T> > *out_nodes,
                                           unsigned int left_idx,
                                           unsigned int right_idx,
                                           unsigned int depth,
                                           unsigned int max_shallow_depth,
                                           const P &p, const Pred &pred) {
  assert(left_idx <= right_idx);

  unsigned int offset = static_cast<unsigned int>(out_nodes->size());

  if (stats_.max_tree_depth < depth) {
    stats_.max_tree_depth = depth;
  }

  real3<T> bmin, bmax;
#if defined(NANORT_USE_CPP11_FEATURE) && defined(NANORT_ENABLE_PARALLEL_BUILD)
  ComputeBoundingBoxThreaded(&bmin, &bmax, &indices_.at(0), left_idx,
                             right_idx, p);
#else
  ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), left_idx, right_idx, p);
#endif

  unsigned int n = right_idx - left_idx;
  if ((n <= options_.min_leaf_primitives) ||
      (depth >= options_.max_tree_depth)) {
    // Create leaf node.
BVHNode<T> leaf;

leaf.bmin[0] = bmin[0];
leaf.bmin[1] = bmin[1];
leaf.bmin[2] = bmin[2];

leaf.bmax[0] = bmax[0];
leaf.bmax[1] = bmax[1];
leaf.bmax[2] = bmax[2];

assert(left_idx < std::numeric_limits<unsigned int>::max());

leaf.flag = 1;            // leaf
leaf.data[0] = n;         // primitive count
leaf.data[1] = left_idx;  // first slot in indices_

out_nodes->push_back(leaf);  // atomic update

stats_.num_leaf_nodes++;

return offset;
}

//
// Create branch node.
//
if (depth >= max_shallow_depth) {
  // Delay to build tree: remember the range so the subtree can be built in
  // parallel later, and patch the dummy node at that time.
  ShallowNodeInfo info;
  info.left_idx = left_idx;
  info.right_idx = right_idx;
  info.offset = offset;
  shallow_node_infos_.push_back(info);

  // Add dummy node.
  BVHNode<T> node;
  node.axis = -1;
  node.flag = -1;
  out_nodes->push_back(node);

  return offset;

} else {
  //
  // TODO(LTE): multi-threaded SAH computation, or use simple object median or
  // spacial median for shallow tree to speeding up the parallel build.
  //

  //
  // Compute SAH and find best split axis and position
  //
  int min_cut_axis = 0;
  T cut_pos[3] = {0.0, 0.0, 0.0};

  BinBuffer bins(options_.bin_size);
  ContributeBinBuffer(&bins, bmin, bmax, &indices_.at(0), left_idx, right_idx,
                      p);
  FindCutFromBinBuffer(cut_pos, &min_cut_axis, &bins, bmin, bmax, n,
                       options_.cost_t_aabb);

  // Try all 3 axis until good cut position avaiable.
  unsigned int mid_idx = left_idx;
  int cut_axis = min_cut_axis;

  for (int axis_try = 0; axis_try < 3; axis_try++) {
    unsigned int *begin = &indices_[left_idx];
    unsigned int *end = &indices_[right_idx - 1] + 1;  // mimics end() iterator
    unsigned int *mid = 0;

    // try min_cut_axis first.
    cut_axis = (min_cut_axis + axis_try) % 3;

    pred.Set(cut_axis, cut_pos[cut_axis]);

    //
    // Split at (cut_axis, cut_pos)
    // indices_ will be modified.
    //
    mid = std::partition(begin, end, pred);

    mid_idx = left_idx + static_cast<unsigned int>((mid - begin));

    if ((mid_idx == left_idx) || (mid_idx == right_idx)) {
      // Can't split well.
      // Switch to object median (which may create unoptimized tree, but
      // stable)
      mid_idx = left_idx + (n >> 1);

      // Try another axis if there's an axis to try.

    } else {
      // Found good cut. exit loop.
      break;
    }
  }

  BVHNode<T> node;
  node.axis = cut_axis;
  node.flag = 0;  // 0 = branch

  out_nodes->push_back(node);

  unsigned int left_child_index = 0;
  unsigned int right_child_index = 0;

  // Recurse; children are appended after this node, then back-patched below.
  left_child_index = BuildShallowTree(out_nodes, left_idx, mid_idx, depth + 1,
                                      max_shallow_depth, p, pred);
  right_child_index = BuildShallowTree(out_nodes, mid_idx, right_idx,
                                       depth + 1, max_shallow_depth, p, pred);

  // std::cout << "shallow[" << offset << "] l and r = " << left_child_index
  // << ", " << right_child_index << std::endl;

  (*out_nodes)[offset].data[0] = left_child_index;
  (*out_nodes)[offset].data[1] = right_child_index;

  (*out_nodes)[offset].bmin[0] = bmin[0];
  (*out_nodes)[offset].bmin[1] = bmin[1];
  (*out_nodes)[offset].bmin[2] = bmin[2];

  (*out_nodes)[offset].bmax[0] = bmax[0];
  (*out_nodes)[offset].bmax[1] = bmax[1];
  (*out_nodes)[offset].bmax[2] = bmax[2];
}

stats_.num_branch_nodes++;

return offset;
}
#endif

/// Recursive SAH BVH build over indices_[left_idx, right_idx).
/// Appends nodes to *out_nodes and returns this subtree root's index.
template <typename T>
template <class P, class Pred>
unsigned int BVHAccel<T>::BuildTree(BVHBuildStatistics *out_stat,
                                    std::vector<BVHNode<T> > *out_nodes,
                                    unsigned int left_idx,
                                    unsigned int right_idx, unsigned int depth,
                                    const P &p, const Pred &pred) {
  assert(left_idx <= right_idx);

  unsigned int offset = static_cast<unsigned int>(out_nodes->size());

  if (out_stat->max_tree_depth < depth) {
    out_stat->max_tree_depth = depth;
  }

  real3<T> bmin, bmax;
  if (!bboxes_.empty()) {
    // Use cached per-primitive bounds (populated by Build() w/ cache_bbox).
    GetBoundingBox(&bmin, &bmax, bboxes_, &indices_.at(0), left_idx,
                   right_idx);
  } else {
    ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), left_idx, right_idx, p);
  }

  unsigned int n = right_idx - left_idx;
  if ((n <= options_.min_leaf_primitives) ||
      (depth >= options_.max_tree_depth)) {
    // Create leaf node.
BVHNode<T> leaf;

leaf.bmin[0] = bmin[0];
leaf.bmin[1] = bmin[1];
leaf.bmin[2] = bmin[2];

leaf.bmax[0] = bmax[0];
leaf.bmax[1] = bmax[1];
leaf.bmax[2] = bmax[2];

assert(left_idx < std::numeric_limits<unsigned int>::max());

leaf.flag = 1;            // leaf
leaf.data[0] = n;         // primitive count
leaf.data[1] = left_idx;  // first slot in indices_

out_nodes->push_back(leaf);  // atomic update

out_stat->num_leaf_nodes++;

return offset;
}

//
// Create branch node.
//

//
// Compute SAH and find best split axis and position
//
int min_cut_axis = 0;
T cut_pos[3] = {0.0, 0.0, 0.0};

BinBuffer bins(options_.bin_size);
ContributeBinBuffer(&bins, bmin, bmax, &indices_.at(0), left_idx, right_idx,
                    p);
FindCutFromBinBuffer(cut_pos, &min_cut_axis, &bins, bmin, bmax, n,
                     options_.cost_t_aabb);

// Try all 3 axis until good cut position avaiable.
unsigned int mid_idx = left_idx;
int cut_axis = min_cut_axis;

for (int axis_try = 0; axis_try < 3; axis_try++) {
  unsigned int *begin = &indices_[left_idx];
  unsigned int *end = &indices_[right_idx - 1] + 1;  // mimics end() iterator.
  unsigned int *mid = 0;

  // try min_cut_axis first.
  cut_axis = (min_cut_axis + axis_try) % 3;

  pred.Set(cut_axis, cut_pos[cut_axis]);

  //
  // Split at (cut_axis, cut_pos)
  // indices_ will be modified.
  //
  mid = std::partition(begin, end, pred);

  mid_idx = left_idx + static_cast<unsigned int>((mid - begin));

  if ((mid_idx == left_idx) || (mid_idx == right_idx)) {
    // Can't split well.
    // Switch to object median(which may create unoptimized tree, but
    // stable)
    mid_idx = left_idx + (n >> 1);

    // Try another axis to find better cut.

  } else {
    // Found good cut. exit loop.
    break;
  }
}

BVHNode<T> node;
node.axis = cut_axis;
node.flag = 0;  // 0 = branch

out_nodes->push_back(node);

unsigned int left_child_index = 0;
unsigned int right_child_index = 0;

left_child_index =
    BuildTree(out_stat, out_nodes, left_idx, mid_idx, depth + 1, p, pred);
right_child_index =
    BuildTree(out_stat, out_nodes, mid_idx, right_idx, depth + 1, p, pred);

{
  // Back-patch child links and bounds into the branch node.
  (*out_nodes)[offset].data[0] = left_child_index;
  (*out_nodes)[offset].data[1] = right_child_index;

  (*out_nodes)[offset].bmin[0] = bmin[0];
  (*out_nodes)[offset].bmin[1] = bmin[1];
  (*out_nodes)[offset].bmin[2] = bmin[2];

  (*out_nodes)[offset].bmax[0] = bmax[0];
  (*out_nodes)[offset].bmax[1] = bmax[1];
  (*out_nodes)[offset].bmax[2] = bmax[2];
}

out_stat->num_branch_nodes++;

return offset;
}

/// Builds the BVH over `num_primitives` primitives described by `p`
/// (geometry/bounds access) and `pred` (SAH partition predicate).
/// Returns false when num_primitives == 0.
template <typename T>
template <class Prim, class Pred>
bool BVHAccel<T>::Build(unsigned int num_primitives, const Prim &p,
                        const Pred &pred, const BVHBuildOptions<T> &options) {
  options_ = options;
  stats_ = BVHBuildStatistics();

  nodes_.clear();
  bboxes_.clear();
#if defined(NANORT_ENABLE_PARALLEL_BUILD)
  shallow_node_infos_.clear();
#endif

  assert(options_.bin_size > 1);

  if (num_primitives == 0) {
    return false;
  }

  unsigned int n = num_primitives;

  //
  // 1. Create triangle indices(this will be permutated in BuildTree)
  //
  indices_.resize(n);

#if defined(NANORT_USE_CPP11_FEATURE)
  {
    // Initialize indices_ = [0, n) in parallel slices.
    size_t num_threads = std::min(
        size_t(kNANORT_MAX_THREADS),
        std::max(size_t(1), size_t(std::thread::hardware_concurrency())));

    if (n < num_threads) {
      num_threads = n;
    }

    std::vector<std::thread> workers;

    size_t ndiv = n / num_threads;

    for (size_t t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&, t]() {
        size_t si = t * ndiv;
        size_t ei = (t == (num_threads - 1)) ?
n : std::min((t + 1) * ndiv, size_t(n));

        for (size_t k = si; k < ei; k++) {
          indices_[k] = static_cast<unsigned int>(k);
        }
      }));
    }

    for (auto &t : workers) {
      t.join();
    }
  }
#else

#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < static_cast<int>(n); i++) {
    indices_[static_cast<size_t>(i)] = static_cast<unsigned int>(i);
  }
#endif  // !NANORT_USE_CPP11_FEATURE

  //
  // 2. Compute bounding box (optional).
  //
  real3<T> bmin, bmax;

  if (options.cache_bbox) {
    bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max();
    bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max();

    bboxes_.resize(n);

    for (size_t i = 0; i < n; i++) {  // for each primitive
      unsigned int idx = indices_[i];

      BBox<T> bbox;
      // NOTE(review): bounds are queried with loop index `i` but stored at
      // `idx` -- equivalent only because indices_[i] == i right after the
      // initialization above; confirm if that ever changes.
      p.BoundingBox(&(bbox.bmin), &(bbox.bmax), static_cast<unsigned int>(i));
      bboxes_[idx] = bbox;

      // xyz
      for (int k = 0; k < 3; k++) {
        bmin[k] = std::min(bmin[k], bbox.bmin[k]);
        bmax[k] = std::max(bmax[k], bbox.bmax[k]);
      }
    }

  } else {
#if defined(NANORT_USE_CPP11_FEATURE)
    ComputeBoundingBoxThreaded(&bmin, &bmax, &indices_.at(0), 0, n, p);
#elif defined(_OPENMP)
    ComputeBoundingBoxOMP(&bmin, &bmax, &indices_.at(0), 0, n, p);
#else
    ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), 0, n, p);
#endif
  }

  //
  // 3. Build tree
  //
#if defined(NANORT_ENABLE_PARALLEL_BUILD)
#if defined(NANORT_USE_CPP11_FEATURE)

  // Do parallel build for large enough datasets.
  if (n > options.min_primitives_for_parallel_build) {
    BuildShallowTree(&nodes_, 0, n, /* root depth */ 0, options.shallow_depth,
                     p, pred);  // [0, n)

    assert(shallow_node_infos_.size() > 0);

    // Build deeper tree in parallel
    std::vector<std::vector<BVHNode<T> > > local_nodes(
        shallow_node_infos_.size());
    std::vector<BVHBuildStatistics> local_stats(shallow_node_infos_.size());

    size_t num_threads = std::min(
        size_t(kNANORT_MAX_THREADS),
        std::max(size_t(1), size_t(std::thread::hardware_concurrency())));
    if (shallow_node_infos_.size() < num_threads) {
      num_threads = shallow_node_infos_.size();
    }

    std::vector<std::thread> workers;
    std::atomic<uint32_t> i(0);  // shared work-queue cursor

    for (size_t t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&]() {
        uint32_t idx = 0;
        while ((idx = (i++)) < shallow_node_infos_.size()) {
          // Create thread-local copy of Pred since some mutable variables are
          // modified during SAH computation.
          const Pred local_pred = pred;
          unsigned int left_idx = shallow_node_infos_[size_t(idx)].left_idx;
          unsigned int right_idx = shallow_node_infos_[size_t(idx)].right_idx;
          BuildTree(&(local_stats[size_t(idx)]), &(local_nodes[size_t(idx)]),
                    left_idx, right_idx, options.shallow_depth, p, local_pred);
        }
      }));
    }

    for (auto &t : workers) {
      t.join();
    }

    // Join local nodes
    for (size_t ii = 0; ii < local_nodes.size(); ii++) {
      assert(!local_nodes[ii].empty());
      size_t offset = nodes_.size();
      // Add offset to child index (for branch node).
      // (-1 accounts for each local tree's root being dropped below.)
      for (size_t j = 0; j < local_nodes[ii].size(); j++) {
        if (local_nodes[ii][j].flag == 0) {  // branch
          local_nodes[ii][j].data[0] += offset - 1;
          local_nodes[ii][j].data[1] += offset - 1;
        }
      }

      // replace the dummy placeholder emitted by BuildShallowTree
      nodes_[shallow_node_infos_[ii].offset] = local_nodes[ii][0];

      // Skip root element of the local node.
nodes_.insert(nodes_.end(), local_nodes[ii].begin() + 1,
              local_nodes[ii].end());
    }

    // Join statistics
    for (size_t ii = 0; ii < local_nodes.size(); ii++) {
      stats_.max_tree_depth =
          std::max(stats_.max_tree_depth, local_stats[ii].max_tree_depth);
      stats_.num_leaf_nodes += local_stats[ii].num_leaf_nodes;
      stats_.num_branch_nodes += local_stats[ii].num_branch_nodes;
    }

  } else {
    // Single thread.
    BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred);  // [0, n)
  }

#elif defined(_OPENMP)

  // Do parallel build for large enough datasets.
  if (n > options.min_primitives_for_parallel_build) {
    BuildShallowTree(&nodes_, 0, n, /* root depth */ 0, options.shallow_depth,
                     p, pred);  // [0, n)

    assert(shallow_node_infos_.size() > 0);

    // Build deeper tree in parallel
    std::vector<std::vector<BVHNode<T> > > local_nodes(
        shallow_node_infos_.size());
    std::vector<BVHBuildStatistics> local_stats(shallow_node_infos_.size());

#pragma omp parallel for
    for (int i = 0; i < static_cast<int>(shallow_node_infos_.size()); i++) {
      unsigned int left_idx = shallow_node_infos_[size_t(i)].left_idx;
      unsigned int right_idx = shallow_node_infos_[size_t(i)].right_idx;
      // Thread-local copy of Pred (it carries mutable split state).
      const Pred local_pred = pred;
      BuildTree(&(local_stats[size_t(i)]), &(local_nodes[size_t(i)]),
                left_idx, right_idx, options.shallow_depth, p, local_pred);
    }

    // Join local nodes
    for (size_t i = 0; i < local_nodes.size(); i++) {
      assert(!local_nodes[size_t(i)].empty());
      size_t offset = nodes_.size();
      // Add offset to child index (for branch node).
      for (size_t j = 0; j < local_nodes[i].size(); j++) {
        if (local_nodes[i][j].flag == 0) {  // branch
          local_nodes[i][j].data[0] += offset - 1;
          local_nodes[i][j].data[1] += offset - 1;
        }
      }

      // replace
      nodes_[shallow_node_infos_[i].offset] = local_nodes[i][0];

      // Skip root element of the local node.
      nodes_.insert(nodes_.end(), local_nodes[i].begin() + 1,
                    local_nodes[i].end());
    }

    // Join statistics
    for (size_t i = 0; i < local_nodes.size(); i++) {
      stats_.max_tree_depth =
          std::max(stats_.max_tree_depth, local_stats[i].max_tree_depth);
      stats_.num_leaf_nodes += local_stats[i].num_leaf_nodes;
      stats_.num_branch_nodes += local_stats[i].num_branch_nodes;
    }

  } else {
    // Single thread
    BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred);  // [0, n)
  }

#else  // neither NANORT_USE_CPP11_FEATURE nor _OPENMP
  {
    BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred);  // [0, n)
  }
#endif
#else  // !NANORT_ENABLE_PARALLEL_BUILD
  // Single thread BVH build
  {
    BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred);  // [0, n)
  }
#endif

  return true;
}

/// Prints indices and node bounds to stdout (debugging aid).
template <typename T>
void BVHAccel<T>::Debug() {
  for (size_t i = 0; i < indices_.size(); i++) {
    printf("index[%d] = %d\n", int(i), int(indices_[i]));
  }

  for (size_t i = 0; i < nodes_.size(); i++) {
    printf("node[%d] : bmin %f, %f, %f, bmax %f, %f, %f\n", int(i),
           nodes_[i].bmin[0], nodes_[i].bmin[1], nodes_[i].bmin[2],
           nodes_[i].bmax[0], nodes_[i].bmax[1], nodes_[i].bmax[2]);
  }
}

#if defined(NANORT_ENABLE_SERIALIZATION)
/// Serializes nodes_ and indices_ to `filename` in raw binary form.
/// NOTE(review): layout is architecture-dependent (size_t width, struct
/// padding, endianness) -- files are only portable between identical builds.
template <typename T>
bool BVHAccel<T>::Dump(const char *filename) const {
  FILE *fp = fopen(filename, "wb");
  if (!fp) {
    // fprintf(stderr, "[BVHAccel] Cannot write a file: %s\n", filename);
    return false;
  }

  size_t numNodes = nodes_.size();
  assert(nodes_.size() > 0);

  size_t numIndices = indices_.size();

  size_t r = 0;
  r = fwrite(&numNodes, sizeof(size_t), 1, fp);
  assert(r == 1);

  r = fwrite(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp);
  assert(r == numNodes);

  r = fwrite(&numIndices, sizeof(size_t), 1, fp);
  assert(r == 1);

  r = fwrite(&indices_.at(0), sizeof(unsigned int), numIndices, fp);
  assert(r == numIndices);

  fclose(fp);

  return true;
}

/// Serializes to an already-open FILE*; does not close it.
template <typename T>
bool BVHAccel<T>::Dump(FILE *fp) const {
  size_t numNodes = nodes_.size();
  assert(nodes_.size() > 0);

  size_t numIndices = indices_.size();

  size_t r = 0;
  r = fwrite(&numNodes,
sizeof(size_t), 1, fp);
  assert(r == 1);

  r = fwrite(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp);
  assert(r == numNodes);

  r = fwrite(&numIndices, sizeof(size_t), 1, fp);
  assert(r == 1);

  r = fwrite(&indices_.at(0), sizeof(unsigned int), numIndices, fp);
  assert(r == numIndices);

  return true;
}

/// Loads a BVH previously written by Dump(const char*).
/// NOTE(review): fread results are only checked via assert(), so release
/// builds (NDEBUG) silently accept truncated/corrupt files.
template <typename T>
bool BVHAccel<T>::Load(const char *filename) {
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    // fprintf(stderr, "Cannot open file: %s\n", filename);
    return false;
  }

  size_t numNodes;
  size_t numIndices;

  size_t r = 0;
  r = fread(&numNodes, sizeof(size_t), 1, fp);
  assert(r == 1);
  assert(numNodes > 0);

  nodes_.resize(numNodes);
  r = fread(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp);
  assert(r == numNodes);

  r = fread(&numIndices, sizeof(size_t), 1, fp);
  assert(r == 1);

  indices_.resize(numIndices);

  r = fread(&indices_.at(0), sizeof(unsigned int), numIndices, fp);
  assert(r == numIndices);

  fclose(fp);

  return true;
}

/// Loads from an already-open FILE*; does not close it.
template <typename T>
bool BVHAccel<T>::Load(FILE *fp) {
  size_t numNodes;
  size_t numIndices;

  size_t r = 0;
  r = fread(&numNodes, sizeof(size_t), 1, fp);
  assert(r == 1);
  assert(numNodes > 0);

  nodes_.resize(numNodes);
  r = fread(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp);
  assert(r == numNodes);

  r = fread(&numIndices, sizeof(size_t), 1, fp);
  assert(r == 1);

  indices_.resize(numIndices);

  r = fread(&indices_.at(0), sizeof(unsigned int), numIndices, fp);
  assert(r == numIndices);

  return true;
}
#endif

// Commented-out fixed-point helpers (kept for reference; the live
// float_to_fixed/fixed_to_float used below are presumably provided by the
// simulator headers -- TODO confirm).
// typedef int32_t fixed_point_t;
// #define FIXED_POINT_FRACTIONAL_BITS 8
// inline float fixed_to_float(fixed_point_t input) {
//   return ((float)input / (float)(1 << FIXED_POINT_FRACTIONAL_BITS));
// }
// inline fixed_point_t float_to_fixed(float input, bool supress_warning = false) {
//   if (input > 2147483647) {
//     if (!supress_warning) {
//       // printf("warning large number detected, setting to max value\n");
//     }
//     return (fixed_point_t)2147483647;
//   }
//   if (input < -2147483648) {
//     return (fixed_point_t)-2147483648;
//   }
//   return (fixed_point_t)(round(input * (1 << FIXED_POINT_FRACTIONAL_BITS)));
// }

/// Ray vs. AABB slab test.  The <float> specialization below is routed
/// through an external hardware `Simulator` model; the <double>
/// specialization uses the robust software slab test.
template <typename T>
inline bool IntersectRayAABB(T *tminOut,  // [out]
                             T *tmaxOut,  // [out]
                             T min_t, T max_t, const T bmin[3],
                             const T bmax[3], real3<T> ray_org,
                             real3<T> ray_inv_dir, int ray_dir_sign[3],
                             Simulator *simulator);

template <>
inline bool IntersectRayAABB<float>(float *tminOut,  // [out]
                                    float *tmaxOut,  // [out]
                                    float min_t, float max_t,
                                    const float bmin[3], const float bmax[3],
                                    real3<float> ray_org,
                                    real3<float> ray_inv_dir,
                                    int ray_dir_sign[3],
                                    Simulator *simulator) {
// `vimp` selects the simulator-backed path; `ogimp` is the original software
// slab test (currently disabled).  NOTE(review): with only vimp defined,
// *tminOut / *tmaxOut are never written, so any caller that reads them after
// a hit gets stale values -- confirm this is acceptable for the experiment.
#define vimp
#ifdef vimp
  // VerilatedContext *contextp = new VerilatedContext;
  // Vintersector *vinter_obj = new Vintersector{contextp};
  // vinter_obj->min_t = float_to_fixed(min_t);
  // vinter_obj->max_t = float_to_fixed(max_t);
  // vinter_obj->bmin0 = float_to_fixed(bmin[0]);
  // vinter_obj->bmin1 = float_to_fixed(bmin[1]);
  // vinter_obj->bmin2 = float_to_fixed(bmin[2]);
  // vinter_obj->bmax0 = float_to_fixed(bmax[0]);
  // vinter_obj->bmax1 = float_to_fixed(bmax[1]);
  // vinter_obj->bmax2 = float_to_fixed(bmax[2]);
  // vinter_obj->ray_org0 = float_to_fixed(ray_org[0]);
  // vinter_obj->ray_org1 = float_to_fixed(ray_org[1]);
  // vinter_obj->ray_org2 = float_to_fixed(ray_org[2]);
  // vinter_obj->ray_inv_dir0 = float_to_fixed(ray_inv_dir[0]);
  // vinter_obj->ray_inv_dir1 = float_to_fixed(ray_inv_dir[1]);
  // vinter_obj->ray_inv_dir2 = float_to_fixed(ray_inv_dir[2]);
  // vinter_obj->ray_dir_sign0 = ray_dir_sign[0];
  // vinter_obj->ray_dir_sign1 = ray_dir_sign[1];
  // vinter_obj->ray_dir_sign2 = ray_dir_sign[2];
  // fflush(stdout);
  // while (!contextp->gotFinish()) {
  //   vinter_obj->eval();
  // }
  // float out_tmin = fixed_to_float(vinter_obj->tmin);
  // float out_tmax = fixed_to_float(vinter_obj->tmax);

  // Marshal the query into fixed point and hand it to the simulator queue.
  intersector_input_t in;
  in.min_t = float_to_fixed(min_t);
  in.max_t = float_to_fixed(max_t);
  in.bmin0 = float_to_fixed(bmin[0]);
  in.bmin1 = float_to_fixed(bmin[1]);
  in.bmin2 = float_to_fixed(bmin[2]);
  in.bmax0 = float_to_fixed(bmax[0]);
  in.bmax1 = float_to_fixed(bmax[1]);
  in.bmax2 = float_to_fixed(bmax[2]);
  in.ray_org0 = float_to_fixed(ray_org[0]);
  in.ray_org1 = float_to_fixed(ray_org[1]);
  in.ray_org2 = float_to_fixed(ray_org[2]);
  in.ray_inv_dir0 = float_to_fixed(ray_inv_dir[0]);
  in.ray_inv_dir1 = float_to_fixed(ray_inv_dir[1]);
  in.ray_inv_dir2 = float_to_fixed(ray_inv_dir[2]);
  in.ray_dir_sign0 = ray_dir_sign[0];
  in.ray_dir_sign1 = ray_dir_sign[1];
  in.ray_dir_sign2 = ray_dir_sign[2];
  simulator->append_to_queue(in);
  simulator->step();
  intersector_output_t out = simulator->pop_queue();
  // printf("out min %f max %f\n", out.tmin, out.tmax);
  bool v_out = out.tmin <= out.tmax;  // hit iff slab interval is non-empty

  // delete vinter_obj;
  // delete contextp;
#endif
#ifdef ogimp
  float tmin, tmax;

  // Pick slab entry/exit planes from the precomputed direction signs.
  const float min_x = ray_dir_sign[0] ? bmax[0] : bmin[0];
  const float min_y = ray_dir_sign[1] ? bmax[1] : bmin[1];
  const float min_z = ray_dir_sign[2] ? bmax[2] : bmin[2];
  const float max_x = ray_dir_sign[0] ? bmin[0] : bmax[0];
  const float max_y = ray_dir_sign[1] ? bmin[1] : bmax[1];
  const float max_z = ray_dir_sign[2] ? bmin[2] : bmax[2];

  // X
  const float tmin_x = (min_x - ray_org[0]) * ray_inv_dir[0];
  // MaxMult robust BVH traversal(up to 4 ulp).
  // 1.0000000000000004 for double precision.
  const float tmax_x = (max_x - ray_org[0]) * ray_inv_dir[0] * 1.00000024f;
  // printf("og tminx %f\n", tmin_x);
  // printf("og tmaxx %f\n", tmin_x);

  // Y
  const float tmin_y = (min_y - ray_org[1]) * ray_inv_dir[1];
  const float tmax_y = (max_y - ray_org[1]) * ray_inv_dir[1] * 1.00000024f;
  // printf("og tminy %f\n", tmin_y);
  // printf("og tmaxy %f\n", tmin_y);

  // Z
  const float tmin_z = (min_z - ray_org[2]) * ray_inv_dir[2];
  const float tmax_z = (max_z - ray_org[2]) * ray_inv_dir[2] * 1.00000024f;
  // printf("og tminz %f\n", tmin_z);
  // printf("og tmaxz %f\n", tmin_z);

  tmin = safemax(tmin_z, safemax(tmin_y, safemax(tmin_x, min_t)));
  tmax = safemin(tmax_z, safemin(tmax_y, safemin(tmax_x, max_t)));
#endif
  // printf("og tmin %f tmax %f\n", tmin, tmax);
#ifdef vimp
  return v_out;
#endif
#ifdef ogimp
  if (tmin <= tmax) {
    (*tminOut) = tmin;
    (*tmaxOut) = tmax;
    // if (!v_out) printf("MISMATCHED OUTPUT, SHOULD BE TRUE\n");
    return true;
  }
  // if (v_out) printf("MISMATCHED OUTPUT, SHOULD BE FALSE\n");
  return false;  // no hit
#endif
}

template <>
inline bool IntersectRayAABB<double>(double *tminOut,  // [out]
                                     double *tmaxOut,  // [out]
                                     double min_t, double max_t,
                                     const double bmin[3],
                                     const double bmax[3],
                                     real3<double> ray_org,
                                     real3<double> ray_inv_dir,
                                     int ray_dir_sign[3],
                                     Simulator *simulator) {
  double tmin, tmax;

  const double min_x = ray_dir_sign[0] ? bmax[0] : bmin[0];
  const double min_y = ray_dir_sign[1] ? bmax[1] : bmin[1];
  const double min_z = ray_dir_sign[2] ? bmax[2] : bmin[2];
  const double max_x = ray_dir_sign[0] ? bmin[0] : bmax[0];
  const double max_y = ray_dir_sign[1] ? bmin[1] : bmax[1];
  const double max_z = ray_dir_sign[2] ? bmin[2] : bmax[2];

  // X
  const double tmin_x = (min_x - ray_org[0]) * ray_inv_dir[0];
  // MaxMult robust BVH traversal(up to 4 ulp).
const double tmax_x = (max_x - ray_org[0]) * ray_inv_dir[0] * 1.0000000000000004; // Y const double tmin_y = (min_y - ray_org[1]) * ray_inv_dir[1]; const double tmax_y = (max_y - ray_org[1]) * ray_inv_dir[1] * 1.0000000000000004; // Z const double tmin_z = (min_z - ray_org[2]) * ray_inv_dir[2]; const double tmax_z = (max_z - ray_org[2]) * ray_inv_dir[2] * 1.0000000000000004; tmin = safemax(tmin_z, safemax(tmin_y, safemax(tmin_x, min_t))); tmax = safemin(tmax_z, safemin(tmax_y, safemin(tmax_x, max_t))); if (tmin <= tmax) { (*tminOut) = tmin; (*tmaxOut) = tmax; return true; } return false; // no hit } template <typename T> template <class I> inline bool BVHAccel<T>::TestLeafNode(const BVHNode<T> &node, const Ray<T> &ray, const I &intersector) const { bool hit = false; unsigned int num_primitives = node.data[0]; unsigned int offset = node.data[1]; T t = intersector.GetT(); // current hit distance real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; for (unsigned int i = 0; i < num_primitives; i++) { unsigned int prim_idx = indices_[i + offset]; T local_t = t; if (intersector.Intersect(&local_t, prim_idx)) { // Update isect state t = local_t; intersector.Update(t, prim_idx); hit = true; } } return hit; } #if 0 // TODO(LTE): Implement template <typename T> template<class I, class H, class Comp> bool BVHAccel<T>::MultiHitTestLeafNode( std::priority_queue<H, std::vector<H>, Comp> *isect_pq, int max_intersections, const BVHNode<T> &node, const Ray<T> &ray, const I &intersector) const { bool hit = false; unsigned int num_primitives = node.data[0]; unsigned int offset = node.data[1]; T t = std::numeric_limits<T>::max(); if (isect_pq->size() >= static_cast<size_t>(max_intersections)) { t = isect_pq->top().t; // current furthest hit distance } real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; real3<T> 
ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; for (unsigned int i = 0; i < num_primitives; i++) { unsigned int prim_idx = indices_[i + offset]; T local_t = t, u = 0.0f, v = 0.0f; if (intersector.Intersect(&local_t, &u, &v, prim_idx)) { // Update isect state if ((local_t > ray.min_t)) { if (isect_pq->size() < static_cast<size_t>(max_intersections)) { H isect; t = local_t; isect.t = t; isect.u = u; isect.v = v; isect.prim_id = prim_idx; isect_pq->push(isect); // Update t to furthest distance. t = ray.max_t; hit = true; } else if (local_t < isect_pq->top().t) { // delete furthest intersection and add new intersection. isect_pq->pop(); H hit; hit.t = local_t; hit.u = u; hit.v = v; hit.prim_id = prim_idx; isect_pq->push(hit); // Update furthest hit distance t = isect_pq->top().t; hit = true; } } } } return hit; } #endif template <typename T> template <class I, class H> bool BVHAccel<T>::Traverse(const Ray<T> &ray, const I &intersector, H *isect, const BVHTraceOptions &options) const { const int kMaxStackDepth = 512; (void)kMaxStackDepth; T hit_t = ray.max_t; int node_stack_index = 0; unsigned int node_stack[512]; node_stack[0] = 0; // Init isect info as no hit intersector.Update(hit_t, static_cast<unsigned int>(-1)); intersector.PrepareTraversal(ray, options); int dir_sign[3]; dir_sign[0] = ray.dir[0] < static_cast<T>(0.0) ? 1 : 0; dir_sign[1] = ray.dir[1] < static_cast<T>(0.0) ? 1 : 0; dir_sign[2] = ray.dir[2] < static_cast<T>(0.0) ? 
1 : 0; real3<T> ray_inv_dir; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; ray_inv_dir = vsafe_inverse(ray_dir); real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; T min_t = std::numeric_limits<T>::max(); T max_t = -std::numeric_limits<T>::max(); while (node_stack_index >= 0) { unsigned int index = node_stack[node_stack_index]; const BVHNode<T> &node = nodes_[index]; node_stack_index--; // VerilatedContext *contextp = new VerilatedContext; // // contextp->commandArgs(argc, argv); // Vintersector *vinter_obj = new Vintersector{contextp}; // // intersector->clk = 0; // vinter_obj->reset = 0; // vinter_obj->in_dummy = 0x11111111; // while (!contextp->gotFinish()) { // vinter_obj->eval(); // } // int out = vinter_obj->out_dummy; // printf("output of intersector: %x\n", out); // delete vinter_obj; // delete contextp; auto start = std::chrono::steady_clock::now(); bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin, node.bmax, ray_org, ray_inv_dir, dir_sign, simulator); auto end = std::chrono::steady_clock::now(); // *timer += end - start; if (hit) { // Branch node if (node.flag == 0) { int order_near = dir_sign[node.axis]; int order_far = 1 - order_near; // Traverse near first. 
node_stack[++node_stack_index] = node.data[order_far]; node_stack[++node_stack_index] = node.data[order_near]; } else if (TestLeafNode(node, ray, intersector)) { // Leaf node hit_t = intersector.GetT(); } } } assert(node_stack_index < kNANORT_MAX_STACK_DEPTH); bool hit = (intersector.GetT() < ray.max_t); intersector.PostTraversal(ray, hit, isect); return hit; } template <typename T> template <class I> inline bool BVHAccel<T>::TestLeafNodeIntersections( const BVHNode<T> &node, const Ray<T> &ray, const int max_intersections, const I &intersector, std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >, NodeHitComparator<T> > *isect_pq) const { bool hit = false; unsigned int num_primitives = node.data[0]; unsigned int offset = node.data[1]; real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; intersector.PrepareTraversal(ray); for (unsigned int i = 0; i < num_primitives; i++) { unsigned int prim_idx = indices_[i + offset]; T min_t, max_t; if (intersector.Intersect(&min_t, &max_t, prim_idx)) { // Always add to isect lists. NodeHit<T> isect; isect.t_min = min_t; isect.t_max = max_t; isect.node_id = prim_idx; if (isect_pq->size() < static_cast<size_t>(max_intersections)) { isect_pq->push(isect); } else if (min_t < isect_pq->top().t_min) { // delete the furthest intersection and add a new intersection. 
isect_pq->pop(); isect_pq->push(isect); } } } return hit; } template <typename T> template <class I> bool BVHAccel<T>::ListNodeIntersections( const Ray<T> &ray, int max_intersections, const I &intersector, StackVector<NodeHit<T>, 128> *hits) const { const int kMaxStackDepth = 512; T hit_t = ray.max_t; int node_stack_index = 0; unsigned int node_stack[512]; node_stack[0] = 0; // Stores furthest intersection at top std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >, NodeHitComparator<T> > isect_pq; (*hits)->clear(); int dir_sign[3]; dir_sign[0] = ray.dir[0] < static_cast<T>(0.0) ? 1 : 0; dir_sign[1] = ray.dir[1] < static_cast<T>(0.0) ? 1 : 0; dir_sign[2] = ray.dir[2] < static_cast<T>(0.0) ? 1 : 0; real3<T> ray_inv_dir; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; ray_inv_dir = vsafe_inverse(ray_dir); real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; T min_t, max_t; while (node_stack_index >= 0) { unsigned int index = node_stack[node_stack_index]; const BVHNode<T> &node = nodes_[static_cast<size_t>(index)]; node_stack_index--; bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin, node.bmax, ray_org, ray_inv_dir, dir_sign, simulator); if (hit) { // Branch node if (node.flag == 0) { int order_near = dir_sign[node.axis]; int order_far = 1 - order_near; // Traverse near first. 
node_stack[++node_stack_index] = node.data[order_far]; node_stack[++node_stack_index] = node.data[order_near]; } else { // Leaf node TestLeafNodeIntersections(node, ray, max_intersections, intersector, &isect_pq); } } } assert(node_stack_index < kMaxStackDepth); (void)kMaxStackDepth; if (!isect_pq.empty()) { // Store intesection in reverse order (make it frontmost order) size_t n = isect_pq.size(); (*hits)->resize(n); for (size_t i = 0; i < n; i++) { const NodeHit<T> &isect = isect_pq.top(); (*hits)[n - i - 1] = isect; isect_pq.pop(); } return true; } return false; } #if 0 // TODO(LTE): Implement template <typename T> template<class I, class H, class Comp> bool BVHAccel<T>::MultiHitTraverse(const Ray<T> &ray, int max_intersections, const I &intersector, StackVector<H, 128> *hits, const BVHTraceOptions& options) const { const int kMaxStackDepth = 512; T hit_t = ray.max_t; int node_stack_index = 0; unsigned int node_stack[512]; node_stack[0] = 0; // Stores furthest intersection at top std::priority_queue<H, std::vector<H>, Comp> isect_pq; (*hits)->clear(); // Init isect info as no hit intersector.Update(hit_t, static_cast<unsigned int>(-1)); intersector.PrepareTraversal(ray, options); int dir_sign[3]; dir_sign[0] = ray.dir[0] < static_cast<T>(0.0) ? static_cast<T>(1) : static_cast<T>(0); dir_sign[1] = ray.dir[1] < static_cast<T>(0.0) ? static_cast<T>(1) : static_cast<T>(0); dir_sign[2] = ray.dir[2] < static_cast<T>(0.0) ? 
static_cast<T>(1) : static_cast<T>(0); real3<T> ray_inv_dir; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; ray_inv_dir = vsafe_inverse(ray_dir); real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; T min_t, max_t; while (node_stack_index >= 0) { unsigned int index = node_stack[node_stack_index]; const BVHNode<T> &node = nodes_[static_cast<size_t>(index)]; node_stack_index--; bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin, node.bmax, ray_org, ray_inv_dir, dir_sign); // branch node if(hit) { if (node.flag == 0) { int order_near = dir_sign[node.axis]; int order_far = 1 - order_near; // Traverse near first. node_stack[++node_stack_index] = node.data[order_far]; node_stack[++node_stack_index] = node.data[order_near]; } else { if (MultiHitTestLeafNode(&isect_pq, max_intersections, node, ray, intersector)) { // Only update `hit_t` when queue is full. if (isect_pq.size() >= static_cast<size_t>(max_intersections)) { hit_t = isect_pq.top().t; } } } } } assert(node_stack_index < kMaxStackDepth); (void)kMaxStackDepth; if (!isect_pq.empty()) { // Store intesection in reverse order (make it frontmost order) size_t n = isect_pq.size(); (*hits)->resize(n); for (size_t i = 0; i < n; i++) { const H &isect = isect_pq.top(); (*hits)[n - i - 1] = isect; isect_pq.pop(); } return true; } return false; } #endif #ifdef __clang__ #pragma clang diagnostic pop #endif } // namespace nanort #endif // NANORT_H_
2229.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp target teams distribute schedule(dynamic, 4) for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). 
*/ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
sweep_MT_mex_old.c
/* * Kaczmarz down-up sweep on diagonally banded matrix. The essential loop is: * * for each row i * x = x + w*(b(i) - A(i,:)*x)*A(i,:)' * end * * The matrix is given in band storage format, where each row (stored * contiguously in memory) of the array R stores a diagonal of the matrix with * offset idx(j), such that * * A(i,i+idx(j)) = R(i,j) * * use (from MATLAB): * y = sweepR_mex(R,idx,x,b,w,dir) * * R - matrix of diagonals of matrix A * idx - offsets of diagonals * x - initial guess * b - right hand side (source) * w - relaxation parameter (0 <= w <= 2) * ns - number of RHS * n_threads - OPTIONAL argument to control the number of execution * threads solving CARP blocks in parallel. The number of threads can also * be defined via an environment variable (OMP_NUM_THREADS), but this * optional argument takes precedence. The default number of threads is * one. Take care if using more than one MATLAB worker per node: each * MATLAB worker will use OMP_NUM_THREADS, so if there are four workers on * a node, there will be 4 x OMP_NUM_THREADS parallel CARP sweeps. * compile : * mex -largeArrayDims sweep_MT_mex.c -DDEFINEUNIX -lmwblas CFLAGS="\$CFLAGS -fopenmp" LDFLAGS="\$LDFLAGS -fopenmp" * * Author: Mathias Louboutin from Art Petrenko sweepR_mex.c * Seismic Laboratory for Imaging and Modeling * Department of Earth, Ocean, and Atmosperic Sciences * The University of British Columbia * * Date: March, 2015 * You may use this code only under the conditions and terms of the * license contained in the file LICENSE provided with this source * code. If you do not agree to these terms you may not use this * software. */ #include <stdlib.h> /* for getenv */ #include <stddef.h> /* for size_t type */ #include <string.h> /* for memcpy */ #include <pthread.h> /* for threading */ #include <omp.h> /* The following section allows this file to compile on Mac OS X 10.8.5. Pass * the flag -DARCH_MACI64 to the compiler to activate it. 
*/ #ifdef ARCH_MACI64 #include <mach/error.h> typedef wchar_t char16_t; #else /* not ARCH_MACI64 */ #include <error.h> #endif /* ARCH_MACI64 */ #include <math.h> #include <mex.h> #include <matrix.h> #include <blas.h> struct copy_init_guess_data_t { double *copy_src_real, *copy_dst_real; double *copy_src_imag, *copy_dst_imag; long n_to_copy; }; struct sweep_data_t { long start_row, end_row, ncol, ny, nx, haloWidth, main_diagonal_offset; double *Rr, *Ri, *yr, *yi, *br, *bi; long *idx; double w; int dir, n_threads, ns; }; struct average_data_t { double *copy_src_real, *copy_dst_real; double *copy_src_imag, *copy_dst_imag; double *halo_1_real, *halo_2_real; double *halo_1_imag, *halo_2_imag; double *halo_dst_real, *halo_dst_imag; long n_to_copy, n_in_halo; }; struct thread_data_t { struct copy_init_guess_data_t copy_init_guess_data; struct sweep_data_t sweep_data; struct average_data_t average_data; pthread_barrier_t *barrier; }; void *do_sweep(void *thread_args_void) { struct sweep_data_t *thread_args; /* Variables contained in thread_args_void struct */ long start_row, end_row, ncol, ny, nx; /* Rr and Ri are pointers to short fat ncol-by-N matrices */ double *Rr, *Ri, *yr, *yi, *br, *bi; long *idx; double w; int n_threads,ns; /* Temporary storage variables */ double cr = 0, ci = 0; long offset, main_diagonal_offset; /* Assign local pointers to data locations in shared memory */ thread_args = (struct sweep_data_t *) thread_args_void; start_row = thread_args->start_row; end_row = thread_args->end_row; ncol = thread_args->ncol; ny = thread_args->ny; nx = thread_args->nx; main_diagonal_offset = thread_args->main_diagonal_offset; Rr = thread_args->Rr; Ri = thread_args->Ri; idx = thread_args->idx; yr = thread_args->yr; yi = thread_args->yi; br = thread_args->br; bi = thread_args->bi; w = thread_args->w; n_threads = thread_args->n_threads; ns = thread_args->ns; offset = (start_row == 0 ? 
0 : - main_diagonal_offset); long i; int s; long j; long k; long indj; long toto; #pragma omp parallel for schedule(static,1) private(k,indj,i,j,cr,ci) num_threads(n_threads) for(s=0 ; s<ns; s++) { /* Kaczmarz sweep on one row block */ for(i = start_row ; i<end_row ;i++ ) { if (0 <= i + main_diagonal_offset && i + main_diagonal_offset < nx){ cr = br[i + main_diagonal_offset+s*nx]; ci = bi[i + main_diagonal_offset+nx*s]; } else{ error(1,0,"Discovery of whether the iterate vector is haloed failed."); } /* First loop over non-zero row elements calculates inner product * of matrix row and CARP iterate */ long diff = i - start_row + offset; long icol=i*ncol; for(j=0 ; j<ncol;j++) { /* i + idx[j] is the column index for the full Helmholtz matrix. * k is the index into the vector representing the CARP iterate * of the given block. */ k = diff + idx[j]; indj=icol + j; if(0<=k && k<ny) { cr -= Rr[indj]*yr[k+s*nx] - Ri[indj]*yi[k+s*nx]; ci -= Rr[indj]*yi[k+s*nx] + Ri[indj]*yr[k+s*nx]; } } cr*=w; ci*=w; /* Second loop over non-zero row elements updates Karkzmarz iterate */ for(j=0 ; j<ncol;j++) { k = diff + idx[j]; indj=icol + j; if(0<=k && k<ny) { yr[k+s*nx] += cr*Rr[indj] + ci*Ri[indj]; yi[k+s*nx] += -cr*Ri[indj] + ci*Rr[indj]; } } } cr = 0; ci = 0; for(i = end_row-1 ; i>start_row-1 ;i-- ) { if (0 <= i + main_diagonal_offset && i + main_diagonal_offset < nx){ cr = br[i + main_diagonal_offset+s*nx]; ci = bi[i + main_diagonal_offset+nx*s]; } else{ error(1,0,"Discovery of whether the iterate vector is haloed failed."); } /* First loop over non-zero row elements calculates inner product * of matrix row and CARP iterate */ long diff = i - start_row + offset; long icol=i*ncol; for(j=0 ; j<ncol;j++) { /* i + idx[j] is the column index for the full Helmholtz matrix. * k is the index into the vector representing the CARP iterate * of the given block. 
*/ k = diff + idx[j]; indj=icol + j; if(0<=k && k<ny) { cr -= Rr[indj]*yr[k+s*nx] - Ri[indj]*yi[k+s*nx]; ci -= Rr[indj]*yi[k+s*nx] + Ri[indj]*yr[k+s*nx]; } } cr*=w; ci*=w; /* Second loop over non-zero row elements updates Karkzmarz iterate */ for(j=0 ; j<ncol;j++) { k = diff + idx[j]; indj=icol + j; if(0<=k && k<ny) { yr[k+s*nx] += cr*Rr[indj] + ci*Ri[indj]; yi[k+s*nx] += -cr*Ri[indj] + ci*Rr[indj]; } } } } return NULL; } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { /* structs to hold all arguments to each thread in one variable */ struct sweep_data_t **thread_args_sweep = NULL; struct copy_init_guess_data_t **copy_init_guess_data = NULL; mwSize ncol, nx; ptrdiff_t ncolBlas, idxIncBlas = 1, maxIdxLoc = 0; double *Rr,*Ri,*idxd = NULL,*xr,*xi,*br,*bi,*yr,*yi; long *idx = NULL; double w = 0; int ns; mwSize n_threads = 1; char *n_threads_str = NULL; mwSize N=1, numGridPointsPerBlock, main_diagonal_offset; /* Flags that are set if memory is allocated within the MEX file */ int Ri_alloc=0, xi_alloc=0, bi_alloc=0; mwSize *seg_bounds_hi, *seg_bounds_mid, *seg_bounds_row, *seg_bounds_lo; /* Read input arguments; initialize complex part to zero if input is real. 
*/ ns = lrint(mxGetScalar(prhs[5])); N = mxGetN(prhs[0]); ncol = mxGetM(prhs[0]); ncolBlas = (ptrdiff_t)ncol; Rr = mxGetPr(prhs[0]); if(mxIsComplex(prhs[0])){ Ri = mxGetPi(prhs[0]); } else{ Ri = mxCalloc(N*ncol,sizeof(double)); Ri_alloc = 1; } idxd = mxGetPr(prhs[1]); nx = mxGetM(prhs[2]); xr = mxGetPr(prhs[2]); if(mxIsComplex(prhs[2])){ xi = mxGetPi(prhs[2]); } else{ xi = mxCalloc(nx*ns,sizeof(double)); xi_alloc = 1; } br = mxGetPr(prhs[3]); if(mxIsComplex(prhs[3])){ bi = mxGetPi(prhs[3]); } else{ bi = mxCalloc(nx*ns,sizeof(double)); bi_alloc = 1; } if (mxGetM(prhs[3]) != nx){ mexErrMsgIdAndTxt("SLIM_release_apps:tools:algorithms:ThreeDFreqModeling:sweepR_mex:NumElements", "The number of elements in the iterate and right hand side vectors must be equal."); } /* Allocate the final output vector */ plhs[0] = mxCreateDoubleMatrix(nx,ns, mxCOMPLEX); yr = mxGetPr(plhs[0]); yi = mxGetPi(plhs[0]); /* Check to make sure memory was allocated correctly */ if (Rr==NULL || Ri==NULL || idxd==NULL || xr==NULL || xi==NULL || br==NULL || bi==NULL || yr==NULL || yi==NULL){ mexErrMsgIdAndTxt("SLIM_release_apps:tools:algorithms:ThreeDFreqModeling:sweepR_mex:OutOfMemory", "Could not allocate memory for main computational variables."); } if ((idx = (long *) mxCalloc(ncol, sizeof(long))) == NULL){ mexErrMsgIdAndTxt("SLIM_release_apps:tools:algorithms:ThreeDFreqModeling:sweepR_mex:OutOfMemory", "Could not allocate memory for main computational variables."); } mwSize i; for (i=0; i < ncol; i++){ idx[i] = lrint(idxd[i]); } /* The default value for the number of threads can be overridden by an * environment variable. */ n_threads_str = getenv("OMP_NUM_THREADS"); if (n_threads_str == NULL){ n_threads = 1; } else{ n_threads = strtol(n_threads_str, NULL, 10); if(n_threads < 1){ n_threads = 1; } } /* The environment variable can in turn be overridden by an optional * argument to the mexFunction. 
*/ if (nrhs >= 7){ if(1 <= lrint(mxGetScalar(prhs[6]))){ n_threads = lrint(mxGetScalar(prhs[6])); } } /* printf("Using %d threads \n",n_threads); /* Partition the iterate vector into blocks. Note that the below * partitioning scheme is slighlty different from that in pCARPCG.m in this * directory. The partitioning scheme of pCARPCG corresponds to * distributing a three dimensional array with dimensions given by n * according to Matlab's codistributor1d.defaultPartition(n(3)), and then * vectorizing it. The partition scheme of the present file instead uses * Matlab's codistributor1d.defaultPartion(prod(n)). In other words, * pCARPCG divides the iterate into blocks along the slow dimension, * whereas sweepR_mex.c does not take dimensionality into account, only the * total number of gridpoints. This is done to avoid needing an extra input * parameter with the the system dimensions. The seg_bounds_hi, _lo and * _mid arrays contain indices into non-haloed vectors, while the * seg_bounds_row array contains indices to the rows of the system matrix. * * yr_seg[i_thread-1] overlap yr_seg[i_thread] * ------------------------|-----|-----|------------------------------- * .----------------^ | ^-------------------. * seg_bounds_lo[i_thread], seg_bounds_mid[i_thread], seg_bounds_hi[i_thread] */ numGridPointsPerBlock = N; seg_bounds_hi = (mwSize *)mxCalloc(2,sizeof(mwSize)); seg_bounds_mid = (mwSize *)mxCalloc(2,sizeof(mwSize)); seg_bounds_lo = (mwSize *)mxCalloc(2,sizeof(mwSize)); seg_bounds_row = (mwSize *)mxCalloc(2,sizeof(mwSize)); if (N == nx){ main_diagonal_offset = 0; } else{ /* The vector is haloed. We are only able to correctly process matrices * with a non-zero main diagonal and symmetric off-main diagonal offsets. 
*/ if (ncol % 2 != 1){ mexErrMsgIdAndTxt("SLIM_release_apps:tools:algorithms:ThreeDFreqModeling:sweepR_mex:EvenNumberOfDiags", "Input iterate vector appears to be haloed but there is an even number of non-zero diagonals in the system matrix."); } main_diagonal_offset = idx[ncol/2]; mwSize i; for (i = 1; i <= ncol/2; i++){ if (idx[ncol/2 + i] - main_diagonal_offset != -(idx[ncol/2 - i] - main_diagonal_offset)){ mexErrMsgIdAndTxt("SLIM_release_apps:tools:algorithms:ThreeDFreqModeling:sweepR_mex:DiagsNotSymmetric", "Input iterate vector appears to be haloed but the pattern of non-zero diagonals in the system matrix is not symmetric."); } } } seg_bounds_hi[0] = 0; seg_bounds_mid[0] = 0; seg_bounds_row[0] = 0; seg_bounds_lo[0] = 0; seg_bounds_lo[1] = nx; seg_bounds_hi[1] = nx; seg_bounds_mid[1] = nx; seg_bounds_row[1] = N; thread_args_sweep = (struct sweep_data_t **)mxCalloc(1, sizeof(struct sweep_data_t *)); /* Set thread arguments */ thread_args_sweep[0] = (struct sweep_data_t *)mxCalloc(1,sizeof(struct sweep_data_t)); thread_args_sweep[0]->start_row = seg_bounds_row[0]; thread_args_sweep[0]->end_row = seg_bounds_row[1]; thread_args_sweep[0]->ncol = ncol; thread_args_sweep[0]->ny = seg_bounds_hi[1]-seg_bounds_lo[0]; thread_args_sweep[0]->nx = nx; thread_args_sweep[0]->main_diagonal_offset = main_diagonal_offset; thread_args_sweep[0]->Rr = Rr; thread_args_sweep[0]->Ri = Ri; thread_args_sweep[0]->idx = idx; thread_args_sweep[0]->yr = yr; thread_args_sweep[0]->yi = yi; thread_args_sweep[0]->br = br; thread_args_sweep[0]->bi = bi; thread_args_sweep[0]->w = w; thread_args_sweep[0]->n_threads = n_threads; thread_args_sweep[0]->ns = ns; /* Set the initial guess directly in the output array too */ memcpy((void *)yr, (void *)xr, sizeof(double)*nx*ns); memcpy((void *)yi, (void *)xi, sizeof(double)*nx*ns); do_sweep((void *)thread_args_sweep[0]); /* Free memory if it was allocated within the MEX file. 
*/ if (Ri_alloc){ mxFree(Ri); } if (xi_alloc){ mxFree(xi); } if (bi_alloc){ mxFree(bi); } mxFree(idx); mxFree(thread_args_sweep); /* Don't think I need pthread_exit() here, because pthread_join is called above */ return; }
GB_unaryop__lnot_fp64_int8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_fp64_int8
// op(A') function:  GB_tran__lnot_fp64_int8

// C type:   double
// A type:   int8_t
// cast:     double cij = (double) aij
// unaryop:  cij = !(aij != 0)

// Input (A) entry type.
#define GB_ATYPE \
    int8_t

// Output (C) entry type.
#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical NOT)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting (int8_t to double)
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                          \
    /* aij = Ax [pA] */                    \
    GB_GETA (aij, Ax, pA) ;                \
    /* Cx [pC] = op (cast (aij)) */        \
    GB_CASTING (x, aij) ;                  \
    GB_OP (GB_CX (pC), x) ;                \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = !((double) aij != 0) entrywise over the anz entries of Ax.
// Entries are independent, so the loop parallelizes with a static schedule.
GrB_Info GB_unop__lnot_fp64_int8
(
    double *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in GB_unaryop_transpose.c, which expands the
// GB_* macros defined above for this type/operator combination.
GrB_Info GB_tran__lnot_fp64_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 24; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
pr26943-3.c
/* PR c++/26943 */
/* { dg-do run } */

/* Regression test for GCC PR c++/26943: verifies OpenMP data-sharing
   semantics (shared / firstprivate / lastprivate / private) together with
   reductions inside a nested parallel region.  The OpenMP runtime entry
   points are declared by hand so the test does not depend on <omp.h>.  */
extern int omp_set_dynamic (int);
extern int omp_get_thread_num (void);
extern void abort (void);

/* j and l accumulate per-thread error bits through reduction(+) clauses;
   a nonzero l at the end means some clause misbehaved.  */
int a = 8, b = 12, c = 16, d = 20, j = 0, l = 0;
char e[10] = "a", f[10] = "b", g[10] = "c", h[10] = "d";
/* volatile keeps the compiler from folding the if() clause below; k stays
   zero, so the clause is true when evaluated with thread id 0.  */
volatile int k;

int
main (void)
{
  int i;
  omp_set_dynamic (0);   /* exact thread counts, so num_threads() is honored */
  omp_set_nested (1);    /* NOTE(review): no extern declaration above — relies
                            on an implicit declaration; confirm intended.  */
  /* Outer region: 2 threads, each thread's l summed into the global.  */
#pragma omp parallel num_threads (2) reduction (+:l) if (k == omp_get_thread_num ())
  {
    /* Inner worksharing loop: schedule(static,1) with num_threads(4) hands
       exactly one iteration to each thread, which is what makes the
       barriers inside the loop body legal.  */
#pragma omp parallel for shared (a, e) firstprivate (b, f) lastprivate (c, g) private (d, h) schedule (static, 1) num_threads (4) reduction (+:j)
    for (i = 0; i < 4; i++)
      {
        /* Before any thread updates: shared a and e hold their initial
           values, firstprivate b and f hold per-thread copies of them.  */
        if (a != 8 || b != 12 || e[0] != 'a' || f[0] != 'b')
          j++;
#pragma omp barrier
#pragma omp atomic
        a += i;   /* shared: all four increments land in one object */
        b += i;   /* firstprivate: each thread bumps its own copy */
        c = i;    /* lastprivate: the i == 3 value survives the loop */
        d = i;    /* private: value is discarded after the loop */
#pragma omp atomic
        e[0] += i;
        f[0] += i;
        g[0] = 'g' + i;
        h[0] = 'h' + i;
#pragma omp barrier
        /* After the barrier: shared a/e saw all increments (0+1+2+3 == 6),
           the privatized variables saw only this thread's write.  */
        if (a != 8 + 6 || b != 12 + i || c != i || d != i)
          j += 8;
        if (e[0] != 'a' + 6 || f[0] != 'b' + i || g[0] != 'g' + i)
          j += 64;
        if (h[0] != 'h' + i)
          j += 512;
      }
    /* Outside the inner region: shared updates persist, firstprivate b is
       untouched, lastprivate c/g carry the i == 3 values, and private d/h
       are back to their original contents.  */
    if (j || a != 8 + 6 || b != 12 || c != 3 || d != 20)
      ++l;
    if (e[0] != 'a' + 6 || f[0] != 'b' || g[0] != 'g' + 3 || h[0] != 'd')
      l += 8;
  }
  if (l)
    abort ();
  return 0;
}
dropout-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file dropout-inl.h * \brief * \author Bing Xu, Da Zheng, Hang Zhang */ #ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_ #define MXNET_OPERATOR_NN_DROPOUT_INL_H_ #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <map> #include <vector> #include <string> #include <utility> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../random/sampler.h" #include "../tensor/elemwise_binary_broadcast_op.h" #if (MSHADOW_USE_MKL == 1) && defined(_OPENMP) && !defined(__CUDACC__) #define MXNET_USE_MKL_DROPOUT 1 #endif #if MXNET_USE_MKL_DROPOUT #include <omp.h> #include <mkl_vml_functions.h> #include <mkl_vsl.h> #endif // MXNET_USE_MKL_DROPOUT #define MXNET_USE_CUDNN_DROPOUT MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 7 namespace dropout { enum DropoutOpInputs {kData}; enum DropoutOpOutputs {kOut, kMask}; enum DropoutOpForwardResource {kRandom}; enum DropoutOpMode {kTraining, kAlways}; } // namespace dropout namespace mxnet { namespace op { const int MAX_DIM = 5; struct DropoutParam : public dmlc::Parameter<DropoutParam> { float p; int mode; mxnet::TShape axes; dmlc::optional<bool> cudnn_off; 
DMLC_DECLARE_PARAMETER(DropoutParam) { DMLC_DECLARE_FIELD(p).set_default(0.5) .set_range(0, 1) .describe("Fraction of the input that gets dropped out during training time."); DMLC_DECLARE_FIELD(mode) .add_enum("training", dropout::kTraining) .add_enum("always", dropout::kAlways) .set_default(dropout::kTraining) .describe("Whether to only turn on dropout during training or to also turn on for inference."); DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, 0)) .describe("Axes for variational dropout kernel."); DMLC_DECLARE_FIELD(cudnn_off).set_default(dmlc::optional<bool>(false)) .describe("Whether to turn off cudnn in dropout operator. " "This option is ignored if axes is specified."); } }; // struct DropoutParam template<typename xpu, typename DType> class DropoutOp { #if MXNET_USE_MKL_DROPOUT static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen, int n, double p, int* r) { typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1); const int seed = 17 + abs(genImpl.rand() % 4096); CHECK_GE(seed, 0); const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); #pragma omp parallel num_threads(nthr) { const int ithr = omp_get_thread_num(); const int avg_amount = (n + nthr - 1) / nthr; const int my_offset = ithr * avg_amount; const int my_amount = std::min(my_offset + avg_amount, n) - my_offset; if (my_amount > 0) { VSLStreamStatePtr stream; vslNewStream(&stream, VSL_BRNG_MCG31, seed); vslSkipAheadStream(stream, my_offset); viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p); vslDeleteStream(&stream); } } } static inline bool MKLAvailable() { // BernoulliGenerate expects an array int, so for types smaller than int, the mask buffer // will be too small, so we can;t use MKL in those cases return sizeof(DType) >= sizeof(int); } // MKL forward pass inline void MKLForward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<TBlob> &out_data) { Stream<xpu> *s = ctx.get_stream<xpu>(); 
RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s); DType *outptr = out.dptr_; DType *dataptr = data.dptr_; auto maskptr = reinterpret_cast<int *>(mask.dptr_); int count = mask.shape_[0] * mask.shape_[1]; if (sizeof(DType) > sizeof(int)) { // allocating new buffer to avoiding memory overlapping between `mask.dptr_` and `maskptr` Tensor<xpu, 1, int> temp = ctx.requested[1].get_space_typed<xpu, 1, int>(Shape1(count), s); maskptr = temp.dptr_; } BernoulliGenerate(*pgen, count, this->pkeep_, maskptr); const float pk_1 = 1.0f / this->pkeep_; #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = 0; i < count; ++i) { const DType maskVal = static_cast<DType>(maskptr[i]) * pk_1; outptr[i] = dataptr[i] * maskVal; mask.dptr_[i] = maskVal; } } // MKL backward pass inline void MKLBackward(const OpContext &ctx, const std::vector<TBlob> &in_grad, const std::vector<TBlob> &out_data, const std::vector<TBlob> &out_grad) { Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s); DType *ingradptr = gdata.dptr_; const DType *outgradptr = grad.dptr_; const DType *maskptr = mask.dptr_; const int count = mask.shape_[0] * mask.shape_[1]; #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = 0; i < count; ++i) { ingradptr[i] = outgradptr[i] * maskptr[i]; } } #endif // #if MXNET_USE_MKL_DROPOUT public: /*! * \brief Dropout kernel, compute dropout tensor */ struct DropoutKernel { /*! 
* \brief Dropout kernel function * \param id Thread number (0-based representing count) * \param gen Random number generator * \param N Total number of items in the output * \param step Step between items, related to parallelism * \param dropout_out Output dropout values * \param mask_out Output mask (is multiplied to create dropout output, may be 0) * \param input_data Input data to perform the dropout on * \param pkeep Dropout rate (keep when the generated random number is less than this value) */ MSHADOW_XINLINE static void Map(index_t id, RandGenerator<xpu, DType> gen, const index_t N, const index_t step, DType *dropout_out, DType *mask_out, const DType *input_data, const real_t pkeep) { RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, { const real_t rand_num = static_cast<real_t>(genImpl.uniform()); mask_out[i] = mshadow_op::threshold_eq::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep); dropout_out[i] = input_data[i] * mask_out[i]; }); } }; struct BernoulliKernel { /*! \brief Bernoulli kernel for generating mask */ MSHADOW_XINLINE static void Map(index_t id, RandGenerator<xpu, DType> gen, const index_t N, const index_t step, DType *mask_out, const real_t pkeep) { RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, { const real_t rand_num = static_cast<real_t>(genImpl.uniform()); mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep); }); } }; explicit DropoutOp(const DropoutParam &param, Context ctx) { this->pkeep_ = 1.0f - param.p; this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode); this->axes_ = param.axes; this->dropout_passthrough_ = true; #if MXNET_USE_CUDNN_DROPOUT this->cudnn_off_ = param.cudnn_off && param.cudnn_off.value(); this->ctx_ = ctx; if (ctx.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) { dtype_ = mshadow::DataType<DType>::kCudnnFlag; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_)); CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_)); CUDNN_CALL(cudnnCreateTensorDescriptor(&dx_desc_)); 
CUDNN_CALL(cudnnCreateTensorDescriptor(&dy_desc_)); CUDNN_CALL(cudnnCreateDropoutDescriptor(&dropout_desc_)); } #endif // MXNET_USE_CUDNN_DROPOUT } ~DropoutOp() { #if MXNET_USE_CUDNN_DROPOUT if (this->ctx_.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) { CUDNN_CALL(cudnnDestroyTensorDescriptor(x_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(y_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(dx_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(dy_desc_)); CUDNN_CALL(cudnnDestroyDropoutDescriptor(dropout_desc_)); } #endif // MXNET_USE_CUDNN_DROPOUT } #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) inline bool CuDNNAvailable() { return this->pkeep_ > 0 && !this->cudnn_off_; } inline void CuDNNForward(const OpContext &ctx, const TBlob &in, const TBlob &mask, const TBlob &out) { Stream<xpu> *s = ctx.get_stream<xpu>(); // set dropout state. Random<xpu, unsigned> *prnd = ctx.requested[1].get_random<xpu, unsigned>(s); uint64_t rng_seed = prnd->GetSeed(); // reset dropout descriptor if rng seed changed. bool reset = seed_ != rng_seed; seed_ = rng_seed; ctx.requested[0].get_cudnn_dropout_desc(&dropout_desc_, s, 1.0f - this->pkeep_, seed_, reset); // describe input/output tensor int dim[4], stride[4]; dim[0] = 1; dim[1] = 1; dim[2] = 1; dim[3] = out.Size(); stride[0] = out.Size(); stride[1] = out.Size(); stride[2] = out.Size(); stride[3] = 1; CUDNN_CALL(cudnnSetTensorNdDescriptor(x_desc_, dtype_, 4, dim, stride)); CUDNN_CALL(cudnnSetTensorNdDescriptor(y_desc_, dtype_, 4, dim, stride)); // perform dropout with cudnn CUDNN_CALL(cudnnDropoutGetReserveSpaceSize(x_desc_, &dropout_reserve_byte_)); // cudnn uses bits to record the positions that are dropped, so reserve bytes is always // 1/8 of input size. 
CHECK_GE(mask.Size() * sizeof(DType), dropout_reserve_byte_) << "The size of the mask space is smaller than the required cudnn reserved space."; CUDNN_CALL(cudnnDropoutForward(s->dnn_handle_, dropout_desc_, x_desc_, in.dptr<DType>(), y_desc_, out.dptr<DType>(), mask.dptr<DType>(), dropout_reserve_byte_)); } inline void CuDNNBackward(const OpContext &ctx, const TBlob &out_grad, const TBlob &mask, const TBlob &in_grad) { Stream<xpu> *s = ctx.get_stream<xpu>(); // describe input/output tensor int dim[4], stride[4]; dim[0] = 1; dim[1] = 1; dim[2] = 1; dim[3] = in_grad.Size(); stride[0] = in_grad.Size(); stride[1] = in_grad.Size(); stride[2] = in_grad.Size(); stride[3] = 1; CUDNN_CALL(cudnnSetTensorNdDescriptor(dy_desc_, dtype_, 4, dim, stride)); CUDNN_CALL(cudnnSetTensorNdDescriptor(dx_desc_, dtype_, 4, dim, stride)); // perform dropout with cudnn CUDNN_CALL(cudnnDropoutBackward(s->dnn_handle_, dropout_desc_, dy_desc_, out_grad.dptr<DType>(), dx_desc_, in_grad.dptr<DType>(), mask.dptr<DType>(), dropout_reserve_byte_)); } #endif // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data) { this->dropout_passthrough_ = true; if (req[dropout::kOut] != kNullOp) { CHECK_EQ(in_data.size(), 1U); if (ctx.is_train) { CHECK_EQ(out_data.size(), 2U); } Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob &in = in_data[dropout::kData]; const TBlob &out = out_data[dropout::kOut]; const TBlob &mask = out_data[dropout::kMask]; if (this->pkeep_ < 1 && (ctx.is_train || this->mode_ == dropout::kAlways)) { this->dropout_passthrough_ = false; if (this->axes_.ndim() == 0) { #if MXNET_USE_MKL_DROPOUT if (MKLAvailable()) { MKLForward(ctx, in_data, out_data); return; } #endif // MXNET_USE_MKL_DROPOUT #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) if (CuDNNAvailable()) { CuDNNForward(ctx, in, mask, out); return; } #endif // MXNET_USE_CUDNN_DROPOUT && 
defined(__CUDACC__) RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); CHECK(req[dropout::kOut] != kAddTo); LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(), out.dptr<DType>(), mask.dptr<DType>(), in.dptr<DType>(), this->pkeep_); return; } else { RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); // initialize the mask LaunchRNG<BernoulliKernel, xpu>(s, pgen, mask.Size(), mask.dptr<DType>(), this->pkeep_); // broadcast mul mxnet::TShape new_lshape, new_rshape, new_oshape; int ndim = BinaryBroadcastShapeCompact(in.shape_, mask.shape_, out.shape_, &new_lshape, &new_rshape, &new_oshape); if (!ndim) { MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, out.Size(), out.dptr<DType>(), in.dptr<DType>(), mask.dptr<DType>()); }); } else { BROADCAST_NDIM_SWITCH(ndim, NDim, { mshadow::Shape<NDim> oshape = new_oshape.get<NDim>(); mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>()); mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, mshadow_op::mul>, xpu>:: template LaunchEx(s, new_oshape.Size(), req[dropout::kOut], lstride, rstride, oshape, in.dptr<DType>(), mask.dptr<DType>(), out.dptr<DType>()); }); } } } else { if (req[dropout::kOut] == kWriteInplace) return; MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch( s, out.Size(), out.dptr<DType>(), in.dptr<DType>()); }); } } } void Backward(const OpContext &ctx, const std::vector<TBlob> &out_grad, const std::vector<TBlob> &out_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &in_grad) { using namespace mshadow; using namespace mshadow::expr; Stream<xpu> *s = ctx.get_stream<xpu>(); if (!this->dropout_passthrough_) { 
this->dropout_passthrough_ = true; const TBlob &gdata = in_grad[dropout::kData]; const TBlob &grad = out_grad[dropout::kOut]; const TBlob &mask = out_data[dropout::kMask]; if (this->axes_.ndim() == 0) { #if MXNET_USE_MKL_DROPOUT if (MKLAvailable()) { MKLBackward(ctx, in_grad, out_data, out_grad); return; } #endif // MXNET_USE_MKL_DROPOUT #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) if (CuDNNAvailable()) { CuDNNBackward(ctx, grad, mask, gdata); return; } #endif // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) // standard case for dropout CHECK_EQ(grad.Size(), mask.Size()); MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>()); }); return; } else { // broardcast mul mxnet::TShape new_lshape, new_rshape, new_oshape; int ndim = BinaryBroadcastShapeCompact(grad.shape_, mask.shape_, gdata.shape_, &new_lshape, &new_rshape, &new_oshape); if (!ndim) { MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>()); }); } else { BROADCAST_NDIM_SWITCH(ndim, NDim, { mshadow::Shape<NDim> oshape = new_oshape.get<NDim>(); mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>()); mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, mshadow_op::mul>, xpu>:: template LaunchEx(s, new_oshape.Size(), req[0], lstride, rstride, oshape, grad.dptr<DType>(), mask.dptr<DType>(), gdata.dptr<DType>()); }); } } } else { const TBlob& gdata = in_grad[dropout::kData]; const TBlob& grad = out_grad[dropout::kOut]; MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>()); 
      });
    }
  }

 private:
  /*! \brief Dropout rate (keep when the generated random number is less than this value) */
  real_t pkeep_;
  /*! \brief Dropout mode */
  dropout::DropoutOpMode mode_;
  /*! \brief Axes on which dropout mask is shared in the form of broadcast multiply */
  mxnet::TShape axes_;
  /*! \brief Flag to record whether forward is executed in pass-through mode */
  bool dropout_passthrough_;
#if MXNET_USE_CUDNN_DROPOUT
  /*! \brief Whether the user explicitly disabled the cuDNN path */
  bool cudnn_off_;
  /*! \brief Context the operator was created with (gates GPU-only descriptor cleanup) */
  Context ctx_;
  /*! \brief cuDNN data type corresponding to DType */
  cudnnDataType_t dtype_;
  /*! \brief cuDNN dropout descriptor (re-seeded when the RNG seed changes) */
  cudnnDropoutDescriptor_t dropout_desc_;
  /*! \brief Last RNG seed used to initialize dropout_desc_ */
  uint64_t seed_;
  /*! \brief Size of the cuDNN reserve space recorded in the forward pass */
  size_t dropout_reserve_byte_;
  /*! \brief Tensor descriptors for forward (x/y) and backward (dx/dy) */
  cudnnTensorDescriptor_t x_desc_, y_desc_, dx_desc_, dy_desc_;
#endif  // MXNET_USE_CUDNN_DROPOUT
};  // class DropoutOp

/*!
 * \brief Stateful forward entry point: unpacks the typed DropoutOp from the
 *        operator state and runs its Forward pass on the given blobs.
 */
template<typename xpu>
void DropoutCompute(const OpStatePtr& state,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
    op.Forward(ctx, inputs, req, outputs);
  });
}

/*!
 * \brief Stateful backward entry point: inputs are {output gradient, saved
 *        mask}; repackages them into the out_grad/out_data layout Backward
 *        indexes by the dropout enums, then computes the input gradient.
 */
template<typename xpu>
void DropoutGradCompute(const OpStatePtr& state,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1);
  CHECK_EQ(req.size(), 1);
  // Slot the flat input list into the positions Backward expects.
  std::vector<TBlob> out_grads(2);
  std::vector<TBlob> out_data(2);
  out_grads[dropout::kOut] = inputs[0];
  out_data[dropout::kMask] = inputs[1];
  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
    op.Backward(ctx, out_grads, out_data, req, outputs);
  });
}
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_NN_DROPOUT_INL_H_
spectra.c
/** @file spectra.c Documented spectra module
 *
 * Julien Lesgourgues, 25.08.2010
 *
 * This module computes the anisotropy and Fourier power spectra
 * \f$ C_l^{X}, P(k), ... \f$'s given the transfer and Bessel functions
 * (for anisotropy spectra), the source functions (for Fourier spectra)
 * and the primordial spectra.
 *
 * The following functions can be called from other modules:
 *
 * -# spectra_init() at the beginning (but after transfer_init())
 * -# spectra_cl_at_l() at any time for computing C at any l
 * -# spectra_pk_at_z() at any time for computing P(k) at any z
 * -# spectra_pk_at_k_and_z() at any time for computing P at any k and z
 * -# spectra_free() at the end
 */
(index_md=0;index_md<psp->md_size; index_md++) { free(cl_md[index_md]); free(cl_md_ic[index_md]); } free(cl_tot); free(cl_md); free(cl_md_ic); return _SUCCESS_; } /** * Anisotropy power spectra C_l's for all types, modes and initial conditions. * * This routine evaluates all the C_l's at a given value of l by * interpolating in the pre-computed table. When relevant, it also * sums over all initial conditions for each mode, and over all modes. * * This function can be * called from whatever module at whatever time, provided that * spectra_init() has been called before, and spectra_free() has not * been called yet. * * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param l Input: multipole number * @param cl_tot Ouput: total C_l's for all types (TT, TE, EE, etc..) * @param cl_md Ouput: C_l's for all types (TT, TE, EE, etc..) decomposed mode by mode (scalar, tensor, ...) when relevant * @param cl_md_ic Ouput: C_l's for all types (TT, TE, EE, etc..) decomposed by pairs of initial conditions (adiabatic, isocurvatures) for each mode (usually, only for the scalar mode) when relevant * @return the error status */ int spectra_cl_at_l( struct spectra * psp, double l, double * cl_tot, /* array with argument cl_tot[index_ct] (must be already allocated) */ double * * cl_md, /* array with argument cl_md[index_md][index_ct] (must be already allocated only if several modes) */ double * * cl_md_ic /* array with argument cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct] (must be already allocated for a given mode only if several ic's) */ ) { /** Summary: */ /** - define local variables */ int last_index; int index_md; int index_ic1,index_ic2,index_ic1_ic2; int index_ct; /** A) treat case in which there is only one mode and one initial condition. Then, only cl_tot needs to be filled. 
*/ if ((psp->md_size == 1) && (psp->ic_size[0] == 1)) { index_md = 0; if ((int)l <= psp->l[psp->l_size[index_md]-1]) { /* interpolate at l */ class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ct_size, l, &last_index, cl_tot, psp->ct_size, psp->error_message), psp->error_message, psp->error_message); /* set to zero for the types such that l<l_max */ for (index_ct=0; index_ct<psp->ct_size; index_ct++) if ((int)l > psp->l_max_ct[index_md][index_ct]) cl_tot[index_ct]=0.; } else { for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_tot[index_ct]=0.; } } /** B) treat case in which there is only one mode with several initial condition. Fill cl_md_ic[index_md=0] and sum it to get cl_tot. */ if ((psp->md_size == 1) && (psp->ic_size[0] > 1)) { index_md = 0; for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_tot[index_ct]=0.; for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); if (((int)l <= psp->l[psp->l_size[index_md]-1]) && (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_)) { class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ic_ic_size[index_md]*psp->ct_size, l, &last_index, cl_md_ic[index_md], psp->ic_ic_size[index_md]*psp->ct_size, psp->error_message), psp->error_message, psp->error_message); for (index_ct=0; index_ct<psp->ct_size; index_ct++) if ((int)l > psp->l_max_ct[index_md][index_ct]) cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.; } else { for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.; } /* compute cl_tot by summing over cl_md_ic */ for (index_ct=0; index_ct<psp->ct_size; index_ct++) { if (index_ic1 == index_ic2) 
cl_tot[index_ct]+=cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]; else cl_tot[index_ct]+=2.*cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]; } } } } /** C) loop over modes */ if (psp->md_size > 1) { for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_tot[index_ct]=0.; for (index_md = 0; index_md < psp->md_size; index_md++) { /** C.1) treat case in which the mode under consideration has only one initial condition. Fill cl_md[index_md]. */ if (psp->ic_size[index_md] == 1) { if ((int)l <= psp->l[psp->l_size[index_md]-1]) { class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ct_size, l, &last_index, cl_md[index_md], psp->ct_size, psp->error_message), psp->error_message, psp->error_message); for (index_ct=0; index_ct<psp->ct_size; index_ct++) if ((int)l > psp->l_max_ct[index_md][index_ct]) cl_md[index_md][index_ct]=0.; } else { for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_md[index_md][index_ct]=0.; } } /** C.2) treat case in which the mode under consideration has several initial conditions. 
Fill cl_md_ic[index_md] and sum it to get cl_md[index_md] */ if (psp->ic_size[index_md] > 1) { if ((int)l <= psp->l[psp->l_size[index_md]-1]) { /* interpolate all ic and ct */ class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ic_ic_size[index_md]*psp->ct_size, l, &last_index, cl_md_ic[index_md], psp->ic_ic_size[index_md]*psp->ct_size, psp->error_message), psp->error_message, psp->error_message); /* set to zero some of the components */ for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); for (index_ct=0; index_ct<psp->ct_size; index_ct++) { if (((int)l > psp->l_max_ct[index_md][index_ct]) || (psp->is_non_zero[index_md][index_ic1_ic2] == _FALSE_)) cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.; } } } } /* if l was too big, set anyway all components to zero */ else { for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); for (index_ct=0; index_ct<psp->ct_size; index_ct++) { cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.; } } } } /* sum up all ic for each mode */ for (index_ct=0; index_ct<psp->ct_size; index_ct++) { cl_md[index_md][index_ct]=0.; for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); if (index_ic1 == index_ic2) cl_md[index_md][index_ct]+=cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]; else cl_md[index_md][index_ct]+=2.*cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]; } } } } /** C.3) add contribution of cl_md[index_md] to 
cl_tot */ for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_tot[index_ct]+=cl_md[index_md][index_ct]; } } return _SUCCESS_; } /** * Matter power spectrum for arbitrary redshift and for all initial conditions. * * This routine evaluates the matter power spectrum at a given value of z by * interpolating in the pre-computed table (if several values of z have been stored) * or by directly reading it (if it only contains values at z=0 and we want P(k,z=0)) * * * Can be called in two modes: linear or logarithmic. * * - linear: returns P(k) (units: Mpc^3) * * - logarithmic: returns ln(P(k)) * * One little subtlety: in case of several correlated initial conditions, * the cross-correlation spectrum can be negative. Then, in logarithmic mode, * the non-diagonal elements contain the cross-correlation angle P_12/sqrt(P_11 P_22) * (from -1 to 1) instead of ln(P_12) * * This function can be * called from whatever module at whatever time, provided that * spectra_init() has been called before, and spectra_free() has not * been called yet. 
 * @param output_tot Output: total matter power spectrum P(k) in Mpc**3 (linear mode), or its logarithms (logarithmic mode)
 * @param output_ic  Output: for each pair of initial conditions, matter power spectra P(k) in Mpc**3 (linear mode), or their logarithms and cross-correlation angles (logarithmic mode)
if only values at tau=tau_today are stored and we want P(k,z=0), no need to interpolate */ if (psp->ln_tau_size == 1) { class_test(z != 0., psp->error_message, "asked z=%e but only P(k,z=0) has been tabulated",z); for (index_k=0; index_k<psp->ln_k_size; index_k++) if (psp->ic_size[index_md] == 1) { output_tot[index_k] = psp->ln_pk[index_k]; } else { for (index_ic1_ic2 = 0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) { output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] = psp->ln_pk[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2]; } } } /** (b.) if several values of tau have been stored, use interpolation routine to get spectra at correct redshift */ else { if (psp->ic_ic_size[index_md] == 1) { class_call(array_interpolate_spline(psp->ln_tau, psp->ln_tau_size, psp->ln_pk, psp->ddln_pk, psp->ln_k_size, ln_tau, &last_index, output_tot, psp->ln_k_size, psp->error_message), psp->error_message, psp->error_message); } else { class_call(array_interpolate_spline(psp->ln_tau, psp->ln_tau_size, psp->ln_pk, psp->ddln_pk, psp->ic_ic_size[index_md]*psp->ln_k_size, ln_tau, &last_index, output_ic, psp->ic_ic_size[index_md]*psp->ln_k_size, psp->error_message), psp->error_message, psp->error_message); } } /** - third step: if there are several initial conditions, compute the total P(k) and set back all uncorrelated coefficients to exactly zero. Check positivity of total P(k). */ if (psp->ic_size[index_md] > 1) { for (index_k=0; index_k<psp->ln_k_size; index_k++) { output_tot[index_k] = 0.; for (index_ic1=0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); if (index_ic1 == index_ic2) { output_tot[index_k] += exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2]); } else { if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) { output_tot[index_k] += 2. 
* output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] * sqrt(exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md])]) * exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic2,index_ic2,psp->ic_size[index_md])])); } else output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] = 0.; } } } class_test(output_tot[index_k] <= 0., psp->error_message, "for k=%e, z=%e, the matrix of initial condition amplitudes was not positive definite, hence P(k)_total=%e results negative", exp(psp->ln_k[index_k]),z,output_tot[index_k]); } } /** - fourth step: depending on requested mode (linear or logarithmic), apply necessary transformation to the output arrays */ /** (a.) linear mode: if only one initial condition, convert output_pk to linear format; if several initial conditions, convert output_ic to linear format, output_tot is already in this format */ if (mode == linear) { if (psp->ic_size[index_md] == 1) { for (index_k=0; index_k<psp->ln_k_size; index_k++) { output_tot[index_k] = exp(output_tot[index_k]); } } else { for (index_k=0; index_k<psp->ln_k_size; index_k++) { for (index_ic1=0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md]); output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] = exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2]); } for (index_ic1=0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1+1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md])] = output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md])] *sqrt(output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md])] * output_ic[index_k * 
psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic2,index_ic2,psp->ic_size[index_md])]); } } } } } /** (b.) logarithmic mode: if only one initial condition, nothing to be done; if several initial conditions, convert output_tot to logarithmic format, output_ic is already in this format */ else { if (psp->ic_size[index_md] > 1) { for (index_k=0; index_k<psp->ln_k_size; index_k++) { /* we have already checked above that output_tot was positive */ output_tot[index_k] = log(output_tot[index_k]); } } } return _SUCCESS_; } /** * Matter power spectrum for arbitrary wavenumber, redshift and initial condition. * * This routine evaluates the matter power spectrum at a given value of k and z by * interpolating in a table of all P(k)'s computed at this z by spectra_pk_at_z() (when kmin <= k <= kmax), * or eventually by using directly the primordial spectrum (when 0 <= k < kmin): * the latter case is an approximation, valid when kmin << comoving Hubble scale today. * Returns zero when k=0. Returns an error when k<0 or k > kmax. * * This function can be * called from whatever module at whatever time, provided that * spectra_init() has been called before, and spectra_free() has not * been called yet. 
 *
 * @param pba    Input: pointer to background structure (used for converting z into tau)
 * @param ppm    Input: pointer to primordial structure (used only in the case 0 < k < kmin)
 * @param psp    Input: pointer to spectra structure (containing pre-computed table)
 * @param k      Input: wavenumber in 1/Mpc
 * @param z      Input: redshift
 * @param pk_tot Output: total matter power spectrum P(k) in Mpc**3
 * @param pk_ic  Output: for each pair of initial conditions, matter power spectra P(k) in Mpc**3
 * @return the error status
 */

int spectra_pk_at_k_and_z(
                          struct background * pba,
                          struct primordial * ppm,
                          struct spectra * psp,
                          double k,
                          double z,
                          double * pk_tot, /* pointer to a single number (must be already allocated) */
                          double * pk_ic   /* array of argument pk_ic[index_ic1_ic2] (must be already allocated only if several initial conditions) */
                          ) {

  /** Summary: */

  /** - define local variables */

  int index_md;
  int index_k;
  int last_index;
  int index_ic1,index_ic2,index_ic1_ic2;

  /* scratch buffers; NULL-initialized because only some branches allocate them */
  double * spectrum_at_z = NULL;
  double * spectrum_at_z_ic = NULL;
  double * spline;
  double * pk_primordial_k = NULL;
  double kmin;
  double * pk_primordial_kmin = NULL;

  index_md = psp->index_md_scalars;

  /** - first step: check that k is in valid range [0:kmax]
      (the test for z will be done when calling spectra_pk_at_z()) */

  class_test((k < 0.) || (k > exp(psp->ln_k[psp->ln_k_size-1])),
             psp->error_message,
             "k=%e out of bounds [%e:%e]",k,0.,exp(psp->ln_k[psp->ln_k_size-1]));

  /** - deal with case 0 <= k < kmin */

  if (k < exp(psp->ln_k[0])) {

    /** (a.) subcase k=0: then P(k)=0 */

    if (k == 0.) {
      if (psp->ic_size[index_md] == 1) {
        *pk_tot=0.;
      }
      else {
        /* with several initial conditions, zero every pair amplitude */
        for (index_ic1_ic2 = 0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) {
          pk_ic[index_ic1_ic2] = 0.;
        }
      }
    }

    /** (b.) subcase 0<k<kmin: in this case we know that on super-Hubble scales:
     *       P(k) = [some number] * k  * P_primordial(k)
     *       so
     *       P(k) = P(kmin) * (k P_primordial(k)) / (kmin P_primordial(kmin))
     *       (note that the result is accurate only if kmin is such that [a0 kmin] << H0) */

    else {

      /* compute P(k,z) which contains P(kmin,z)*/
      class_alloc(spectrum_at_z,
                  psp->ln_k_size*sizeof(double),
                  psp->error_message);
      if (psp->ic_size[index_md] > 1) {
        class_alloc(spectrum_at_z_ic,
                    sizeof(double)*psp->ic_ic_size[index_md]*psp->ln_k_size,
                    psp->error_message);
      }
      class_call(spectra_pk_at_z(pba, psp, linear, z, spectrum_at_z, spectrum_at_z_ic),
                 psp->error_message,
                 psp->error_message);

      /* compute P_primordial(k) */
      class_alloc(pk_primordial_k,
                  sizeof(double)*psp->ic_ic_size[index_md],
                  psp->error_message);
      class_call(primordial_spectrum_at_k(ppm, index_md, linear, k, pk_primordial_k),
                 ppm->error_message,psp->error_message);

      /* compute P_primordial(kmin) */
      kmin = exp(psp->ln_k[0]);
      class_alloc(pk_primordial_kmin,
                  sizeof(double)*psp->ic_ic_size[index_md],
                  psp->error_message);
      class_call(primordial_spectrum_at_k(ppm, index_md, linear, kmin, pk_primordial_kmin),
                 ppm->error_message,
                 psp->error_message);

      /* apply above analytic approximation for P(k); index_k=0 picks P(kmin,z) */
      index_k=0;
      if (psp->ic_size[index_md] == 1) {
        index_ic1_ic2 = 0;
        *pk_tot = spectrum_at_z[index_k]
          *k*pk_primordial_k[index_ic1_ic2]
          /kmin/pk_primordial_kmin[index_ic1_ic2];
      }
      else {
        for (index_ic1_ic2 = 0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) {
          pk_ic[index_ic1_ic2] = spectrum_at_z_ic[index_ic1_ic2]
            *k*pk_primordial_k[index_ic1_ic2]
            /kmin/pk_primordial_kmin[index_ic1_ic2];
        }
      }

      free(spectrum_at_z);
      if (psp->ic_size[index_md] > 1)
        free(spectrum_at_z_ic);
      free(pk_primordial_k);
      free(pk_primordial_kmin);
    }
  }

  /** - deal with case kmin <= k <= kmax */

  else {

    /* compute P(k,z) (in logarithmic format for more accurate interpolation) */
    class_alloc(spectrum_at_z,
                psp->ln_k_size*sizeof(double),
                psp->error_message);
    if (psp->ic_size[index_md] > 1) {
      class_alloc(spectrum_at_z_ic,
                  sizeof(double)*psp->ic_ic_size[index_md]*psp->ln_k_size,
                  psp->error_message);
    }
    class_call(spectra_pk_at_z(pba, psp, logarithmic, z, spectrum_at_z, spectrum_at_z_ic),
               psp->error_message,
               psp->error_message);

    /* get its second derivatives with spline, then interpolate, then convert to linear format */
    class_alloc(spline,
                sizeof(double)*psp->ic_ic_size[index_md]*psp->ln_k_size,
                psp->error_message);

    if (psp->ic_size[index_md] == 1) {

      class_call(array_spline_table_lines(psp->ln_k, psp->ln_k_size, spectrum_at_z, 1, spline, _SPLINE_NATURAL_, psp->error_message),
                 psp->error_message,
                 psp->error_message);
      class_call(array_interpolate_spline(psp->ln_k, psp->ln_k_size, spectrum_at_z, spline, 1, log(k), &last_index, pk_tot, 1, psp->error_message),
                 psp->error_message,
                 psp->error_message);
      *pk_tot = exp(*pk_tot);
    }
    else {

      class_call(array_spline_table_lines(psp->ln_k, psp->ln_k_size, spectrum_at_z_ic, psp->ic_ic_size[index_md], spline, _SPLINE_NATURAL_, psp->error_message),
                 psp->error_message,
                 psp->error_message);
      class_call(array_interpolate_spline(psp->ln_k, psp->ln_k_size, spectrum_at_z_ic, spline, psp->ic_ic_size[index_md], log(k), &last_index, pk_ic, psp->ic_ic_size[index_md], psp->error_message),
                 psp->error_message,
                 psp->error_message);

      /* diagonal pairs were stored as ln(P); convert to linear */
      for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
        index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md]);
        pk_ic[index_ic1_ic2] = exp(pk_ic[index_ic1_ic2]);
      }
      /* off-diagonal pairs were stored as correlation ratios; rescale by
         sqrt of the two diagonal spectra (already linear at this point) */
      for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
        for (index_ic2 = index_ic1+1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
          index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
          if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
            pk_ic[index_ic1_ic2] = pk_ic[index_ic1_ic2]*
              sqrt(pk_ic[index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md])]*
                   pk_ic[index_symmetric_matrix(index_ic2,index_ic2,psp->ic_size[index_md])]);
          }
          else {
            pk_ic[index_ic1_ic2] = 0.;
          }
        }
      }
      free(spectrum_at_z_ic);
    }

    free(spectrum_at_z);
    free(spline);
  }

  /** - last step: if more than one condition, sum over pk_ic to get
      pk_tot, and set back coefficients of non-correlated pairs to
      exactly zero. */

  if (psp->ic_size[index_md] > 1) {

    *pk_tot = 0.;

    for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
      for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
        index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);

        if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {

          /* off-diagonal terms appear twice in the symmetric sum */
          if (index_ic1 == index_ic2)
            *pk_tot += pk_ic[index_ic1_ic2];
          else
            *pk_tot += 2.*pk_ic[index_ic1_ic2];
        }
        else {
          pk_ic[index_ic1_ic2] = 0.;
        }
      }
    }

    class_test(*pk_tot <= 0.,
               psp->error_message,
               "for k=%e, the matrix of initial condition amplitudes was not positive definite, hence P(k)_total results negative",k);
  }

  return _SUCCESS_;

}

/**
 * Non-linear total matter power spectrum for arbitrary redshift.
 *
 * This routine evaluates the non-linear matter power spectrum at a given value of z by
 * interpolating in the pre-computed table (if several values of z have been stored)
 * or by directly reading it (if it only contains values at z=0 and we want P(k,z=0))
 *
 *
 * Can be called in two modes: linear or logarithmic.
 *
 * - linear: returns P(k) (units: Mpc^3)
 *
 * - logarithmic: returns ln(P(k))
 *
 * This function can be
 * called from whatever module at whatever time, provided that
 * spectra_init() has been called before, and spectra_free() has not
 * been called yet.
 *
 * @param pba        Input: pointer to background structure (used for converting z into tau)
 * @param psp        Input: pointer to spectra structure (containing pre-computed table)
 * @param mode       Input: linear or logarithmic
 * @param z          Input: redshift
 * @param output_tot Output: total matter power spectrum P(k) in Mpc**3 (linear mode), or its logarithms (logarithmic mode)
 * @return the error status
 */

int spectra_pk_nl_at_z(
                       struct background * pba,
                       struct spectra * psp,
                       enum linear_or_logarithmic mode,
                       double z,
                       double * output_tot /* array with argument output_tot[index_k] (must be already allocated) */
                       ) {

  /** Summary: */

  /** - define local variables */

  int last_index;
  int index_k;
  double tau,ln_tau;

  /** - first step: convert z into ln(tau) */

  class_call(background_tau_of_z(pba,z,&tau),
             pba->error_message,
             psp->error_message);

  class_test(tau <= 0.,
             psp->error_message,
             "negative or null value of conformal time: cannot interpolate");

  ln_tau = log(tau);

  /** - second step: for both modes (linear or logarithmic), store the
      spectrum in logarithmic format in the output array */

  /** (a.) if only values at tau=tau_today are stored and we want
      P(k,z=0), no need to interpolate */

  if (psp->ln_tau_size == 1) {

    class_test(z != 0.,
               psp->error_message,
               "asked z=%e but only P(k,z=0) has been tabulated",z);

    for (index_k=0; index_k<psp->ln_k_size; index_k++) {
      output_tot[index_k] = psp->ln_pk_nl[index_k];
    }
  }

  /** (b.) if several values of tau have been stored, use interpolation
      routine to get spectra at correct redshift */

  else {

    class_call(array_interpolate_spline(psp->ln_tau,
                                        psp->ln_tau_size,
                                        psp->ln_pk_nl,
                                        psp->ddln_pk_nl,
                                        psp->ln_k_size,
                                        ln_tau,
                                        &last_index,
                                        output_tot,
                                        psp->ln_k_size,
                                        psp->error_message),
               psp->error_message,
               psp->error_message);
  }

  /** - third step: eventually convert to linear format
      (the table is stored as ln(P), so only the linear mode needs work) */

  if (mode == linear) {
    for (index_k=0; index_k<psp->ln_k_size; index_k++) {
      output_tot[index_k] = exp(output_tot[index_k]);
    }
  }

  return _SUCCESS_;

}

/**
 * Non-linear total matter power spectrum for arbitrary wavenumber and redshift.
 *
 * This routine evaluates the matter power spectrum at a given value of k and z by
 * interpolating in a table of all P(k)'s computed at this z by spectra_pk_nl_at_z() (when kmin <= k <= kmax),
 * or eventually by using directly the primordial spectrum (when 0 <= k < kmin):
 * the latter case is an approximation, valid when kmin << comoving Hubble scale today.
 * Returns zero when k=0. Returns an error when k<0 or k > kmax.
 *
 * This function can be
 * called from whatever module at whatever time, provided that
 * spectra_init() has been called before, and spectra_free() has not
 * been called yet.
 *
 * @param pba    Input: pointer to background structure (used for converting z into tau)
 * @param ppm    Input: pointer to primordial structure (used only in the case 0 < k < kmin)
 * @param psp    Input: pointer to spectra structure (containing pre-computed table)
 * @param k      Input: wavenumber in 1/Mpc
 * @param z      Input: redshift
 * @param pk_tot Output: total matter power spectrum P(k) in Mpc**3
 * @return the error status
 */

int spectra_pk_nl_at_k_and_z(
                             struct background * pba,
                             struct primordial * ppm,
                             struct spectra * psp,
                             double k,
                             double z,
                             double * pk_tot /* pointer to a single number (must be already allocated) */
                             ) {

  /** Summary: */

  /** - define local variables */

  int index_md;
  int last_index;

  double * spectrum_at_z = NULL;
  double * spline;

  index_md = psp->index_md_scalars;

  /** - first step: check that k is in valid range [0:kmax] (the test for
      z will be done when calling spectra_pk_at_z()) */

  /* NOTE(review): unlike the linear version, the lower bound enforced here is
     kmin=exp(ln_k[0]), not 0, although the error message still prints 0 as the
     lower edge of the allowed range */
  class_test((k < exp(psp->ln_k[0])) || (k > exp(psp->ln_k[psp->ln_k_size-1])),
             psp->error_message,
             "k=%e out of bounds [%e:%e]",k,0.,exp(psp->ln_k[psp->ln_k_size-1]));

  /* compute P(k,z) (in logarithmic format for more accurate interpolation) */
  class_alloc(spectrum_at_z,
              psp->ln_k_size*sizeof(double),
              psp->error_message);
  class_call(spectra_pk_nl_at_z(pba, psp, logarithmic, z, spectrum_at_z),
             psp->error_message,
             psp->error_message);

  /* get its second derivatives with spline, then interpolate, then convert to linear format */
  /* NOTE(review): only ln_k_size entries are used below since the spline has a
     single column; the extra factor ic_ic_size in the allocation looks like a
     harmless over-allocation -- confirm before shrinking */
  class_alloc(spline,
              sizeof(double)*psp->ic_ic_size[index_md]*psp->ln_k_size,
              psp->error_message);

  class_call(array_spline_table_lines(psp->ln_k, psp->ln_k_size, spectrum_at_z, 1, spline, _SPLINE_NATURAL_, psp->error_message),
             psp->error_message,
             psp->error_message);

  class_call(array_interpolate_spline(psp->ln_k, psp->ln_k_size, spectrum_at_z, spline, 1, log(k), &last_index, pk_tot, 1, psp->error_message),
             psp->error_message,
             psp->error_message);

  /* the table holds ln(P); convert the interpolated value to linear */
  *pk_tot = exp(*pk_tot);

  free(spectrum_at_z);
  free(spline);

  return _SUCCESS_;

}

/**
 * Matter transfer functions
T_i(k) for arbitrary redshift and for all
 * initial conditions.
 *
 * This routine evaluates the matter transfer functions at a given value of z by
 * interpolating in the pre-computed table (if several values of z have been stored)
 * or by directly reading it (if it only contains values at z=0 and we want T_i(k,z=0))
 *
 *
 * This function can be
 * called from whatever module at whatever time, provided that
 * spectra_init() has been called before, and spectra_free() has not
 * been called yet.
 *
 * @param pba    Input: pointer to background structure (used for converting z into tau)
 * @param psp    Input: pointer to spectra structure (containing pre-computed table)
 * @param z      Input: redshift
 * @param output Output: matter transfer functions
 * @return the error status
 */

int spectra_tk_at_z(
                    struct background * pba,
                    struct spectra * psp,
                    double z,
                    double * output /* array with argument output[(index_k*psp->ic_size[index_md]+index_ic)*psp->tr_size+index_tr] (must be already allocated) */
                    ) {

  /** Summary: */

  /** - define local variables */

  int index_md;
  int last_index;
  int index_k;
  int index_tr;
  double tau,ln_tau;
  int index_ic;

  index_md = psp->index_md_scalars;

  /** - first step: convert z into ln(tau) */

  class_call(background_tau_of_z(pba,z,&tau),
             pba->error_message,
             psp->error_message);

  class_test(tau <= 0.,
             psp->error_message,
             "negative or null value of conformal time: cannot interpolate");

  ln_tau = log(tau);

  /** - second step: store the matter transfer functions in the output array */

  /** (a.) if only values at tau=tau_today are stored and we want
      T_i(k,z=0), no need to interpolate */

  if (psp->ln_tau_size == 1) {

    class_test(z != 0.,
               psp->error_message,
               "asked z=%e but only T_i(k,z=0) has been tabulated",z);

    /* straight copy of the tabulated z=0 values, flattened as
       (k, initial condition, transfer type) */
    for (index_k=0; index_k<psp->ln_k_size; index_k++)
      for (index_tr=0; index_tr<psp->tr_size; index_tr++)
        for (index_ic = 0; index_ic < psp->ic_size[index_md]; index_ic++)
          output[(index_k*psp->ic_size[index_md]+index_ic)*psp->tr_size+index_tr]
            = psp->matter_transfer[(index_k*psp->ic_size[index_md]+index_ic)*psp->tr_size+index_tr];

  }

  /** (b.) if several values of tau have been stored, use interpolation
      routine to get spectra at correct redshift */

  else {

    class_call(array_interpolate_spline(psp->ln_tau,
                                        psp->ln_tau_size,
                                        psp->matter_transfer,
                                        psp->ddmatter_transfer,
                                        psp->ic_size[index_md]*psp->tr_size*psp->ln_k_size,
                                        ln_tau,
                                        &last_index,
                                        output,
                                        psp->ic_size[index_md]*psp->tr_size*psp->ln_k_size,
                                        psp->error_message),
               psp->error_message,
               psp->error_message);
  }

  return _SUCCESS_;

}

/**
 * Matter transfer functions T_i(k) for arbitrary wavenumber, redshift
 * and initial condition.
 *
 * This routine evaluates the matter transfer functions at a given
 * value of k and z by interpolating in a table of all T_i(k,z)'s
 * computed at this z by spectra_tk_at_z() (when kmin <= k <= kmax).
 * Returns an error when k<kmin or k > kmax.
 *
 * This function can be called from whatever module at whatever time,
 * provided that spectra_init() has been called before, and
 * spectra_free() has not been called yet.
* * @param pba Input: pointer to background structure (used for converting z into tau) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param k Input: wavenumber in 1/Mpc * @param z Input: redshift * @param output Ouput: matter transfer functions * @return the error status */ int spectra_tk_at_k_and_z( struct background * pba, struct spectra * psp, double k, double z, double * output /* array with argument output[index_ic*psp->tr_size+index_tr] (must be already allocated) */ ) { /** Summary: */ /** - define local variables */ int index_md; int last_index; double * tks_at_z; double * ddtks_at_z; index_md = psp->index_md_scalars; /** - first step: check that k is in valid range [0:kmax] (the test for z will be done when calling spectra_tk_at_z()) */ class_test((k < 0.) || (k > exp(psp->ln_k[psp->ln_k_size-1])), psp->error_message, "k=%e out of bounds [%e:%e]",k,0.,exp(psp->ln_k[psp->ln_k_size-1])); /* compute T_i(k,z) */ class_alloc(tks_at_z, psp->ln_k_size*psp->tr_size*psp->ic_size[index_md]*sizeof(double), psp->error_message); class_call(spectra_tk_at_z(pba, psp, z, tks_at_z), psp->error_message, psp->error_message); /* get its second derivatives w.r.t. 
k with spline, then interpolate */ class_alloc(ddtks_at_z, psp->ln_k_size*psp->tr_size*psp->ic_size[index_md]*sizeof(double), psp->error_message); class_call(array_spline_table_lines(psp->ln_k, psp->ln_k_size, tks_at_z, psp->tr_size*psp->ic_size[index_md], ddtks_at_z, _SPLINE_NATURAL_, psp->error_message), psp->error_message, psp->error_message); class_call(array_interpolate_spline(psp->ln_k, psp->ln_k_size, tks_at_z, ddtks_at_z, psp->tr_size*psp->ic_size[index_md], log(k), &last_index, output, psp->tr_size*psp->ic_size[index_md], psp->error_message), psp->error_message, psp->error_message); free(tks_at_z); free(ddtks_at_z); return _SUCCESS_; } /** * This routine initializes the spectra structure (in particular, * computes table of anisotropy and Fourier spectra \f$ C_l^{X}, P(k), ... \f$) * * @param ppr Input : pointer to precision structure * @param pba Input : pointer to background structure (will provide H, Omega_m at redshift of interest) * @param ppt Input : pointer to perturbation structure * @param ptr Input : pointer to transfer structure * @param ppm Input : pointer to primordial structure * @param psp Output: pointer to initialized spectra structure * @return the error status */ int spectra_init( struct precision * ppr, struct background * pba, struct perturbs * ppt, struct primordial * ppm, struct nonlinear *pnl, struct transfers * ptr, struct spectra * psp ) { /** Summary: */ double TT_II,TT_RI,TT_RR; int l1,l2; /** - check that we really want to compute at least one spectrum */ if ((ppt->has_cls == _FALSE_) && (ppt->has_pk_matter == _FALSE_) && (ppt->has_density_transfers == _FALSE_) && (ppt->has_velocity_transfers == _FALSE_)) { psp->md_size = 0; if (psp->spectra_verbose > 0) printf("No spectra requested. 
Spectra module skipped.\n"); return _SUCCESS_; } else { if (psp->spectra_verbose > 0) printf("Computing unlensed linear spectra\n"); } /** - initialize indices and allocate some of the arrays in the spectra structure */ class_call(spectra_indices(pba,ppt,ptr,ppm,psp), psp->error_message, psp->error_message); /** - deal with C_l's, if any */ if (ppt->has_cls == _TRUE_) { class_call(spectra_cls(pba,ppt,ptr,ppm,psp), psp->error_message, psp->error_message); } else { psp->ct_size=0; } /** - deal with P(k,tau) and T_i(k,tau) */ if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_)) { class_call(spectra_k_and_tau(pba,ppt,psp), psp->error_message, psp->error_message); if (ppt->has_pk_matter == _TRUE_) { class_call(spectra_pk(pba,ppt,ppm,pnl,psp), psp->error_message, psp->error_message); } else { psp->ln_pk=NULL; } if ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_)) { class_call(spectra_matter_transfers(pba,ppt,psp), psp->error_message, psp->error_message); } else { psp->matter_transfer=NULL; } } else { psp->ln_k_size=0; } /* if there is one isocurvature mode, compute and store in the psp structure the isocurvature contribution to some bandpowers in different ranges of l, and the contribution to the primordial spectrum at different wavenumbers (used in the Planck analysis) */ if ((ppt->has_scalars == _TRUE_) && (ppt->has_cls == _TRUE_) && (ppt->ic_size[ppt->index_md_scalars] == 2)) { l1=2; l2=20; class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR), psp->error_message, psp->error_message); class_test(TT_II+TT_RI+TT_RR==0., psp->error_message, "should never happen"); psp->alpha_II_2_20=TT_II/(TT_II+TT_RI+TT_RR); psp->alpha_RI_2_20=TT_RI/(TT_II+TT_RI+TT_RR); psp->alpha_RR_2_20=TT_RR/(TT_II+TT_RI+TT_RR); l1=21; l2=200; class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR), psp->error_message, psp->error_message); class_test(TT_II+TT_RI+TT_RR==0., 
psp->error_message, "should never happen"); psp->alpha_II_21_200=TT_II/(TT_II+TT_RI+TT_RR); psp->alpha_RI_21_200=TT_RI/(TT_II+TT_RI+TT_RR); psp->alpha_RR_21_200=TT_RR/(TT_II+TT_RI+TT_RR); l1=201; l2=2500; class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR), psp->error_message, psp->error_message); class_test(TT_II+TT_RI+TT_RR==0., psp->error_message, "should never happen"); psp->alpha_II_201_2500=TT_II/(TT_II+TT_RI+TT_RR); psp->alpha_RI_201_2500=TT_RI/(TT_II+TT_RI+TT_RR); psp->alpha_RR_201_2500=TT_RR/(TT_II+TT_RI+TT_RR); l1=2; l2=2500; class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR), psp->error_message, psp->error_message); class_test(TT_II+TT_RI+TT_RR==0., psp->error_message, "should never happen"); psp->alpha_II_2_2500=TT_II/(TT_II+TT_RI+TT_RR); psp->alpha_RI_2_2500=TT_RI/(TT_II+TT_RI+TT_RR); psp->alpha_RR_2_2500=TT_RR/(TT_II+TT_RI+TT_RR); if (ppt->has_cdi==_TRUE_) { psp->alpha_kp=ppm->f_cdi*ppm->f_cdi /(1.+ppm->f_cdi*ppm->f_cdi); psp->alpha_k1=ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.002/ppm->k_pivot)) /(1.+ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.002/ppm->k_pivot))); psp->alpha_k2=ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.1/ppm->k_pivot)) /(1.+ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.1/ppm->k_pivot))); } if (ppt->has_nid==_TRUE_) { psp->alpha_kp=ppm->f_nid*ppm->f_nid /(1.+ppm->f_nid*ppm->f_nid); psp->alpha_k1=ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.002/ppm->k_pivot)) /(1.+ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.002/ppm->k_pivot))); psp->alpha_k2=ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.1/ppm->k_pivot)) /(1.+ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.1/ppm->k_pivot))); } if (ppt->has_niv==_TRUE_) { psp->alpha_kp=ppm->f_niv*ppm->f_niv /(1.+ppm->f_niv*ppm->f_niv); psp->alpha_k1=ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.002/ppm->k_pivot)) /(1.+ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.002/ppm->k_pivot))); 
psp->alpha_k2=ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.1/ppm->k_pivot)) /(1.+ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.1/ppm->k_pivot))); } } return _SUCCESS_; } /** * This routine frees all the memory space allocated by spectra_init(). * * To be called at the end of each run, only when no further calls to * spectra_cls_at_l(), spectra_pk_at_z(), spectra_pk_at_k_and_z() are needed. * * @param psp Input: pointer to spectra structure (which fields must be freed) * @return the error status */ int spectra_free( struct spectra * psp ) { int index_md; if (psp->md_size > 0) { if (psp->ct_size > 0) { for (index_md = 0; index_md < psp->md_size; index_md++) { free(psp->l_max_ct[index_md]); free(psp->cl[index_md]); free(psp->ddcl[index_md]); } free(psp->l); free(psp->l_size); free(psp->l_max_ct); free(psp->l_max); free(psp->cl); free(psp->ddcl); } if (psp->ln_k_size > 0) { free(psp->ln_tau); free(psp->ln_k); if (psp->ln_pk != NULL) { free(psp->ln_pk); if (psp->ln_tau_size > 1) { free(psp->ddln_pk); } if (psp->ln_pk_nl != NULL) { free(psp->ln_pk_nl); if (psp->ln_tau_size > 1) { free(psp->ddln_pk_nl); } } } if (psp->matter_transfer != NULL) { free(psp->matter_transfer); if (psp->ln_tau_size > 1) { free(psp->ddmatter_transfer); } } } } for (index_md=0; index_md < psp->md_size; index_md++) free(psp->is_non_zero[index_md]); free(psp->is_non_zero); free(psp->ic_size); free(psp->ic_ic_size); return _SUCCESS_; } /** * This routine defines indices and allocates tables in the spectra structure * * @param ppt Input : pointer to perturbation structure * @param ptr Input : pointer to transfers structure * @param ppm Input : pointer to primordial structure * @param psp Input/output: pointer to spectra structure * @return the error status */ int spectra_indices( struct background * pba, struct perturbs * ppt, struct transfers * ptr, struct primordial * ppm, struct spectra * psp ){ int index_ct; int index_md; int index_ic1_ic2; int index_tr; psp->md_size = 
ppt->md_size; if (ppt->has_scalars == _TRUE_) psp->index_md_scalars = ppt->index_md_scalars; class_alloc(psp->ic_size, sizeof(int)*psp->md_size, psp->error_message); class_alloc(psp->ic_ic_size, sizeof(int)*psp->md_size, psp->error_message); class_alloc(psp->is_non_zero, sizeof(short *)*psp->md_size, psp->error_message); for (index_md=0; index_md < psp->md_size; index_md++) { psp->ic_size[index_md] = ppm->ic_size[index_md]; psp->ic_ic_size[index_md] = ppm->ic_ic_size[index_md]; class_alloc(psp->is_non_zero[index_md], sizeof(short)*psp->ic_ic_size[index_md], psp->error_message); for (index_ic1_ic2=0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) psp->is_non_zero[index_md][index_ic1_ic2] = ppm->is_non_zero[index_md][index_ic1_ic2]; } if (ppt->has_cls == _TRUE_) { /* types of C_l's relevant for both scalars and tensors: TT, EE, TE */ index_ct=0; if (ppt->has_cl_cmb_temperature == _TRUE_) { psp->has_tt = _TRUE_; psp->index_ct_tt=index_ct; index_ct++; } else { psp->has_tt = _FALSE_; } if (ppt->has_cl_cmb_polarization == _TRUE_) { psp->has_ee = _TRUE_; psp->index_ct_ee=index_ct; index_ct++; } else { psp->has_ee = _FALSE_; } if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_cmb_polarization == _TRUE_)) { psp->has_te = _TRUE_; psp->index_ct_te=index_ct; index_ct++; } else { psp->has_te = _FALSE_; } if (ppt->has_cl_cmb_polarization == _TRUE_) { psp->has_bb = _TRUE_; psp->index_ct_bb=index_ct; index_ct++; } else { psp->has_bb = _FALSE_; } /* types of C_l's relevant only for scalars: phi-phi, T-phi, E-phi, d-d, T-d */ if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_pp = _TRUE_; psp->index_ct_pp=index_ct; index_ct++; } else { psp->has_pp = _FALSE_; } if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_tp = _TRUE_; psp->index_ct_tp=index_ct; index_ct++; } else { psp->has_tp = _FALSE_; } psp->ct_size = index_ct; if 
((ppt->has_cl_cmb_polarization == _TRUE_) && (ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_ep = _TRUE_; psp->index_ct_ep=index_ct; index_ct++; } else { psp->has_ep = _FALSE_; } if ((ppt->has_scalars == _TRUE_) && ((ppt->has_cl_number_count == _TRUE_) || (ppt->has_cl_lensing_potential == _TRUE_))) psp->d_size=ppt->selection_num; else psp->d_size=0; if ((ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_dd = _TRUE_; psp->index_ct_dd=index_ct; index_ct+=(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2; } else { psp->has_dd = _FALSE_; } /* the computation of C_l^Td would require a very good sampling of transfer functions over a wide range, and a huge computation time. In the current version, we prefer to switch it off, rather than either slowing down the code considerably, or producing very inaccurate spectra. if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_td = _TRUE_; psp->index_ct_td=index_ct; index_ct+=psp->d_size; } else { psp->has_td = _FALSE_; } */ psp->has_td = _FALSE_; if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_pd = _TRUE_; psp->index_ct_pd=index_ct; index_ct+=psp->d_size; } else { psp->has_pd = _FALSE_; } psp->has_td = _FALSE_; if ((ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_ll = _TRUE_; psp->index_ct_ll=index_ct; index_ct+=(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2; } else { psp->has_ll = _FALSE_; } /* the computation of C_l^Tl would require a very good sampling of transfer functions over a wide range, and a huge computation time. In the current version, we prefer to switch it off, rather than either slowing down the code considerably, or producing very inaccurate spectra. 
if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_tl = _TRUE_; psp->index_ct_tl=index_ct; index_ct+=psp->d_size; } else { psp->has_tl = _FALSE_; } */ psp->has_tl = _FALSE_; if ((ppt->has_cl_number_count == _TRUE_) && (ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) { psp->has_dl = _TRUE_; psp->index_ct_dl=index_ct; index_ct += psp->d_size*psp->d_size - (psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag); } else { psp->has_dl = _FALSE_; } psp->ct_size = index_ct; /* infer from input quantities the l_max for each mode and type, l_max_ct[index_md][index_type]. Maximize it over index_ct, and then over index_md. */ class_alloc(psp->l_max,sizeof(int*)*psp->md_size,psp->error_message); class_alloc(psp->l_max_ct,sizeof(int*)*psp->md_size,psp->error_message); for (index_md=0; index_md<psp->md_size; index_md++) { class_calloc(psp->l_max_ct[index_md],psp->ct_size,sizeof(int),psp->error_message); } if (ppt->has_scalars == _TRUE_) { /* spectra computed up to l_scalar_max */ if (psp->has_tt == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_tt] = ppt->l_scalar_max; if (psp->has_ee == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_ee] = ppt->l_scalar_max; if (psp->has_te == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_te] = ppt->l_scalar_max; if (psp->has_pp == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_pp] = ppt->l_scalar_max; if (psp->has_tp == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_tp] = ppt->l_scalar_max; if (psp->has_ep == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_ep] = ppt->l_scalar_max; /* spectra computed up to l_lss_max */ if (psp->has_dd == _TRUE_) for (index_ct=psp->index_ct_dd; index_ct<psp->index_ct_dd+(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = 
ppt->l_lss_max; if (psp->has_td == _TRUE_) for (index_ct=psp->index_ct_td; index_ct<psp->index_ct_td+psp->d_size; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max,ppt->l_lss_max); if (psp->has_pd == _TRUE_) for (index_ct=psp->index_ct_pd; index_ct<psp->index_ct_pd+psp->d_size; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max,ppt->l_lss_max); if (psp->has_ll == _TRUE_) for (index_ct=psp->index_ct_ll; index_ct<psp->index_ct_ll+(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max; if (psp->has_tl == _TRUE_) for (index_ct=psp->index_ct_tl; index_ct<psp->index_ct_tl+psp->d_size; index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max,ppt->l_lss_max); if (psp->has_dl == _TRUE_) for (index_ct=psp->index_ct_dl; index_ct < psp->index_ct_dl+(psp->d_size*psp->d_size - (psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag)); index_ct++) psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max; } if (ppt->has_tensors == _TRUE_) { /* spectra computed up to l_tensor_max */ if (psp->has_tt == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_tt] = ppt->l_tensor_max; if (psp->has_ee == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_ee] = ppt->l_tensor_max; if (psp->has_te == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_te] = ppt->l_tensor_max; if (psp->has_bb == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_bb] = ppt->l_tensor_max; } /* maximizations */ psp->l_max_tot = 0.; for (index_md=0; index_md < psp->md_size; index_md++) { psp->l_max[index_md] = 0.; for (index_ct=0.; index_ct<psp->ct_size; index_ct++) psp->l_max[index_md] = MAX(psp->l_max[index_md],psp->l_max_ct[index_md][index_ct]); psp->l_max_tot = MAX(psp->l_max_tot,psp->l_max[index_md]); } } /* indices for species associated with a matter transfer function in 
Fourier space */ index_tr=0; class_define_index(psp->index_tr_delta_g,ppt->has_source_delta_g,index_tr,1); class_define_index(psp->index_tr_delta_b,ppt->has_source_delta_b,index_tr,1); class_define_index(psp->index_tr_delta_cdm,ppt->has_source_delta_cdm,index_tr,1); class_define_index(psp->index_tr_delta_dcdm,ppt->has_source_delta_dcdm,index_tr,1); class_define_index(psp->index_tr_delta_scf,ppt->has_source_delta_scf,index_tr,1); class_define_index(psp->index_tr_delta_fld,ppt->has_source_delta_fld,index_tr,1); class_define_index(psp->index_tr_delta_ur,ppt->has_source_delta_ur,index_tr,1); class_define_index(psp->index_tr_delta_dr,ppt->has_source_delta_dr,index_tr,1); class_define_index(psp->index_tr_delta_ncdm1,ppt->has_source_delta_ncdm,index_tr,pba->N_ncdm); class_define_index(psp->index_tr_delta_tot,ppt->has_density_transfers,index_tr,1); /* indices for species associated with a velocity transfer function in Fourier space */ class_define_index(psp->index_tr_theta_g,ppt->has_source_theta_g,index_tr,1); class_define_index(psp->index_tr_theta_b,ppt->has_source_theta_b,index_tr,1); class_define_index(psp->index_tr_theta_cdm,ppt->has_source_theta_cdm,index_tr,1); class_define_index(psp->index_tr_theta_dcdm,ppt->has_source_theta_dcdm,index_tr,1); class_define_index(psp->index_tr_theta_scf,ppt->has_source_theta_scf,index_tr,1); class_define_index(psp->index_tr_theta_fld,ppt->has_source_theta_fld,index_tr,1); class_define_index(psp->index_tr_theta_ur,ppt->has_source_theta_ur,index_tr,1); class_define_index(psp->index_tr_theta_dr,ppt->has_source_theta_ur,index_tr,1); class_define_index(psp->index_tr_theta_ncdm1,ppt->has_source_theta_ncdm,index_tr,pba->N_ncdm); class_define_index(psp->index_tr_theta_tot,ppt->has_velocity_transfers,index_tr,1); psp->tr_size = index_tr; return _SUCCESS_; } /** * This routine computes a table of values for all harmonic spectra C_l's, * given the transfer functions and primordial spectra. 
 *
 * @param pba Input : pointer to background structure
 * @param ppt Input : pointer to perturbation structure
 * @param ptr Input : pointer to transfers structure
 * @param ppm Input : pointer to primordial structure
 * @param psp Input/Output: pointer to spectra structure
 * @return the error status
 */

int spectra_cls(
                struct background * pba,
                struct perturbs * ppt,
                struct transfers * ptr,
                struct primordial * ppm,
                struct spectra * psp
                ) {

  /** Summary: */

  /** - define local variables */

  int index_md;
  int index_ic1,index_ic2,index_ic1_ic2;
  int index_l;
  int index_ct;
  int cl_integrand_num_columns;

  double * cl_integrand; /* array with argument cl_integrand[index_k*cl_integrand_num_columns+1+psp->index_ct] */
  double * transfer_ic1; /* array with argument transfer_ic1[index_tt] */
  double * transfer_ic2; /* idem */
  double * primordial_pk;  /* array with argument primordial_pk[index_ic_ic]*/

  /* This code can be optionally compiled with the openmp option for
     parallel computation. Inside parallel regions, the use of the
     command "return" is forbidden. For error management, instead of
     "return _FAILURE_", we will set the variable below to "abort =
     _TRUE_". This will lead to a "return _FAILURE_" just after leaving
     the parallel region. */
  int abort;

#ifdef _OPENMP
  /* instrumentation times (per-thread wall-clock timing of the l loop) */
  double tstart, tstop;
#endif

  /** - allocate pointers to arrays where results will be stored */

  class_alloc(psp->l_size,sizeof(int)*psp->md_size,psp->error_message);
  class_alloc(psp->cl,sizeof(double *)*psp->md_size,psp->error_message);
  class_alloc(psp->ddcl,sizeof(double *)*psp->md_size,psp->error_message);

  psp->l_size_max = ptr->l_size_max;
  class_alloc(psp->l,sizeof(double)*psp->l_size_max,psp->error_message);

  /** - store values of l (copied as doubles from the transfer module's
      integer multipole list, in view of spline interpolation over l) */

  for (index_l=0; index_l < psp->l_size_max; index_l++) {
    psp->l[index_l] = (double)ptr->l[index_l];
  }

  /** - loop over modes (scalar, tensors, etc). For each mode: */

  for (index_md = 0; index_md < psp->md_size; index_md++) {

    /** - a) store number of l values for this mode */

    psp->l_size[index_md] = ptr->l_size[index_md];

    /** - b) allocate arrays where results will be stored; layout of
        psp->cl[index_md] is [(index_l * ic_ic_size + index_ic1_ic2) * ct_size + index_ct] */

    class_alloc(psp->cl[index_md],sizeof(double)*psp->l_size[index_md]*psp->ct_size*psp->ic_ic_size[index_md],psp->error_message);
    class_alloc(psp->ddcl[index_md],sizeof(double)*psp->l_size[index_md]*psp->ct_size*psp->ic_ic_size[index_md],psp->error_message);
    cl_integrand_num_columns = 1+psp->ct_size*2; /* one for k, ct_size for each type, ct_size for each second derivative of each type */

    /** d) loop over initial conditions */
    /* only the upper triangle (index_ic2 >= index_ic1) of the symmetric
       ic x ic correlation matrix is computed */
    for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
      for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
        index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);

        /* non-diagonal coefficients should be computed only if non-zero correlation */
        if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {

          /* initialize error management flag */
          abort = _FALSE_;

          /* beginning of parallel region */

#pragma omp parallel                                                    \
  shared(ptr,ppm,index_md,psp,ppt,cl_integrand_num_columns,index_ic1,index_ic2,abort) \
  private(tstart,cl_integrand,primordial_pk,transfer_ic1,transfer_ic2,index_l,tstop)

          {

#ifdef _OPENMP
            tstart = omp_get_wtime();
#endif

            /* each thread owns its private workspace buffers; the
               class_alloc_parallel macro sets abort instead of
               returning on failure */

            class_alloc_parallel(cl_integrand,
                                 ptr->q_size*cl_integrand_num_columns*sizeof(double),
                                 psp->error_message);

            class_alloc_parallel(primordial_pk,
                                 psp->ic_ic_size[index_md]*sizeof(double),
                                 psp->error_message);

            class_alloc_parallel(transfer_ic1,
                                 ptr->tt_size[index_md]*sizeof(double),
                                 psp->error_message);

            class_alloc_parallel(transfer_ic2,
                                 ptr->tt_size[index_md]*sizeof(double),
                                 psp->error_message);

#pragma omp for schedule (dynamic)

            /** - loop over l values defined in the transfer module.
                For each l, compute the C_l's for all types (TT, TE, ...)
                by convolving primordial spectra with transfer
                functions. This elementary task is assigned to
                spectra_compute_cl() */

            for (index_l=0; index_l < ptr->l_size[index_md]; index_l++) {

              /* make the shared abort flag visible to this thread so
                 class_call_parallel can skip work once any thread failed */
#pragma omp flush(abort)

              class_call_parallel(spectra_compute_cl(pba, ppt, ptr, ppm, psp, index_md, index_ic1, index_ic2, index_l, cl_integrand_num_columns, cl_integrand, primordial_pk, transfer_ic1, transfer_ic2),
                                  psp->error_message,
                                  psp->error_message);

            } /* end of loop over l */

#ifdef _OPENMP
            tstop = omp_get_wtime();
            if (psp->spectra_verbose > 1)
              printf("In %s: time spent in parallel region (loop over l's) = %e s for thread %d\n",
                     __func__,tstop-tstart,omp_get_thread_num());
#endif

            free(cl_integrand);
            free(primordial_pk);
            free(transfer_ic1);
            free(transfer_ic2);

          } /* end of parallel region */

          if (abort == _TRUE_) return _FAILURE_;

        }
        else {

          /* set non-diagonal coefficients to zero if pair of ic's uncorrelated */

          for (index_l=0; index_l < ptr->l_size[index_md]; index_l++) {
            for (index_ct=0; index_ct<psp->ct_size; index_ct++) {
              psp->cl[index_md]
                [(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct]
                = 0.;
            }
          }
        }
      }
    }

    /** - e) now that for a given mode, all possible C_l's have been
        computed, compute second derivative of the array in which they
        are stored, in view of spline interpolation. */

    class_call(array_spline_table_lines(psp->l,
                                        psp->l_size[index_md],
                                        psp->cl[index_md],
                                        psp->ic_ic_size[index_md]*psp->ct_size,
                                        psp->ddcl[index_md],
                                        _SPLINE_EST_DERIV_,
                                        psp->error_message),
               psp->error_message,
               psp->error_message);
  }

  return _SUCCESS_;
}

/**
 * This routine computes the C_l's for a given mode, pair of initial conditions
 * and multipole, but for all types (TT, TE...), by convolving the
 * transfer functions with the primordial spectra.
 *
 * @param pba Input : pointer to background structure
 * @param ppt Input : pointer to perturbation structure
 * @param ptr Input : pointer to transfers structure
 * @param ppm Input : pointer to primordial structure
 * @param psp Input/Output: pointer to spectra structure (result stored here)
 * @param index_md Input : index of mode under consideration
 * @param index_ic1 Input : index of first initial condition in the correlator
 * @param index_ic2 Input : index of second initial condition in the correlator
 * @param index_l Input : index of multipole under consideration
 * @param cl_integrand_num_columns Input : number of columns in cl_integrand
 * @param cl_integrand Input : an allocated workspace
 * @param primordial_pk Input : table of primordial spectrum values
 * @param transfer_ic1 Input : table of transfer function values for first initial condition
 * @param transfer_ic2 Input : table of transfer function values for second initial condition
 * @return the error status
 */

int spectra_compute_cl(
                       struct background * pba,
                       struct perturbs * ppt,
                       struct transfers * ptr,
                       struct primordial * ppm,
                       struct spectra * psp,
                       int index_md,
                       int index_ic1,
                       int index_ic2,
                       int index_l,
                       int cl_integrand_num_columns,
                       double * cl_integrand,
                       double * primordial_pk,
                       double * transfer_ic1,
                       double * transfer_ic2
                       ) {

  /* local loop indices and scratch values */
  int index_q;
  int index_tt;
  int index_ct;
  int index_d1,index_d2;
  double k;
  double clvalue;
  int index_ic1_ic2;
  double transfer_ic1_temp=0.;
  double transfer_ic2_temp=0.;
  double * transfer_ic1_nc=NULL; /* combined number-count transfer, one entry per redshift bin */
  double * transfer_ic2_nc=NULL; /* idem for the second initial condition */
  double factor;
  int index_q_spline=0;

  index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);

  if (ppt->has_cl_number_count == _TRUE_) {
    class_alloc(transfer_ic1_nc,psp->d_size*sizeof(double),psp->error_message);
    class_alloc(transfer_ic2_nc,psp->d_size*sizeof(double),psp->error_message);
  }

  /* fill the integrand table: one row per wavenumber q, first column
     holds k, then one column per spectrum type (second-derivative
     columns are filled later by array_spline) */

  for (index_q=0; index_q < ptr->q_size; index_q++) {

    //q = ptr->q[index_q];
    k = ptr->k[index_md][index_q];

    cl_integrand[index_q*cl_integrand_num_columns+0] = k;

    class_call(primordial_spectrum_at_k(ppm,index_md,linear,k,primordial_pk),
               ppm->error_message,
               psp->error_message);

    /* above routine checks that k>0: no possible division by zero below */

    /* read the transfer functions of both initial conditions for this
       (mode, l, q); when the two ic's coincide, avoid a second lookup */

    for (index_tt=0; index_tt < ptr->tt_size[index_md]; index_tt++) {

      transfer_ic1[index_tt] =
        ptr->transfer[index_md]
        [((index_ic1 * ptr->tt_size[index_md] + index_tt)
          * ptr->l_size[index_md] + index_l)
         * ptr->q_size + index_q];

      if (index_ic1 == index_ic2) {
        transfer_ic2[index_tt] = transfer_ic1[index_tt];
      }
      else {
        transfer_ic2[index_tt] = ptr->transfer[index_md]
          [((index_ic2 * ptr->tt_size[index_md] + index_tt)
            * ptr->l_size[index_md] + index_l)
           * ptr->q_size + index_q];
      }
    }

    /* define combinations of transfer functions */

    if (ppt->has_cl_cmb_temperature == _TRUE_) {

      /* total temperature transfer: sum of the contributing terms,
         which differ per mode (t0 absent for vectors, only t2 for tensors) */

      if (_scalars_) {

        transfer_ic1_temp = transfer_ic1[ptr->index_tt_t0] + transfer_ic1[ptr->index_tt_t1] + transfer_ic1[ptr->index_tt_t2];
        transfer_ic2_temp = transfer_ic2[ptr->index_tt_t0] + transfer_ic2[ptr->index_tt_t1] + transfer_ic2[ptr->index_tt_t2];

      }

      if (_vectors_) {

        transfer_ic1_temp = transfer_ic1[ptr->index_tt_t1] + transfer_ic1[ptr->index_tt_t2];
        transfer_ic2_temp = transfer_ic2[ptr->index_tt_t1] + transfer_ic2[ptr->index_tt_t2];

      }

      if (_tensors_) {

        transfer_ic1_temp = transfer_ic1[ptr->index_tt_t2];
        transfer_ic2_temp = transfer_ic2[ptr->index_tt_t2];

      }
    }

    if (ppt->has_cl_number_count == _TRUE_) {

      /* combined number-count transfer per redshift bin: sum of the
         requested contributions (density, redshift-space distortions,
         lensing, relativistic/GR terms) */

      for (index_d1=0; index_d1<psp->d_size; index_d1++) {

        transfer_ic1_nc[index_d1] = 0.;
        transfer_ic2_nc[index_d1] = 0.;

        if (ppt->has_nc_density == _TRUE_) {
          transfer_ic1_nc[index_d1] += transfer_ic1[ptr->index_tt_density+index_d1];
          transfer_ic2_nc[index_d1] += transfer_ic2[ptr->index_tt_density+index_d1];
        }

        if (ppt->has_nc_rsd == _TRUE_) {
          transfer_ic1_nc[index_d1]
            += transfer_ic1[ptr->index_tt_rsd+index_d1]
            + transfer_ic1[ptr->index_tt_d0+index_d1]
            + transfer_ic1[ptr->index_tt_d1+index_d1];
          transfer_ic2_nc[index_d1]
            += transfer_ic2[ptr->index_tt_rsd+index_d1]
            + transfer_ic2[ptr->index_tt_d0+index_d1]
            + transfer_ic2[ptr->index_tt_d1+index_d1];
        }

        if (ppt->has_nc_lens == _TRUE_) {
          /* lensing contribution enters with an l(l+1) prefactor */
          transfer_ic1_nc[index_d1] +=
            psp->l[index_l]*(psp->l[index_l]+1.)*transfer_ic1[ptr->index_tt_nc_lens+index_d1];
          transfer_ic2_nc[index_d1] +=
            psp->l[index_l]*(psp->l[index_l]+1.)*transfer_ic2[ptr->index_tt_nc_lens+index_d1];
        }

        if (ppt->has_nc_gr == _TRUE_) {
          transfer_ic1_nc[index_d1]
            += transfer_ic1[ptr->index_tt_nc_g1+index_d1]
            + transfer_ic1[ptr->index_tt_nc_g2+index_d1]
            + transfer_ic1[ptr->index_tt_nc_g3+index_d1]
            + transfer_ic1[ptr->index_tt_nc_g4+index_d1]
            + transfer_ic1[ptr->index_tt_nc_g5+index_d1];
          transfer_ic2_nc[index_d1]
            += transfer_ic2[ptr->index_tt_nc_g1+index_d1]
            + transfer_ic2[ptr->index_tt_nc_g2+index_d1]
            + transfer_ic2[ptr->index_tt_nc_g3+index_d1]
            + transfer_ic2[ptr->index_tt_nc_g4+index_d1]
            + transfer_ic2[ptr->index_tt_nc_g5+index_d1];
        }

      }
    }

    /* integrand of Cl's */

    /* note: we must integrate

       C_l = int [4 pi dk/k calP(k) Delta1_l(q) Delta2_l(q)]

       where calP(k) is the dimensionless power spectrum equal to a
       constant in the scale-invariant case, and to P(k) = A_s
       k^(ns-1) otherwise and q=sqrt(k2+K) (scalars) or sqrt(k2+2K)
       (vectors) or sqrt(k2+3K) (tensors)

       In the literature, people often rewrite the integral in terms
       of q and absorb the Jacobian of the change of variables in a
       redefinition of the primordial spectrum. Let us illustrate this
       for scalars:

       dk/k = kdk/k2 = qdq/k2 = dq/q * (q/k)^2 = dq/q * [q2/(q2-K)] = q2dq * 1/[q(q2-K)]

       This factor 1/[q(q2-K)] is commonly absorbed in the definition
       of calP. Then one would have

       C_l = int [4 pi q2 dq {A_s k^(ns-1)/[q(q2-K)]} Delta1_l(q) Delta2_l(q)]

       Sometimes in the literature, the factor (k2-3K)=(q2-4K) present
       in the initial conditions of scalar transfer functions (if
       normalized to curvature R=1) is also absorbed in the definition
       of the power spectrum. Then the curvature power spectrum reads

       calP = (q2-4K)/[q(q2-K)] * (k/k)^ns

       In CLASS we prefer to define calP = (k/k)^ns like in the flat
       case, to have the factor (q2-4K) in the initial conditions, and
       the factor 1/[q(q2-K)] doesn't need to be there since we
       integrate over dk/k.

       For tensors, the change of variable described above gives a
       slightly different result:

       dk/k = kdk/k2 = qdq/k2 = dq/q * (q/k)^2 = dq/q * [q2/(q2-3K)] = q2dq * 1/[q(q2-3K)]

       But for tensors there are extra curvature-related correction
       factors to take into account. See the comments in the
       perturbation module, related to initial conditions for
       tensors. */

    factor = 4. * _PI_ / k;

    if (psp->has_tt == _TRUE_)
      cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_tt]=
        primordial_pk[index_ic1_ic2]
        * transfer_ic1_temp
        * transfer_ic2_temp
        * factor;

    if (psp->has_ee == _TRUE_)
      cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_ee]=
        primordial_pk[index_ic1_ic2]
        * transfer_ic1[ptr->index_tt_e]
        * transfer_ic2[ptr->index_tt_e]
        * factor;

    /* cross-spectra are symmetrized over the two initial conditions
       (0.5*(T1*E2 + E1*T2)), which matters only when ic1 != ic2 */

    if (psp->has_te == _TRUE_)
      cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_te]=
        primordial_pk[index_ic1_ic2]
        * 0.5*(transfer_ic1_temp * transfer_ic2[ptr->index_tt_e] +
               transfer_ic1[ptr->index_tt_e] * transfer_ic2_temp)
        * factor;

    if (_tensors_ && (psp->has_bb == _TRUE_))
      cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_bb]=
        primordial_pk[index_ic1_ic2]
        * transfer_ic1[ptr->index_tt_b]
        * transfer_ic2[ptr->index_tt_b]
        * factor;

    if (_scalars_ && (psp->has_pp == _TRUE_))
      cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_pp]=
        primordial_pk[index_ic1_ic2]
        * transfer_ic1[ptr->index_tt_lcmb]
        * transfer_ic2[ptr->index_tt_lcmb]
        * factor;

    if (_scalars_ && (psp->has_tp == _TRUE_))
      cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_tp]=
        primordial_pk[index_ic1_ic2]
        * 0.5*(transfer_ic1_temp * transfer_ic2[ptr->index_tt_lcmb] +
               transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2_temp)
        * factor;

    if (_scalars_ && (psp->has_ep == _TRUE_))
      cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_ep]=
        primordial_pk[index_ic1_ic2]
        * 0.5*(transfer_ic1[ptr->index_tt_e] * transfer_ic2[ptr->index_tt_lcmb] +
               transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2[ptr->index_tt_e])
        * factor;

    /* density x density: only the diagonal band of width non_diag of
       the bin x bin matrix is stored, hence the running index_ct */

    if (_scalars_ && (psp->has_dd == _TRUE_)) {
      index_ct=0;
      for (index_d1=0; index_d1<psp->d_size; index_d1++) {
        for (index_d2=index_d1; index_d2<=MIN(index_d1+psp->non_diag,psp->d_size-1); index_d2++) {
          cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_dd+index_ct]=
            primordial_pk[index_ic1_ic2]
            * transfer_ic1_nc[index_d1]
            * transfer_ic2_nc[index_d2]
            * factor;
          index_ct++;
        }
      }
    }

    if (_scalars_ && (psp->has_td == _TRUE_)) {
      for (index_d1=0; index_d1<psp->d_size; index_d1++) {
        cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_td+index_d1]=
          primordial_pk[index_ic1_ic2]
          * 0.5*(transfer_ic1_temp * transfer_ic2_nc[index_d1] +
                 transfer_ic1_nc[index_d1] * transfer_ic2_temp)
          * factor;
      }
    }

    if (_scalars_ && (psp->has_pd == _TRUE_)) {
      for (index_d1=0; index_d1<psp->d_size; index_d1++) {
        cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_pd+index_d1]=
          primordial_pk[index_ic1_ic2]
          * 0.5*(transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2_nc[index_d1] +
                 transfer_ic1_nc[index_d1] * transfer_ic2[ptr->index_tt_lcmb])
          * factor;
      }
    }

    if (_scalars_ && (psp->has_ll == _TRUE_)) {
      index_ct=0;
      for (index_d1=0; index_d1<psp->d_size; index_d1++) {
        for (index_d2=index_d1; index_d2<=MIN(index_d1+psp->non_diag,psp->d_size-1); index_d2++) {
          cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_ll+index_ct]=
            primordial_pk[index_ic1_ic2]
            * transfer_ic1[ptr->index_tt_lensing+index_d1]
            * transfer_ic2[ptr->index_tt_lensing+index_d2]
            * factor;
          index_ct++;
        }
      }
    }

    if (_scalars_ && (psp->has_tl == _TRUE_)) {
      for (index_d1=0; index_d1<psp->d_size; index_d1++) {
        cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_tl+index_d1]=
          primordial_pk[index_ic1_ic2]
          * 0.5*(transfer_ic1_temp * transfer_ic2[ptr->index_tt_lensing+index_d1] +
                 transfer_ic1[ptr->index_tt_lensing+index_d1] * transfer_ic2_temp)
          * factor;
      }
    }

    /* density x lensing is not symmetric, so the band is two-sided
       (index_d2 runs from index_d1-non_diag to index_d1+non_diag) */

    if (_scalars_ && (psp->has_dl == _TRUE_)) {
      index_ct=0;
      for (index_d1=0; index_d1<psp->d_size; index_d1++) {
        for (index_d2=MAX(index_d1-psp->non_diag,0); index_d2<=MIN(index_d1+psp->non_diag,psp->d_size-1); index_d2++) {
          cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_dl+index_ct]=
            primordial_pk[index_ic1_ic2]
            * transfer_ic1_nc[index_d1] * transfer_ic2[ptr->index_tt_lensing+index_d2]
            * factor;
          index_ct++;
        }
      }
    }
  }

  for (index_ct=0; index_ct<psp->ct_size; index_ct++) {

    /* treat null spectra (C_l^BB of scalars, C_l^pp of tensors, etc.) */

    if ((_scalars_ && (psp->has_bb == _TRUE_) && (index_ct == psp->index_ct_bb)) ||
        (_tensors_ && (psp->has_pp == _TRUE_) && (index_ct == psp->index_ct_pp)) ||
        (_tensors_ && (psp->has_tp == _TRUE_) && (index_ct == psp->index_ct_tp)) ||
        (_tensors_ && (psp->has_ep == _TRUE_) && (index_ct == psp->index_ct_ep)) ||
        (_tensors_ && (psp->has_dd == _TRUE_) && (index_ct == psp->index_ct_dd)) ||
        (_tensors_ && (psp->has_td == _TRUE_) && (index_ct == psp->index_ct_td)) ||
        (_tensors_ && (psp->has_pd == _TRUE_) && (index_ct == psp->index_ct_pd)) ||
        (_tensors_ && (psp->has_ll == _TRUE_) && (index_ct == psp->index_ct_ll)) ||
        (_tensors_ && (psp->has_tl == _TRUE_) && (index_ct == psp->index_ct_tl)) ||
        (_tensors_ && (psp->has_dl == _TRUE_) && (index_ct == psp->index_ct_dl))
        ) {

      psp->cl[index_md]
        [(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct] = 0.;

    }
    /* for non-zero spectra, integrate over q */
    else {

      /* spline the integrand over the whole range of k's */

      class_call(array_spline(cl_integrand,
                              cl_integrand_num_columns,
                              ptr->q_size,
                              0,
                              1+index_ct,
                              1+psp->ct_size+index_ct,
                              _SPLINE_EST_DERIV_,
                              psp->error_message),
                 psp->error_message,
                 psp->error_message);

      /* Technical point: we will now do a spline integral over the
         whole range of k's, excepted in the closed (K>0) case. In
         that case, it is a bad idea to spline over the values of k
         corresponding to nu<nu_flat_approximation. In this region, nu
         values are integer values, so the steps dq and dk have some
         discrete jumps. This makes the spline routine less accurate
         than a trapezoidal integral with finer sampling. So, in the
         closed case, we set index_q_spline to
         ptr->index_q_flat_approximation, to tell the integration
         routine that below this index, it should treat the integral
         as a trapezoidal one. For testing, one is free to set
         index_q_spline to 0, to enforce spline integration
         everywhere, or to (ptr->q_size-1), to enforce trapezoidal
         integration everywhere. */

      if (pba->sgnK == 1) {
        index_q_spline = ptr->index_q_flat_approximation;
      }

      class_call(array_integrate_all_trapzd_or_spline(cl_integrand,
                                                      cl_integrand_num_columns,
                                                      ptr->q_size,
                                                      index_q_spline,
                                                      0,
                                                      1+index_ct,
                                                      1+psp->ct_size+index_ct,
                                                      &clvalue,
                                                      psp->error_message),
                 psp->error_message,
                 psp->error_message);

      /* in the closed case, instead of an integral, we have a
         discrete sum. In practice, this does not matter: the previous
         routine does give a correct approximation of the discrete
         sum, both in the trapezoidal and spline regions. The only
         error comes from the first point: the previous routine
         assumes a weight for the first point which is too small
         compared to what it would be in an actual discrete
         sum. The line below corrects this problem in an exact way. */

      if (pba->sgnK == 1) {
        clvalue += cl_integrand[1+index_ct] * ptr->q[0]/ptr->k[0][0]*sqrt(pba->K)/2.;
      }

      /* we have the correct C_l now. We can store it in the transfer structure. */

      psp->cl[index_md]
        [(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct]
        = clvalue;

    }
  }

  if (ppt->has_cl_number_count == _TRUE_) {
    free(transfer_ic1_nc);
    free(transfer_ic2_nc);
  }

  return _SUCCESS_;
}

/**
 * This routine computes the values of k and tau at which the matter
 * power spectra P(k,tau) and the matter transfer functions T_i(k,tau)
 * will be stored.
* * @param pba Input : pointer to background structure (for z to tau conversion) * @param ppt Input : pointer to perturbation structure (contain source functions) * @param psp Input/Output: pointer to spectra structure * @return the error status */ int spectra_k_and_tau( struct background * pba, struct perturbs * ppt, struct spectra * psp ) { /** Summary: */ /** - define local variables */ int index_k; int index_tau; double tau_min; /** - check the presence of scalar modes */ class_test((ppt->has_scalars == _FALSE_), psp->error_message, "you cannot ask for matter power spectrum since you turned off scalar modes"); /** - check the maximum redshift z_max_pk at which P(k,z) and T_i(k,z) should be computable by interpolation. If it is equal to zero, only P(k,z=0) needs to be computed. If it is higher, we will store in a table various P(k,tau) at several values of tau generously encompassing the range 0<z<z_max_pk */ /* if z_max_pk<0, return error */ class_test((psp->z_max_pk < 0), psp->error_message, "asked for negative redshift z=%e",psp->z_max_pk); /* if z_max_pk=0, there is just one value to store */ if (psp->z_max_pk == 0.) { psp->ln_tau_size=1; } /* if z_max_pk>0, store several values (with a confortable margin above z_max_pk) in view of interpolation */ else{ /* find the first relevant value of tau (last value in the table tau_ampling before tau(z_max)) and infer the number of values of tau at which P(k) must be stored */ class_call(background_tau_of_z(pba,psp->z_max_pk,&tau_min), pba->error_message, psp->error_message); index_tau=0; class_test((tau_min < ppt->tau_sampling[index_tau]), psp->error_message, "you asked for zmax=%e, i.e. 
taumin=%e, smaller than first possible value =%e",psp->z_max_pk,tau_min,ppt->tau_sampling[0]); while (ppt->tau_sampling[index_tau] < tau_min){ index_tau++; } index_tau --; /* whenever possible, take a few more values in to avoid boundary effects in the interpolation */ if (index_tau>0) index_tau--; if (index_tau>0) index_tau--; if (index_tau>0) index_tau--; if (index_tau>0) index_tau--; psp->ln_tau_size=ppt->tau_size-index_tau; } /** - allocate and fill table of tau values at which P(k,tau) and T_i(k,tau) are stored */ class_alloc(psp->ln_tau,sizeof(double)*psp->ln_tau_size,psp->error_message); for (index_tau=0; index_tau<psp->ln_tau_size; index_tau++) { psp->ln_tau[index_tau]=log(ppt->tau_sampling[index_tau-psp->ln_tau_size+ppt->tau_size]); } /** - allocate and fill table of k values at which P(k,tau) is stored */ psp->ln_k_size = ppt->k_size[ppt->index_md_scalars]; class_alloc(psp->ln_k,sizeof(double)*psp->ln_k_size,psp->error_message); for (index_k=0; index_k<psp->ln_k_size; index_k++) { class_test(ppt->k[ppt->index_md_scalars][index_k] <= 0., psp->error_message, "stop to avoid segmentation fault"); psp->ln_k[index_k]=log(ppt->k[ppt->index_md_scalars][index_k]); } return _SUCCESS_; } /** * This routine computes a table of values for all matter power spectra P(k), * given the source functions and primordial spectra. 
 *
 * @param pba Input : pointer to background structure (will provide H, Omega_m at redshift of interest)
 * @param ppt Input : pointer to perturbation structure (contain source functions)
 * @param ppm Input : pointer to primordial structure
 * @param pnl Input : pointer to nonlinear structure (provides non-linear corrections if requested)
 * @param psp Input/Output: pointer to spectra structure
 * @return the error status
 */

int spectra_pk(
               struct background * pba,
               struct perturbs * ppt,
               struct primordial * ppm,
               struct nonlinear *pnl,
               struct spectra * psp
               ) {

  /** Summary: */

  /** - define local variables */

  int index_md;
  int index_ic1,index_ic2,index_ic1_ic2;
  int index_k;
  int index_tau;
  double * primordial_pk; /* array with argument primordial_pk[index_ic_ic] */
  double source_ic1;
  double source_ic2;
  double ln_pk_tot;

  /** - check the presence of scalar modes */

  class_test((ppt->has_scalars == _FALSE_),
             psp->error_message,
             "you cannot ask for matter power spectrum since you turned off scalar modes");

  index_md = psp->index_md_scalars;

  /** - allocate temporary vectors where the primordial spectrum and the background quantitites will be stored */

  class_alloc(primordial_pk,psp->ic_ic_size[index_md]*sizeof(double),psp->error_message);

  /** - allocate and fill array of P(k,tau) values; layout is
      [(index_tau * ln_k_size + index_k) * ic_ic_size + index_ic1_ic2] */

  class_alloc(psp->ln_pk,
              sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_ic_size[index_md],
              psp->error_message);

  if (pnl->method != nl_none) {
    class_alloc(psp->ln_pk_nl,
                sizeof(double)*psp->ln_tau_size*psp->ln_k_size,
                psp->error_message);
  }
  else {
    /* non-linear table not requested: leave pointer NULL so callers can test it */
    psp->ln_pk_nl = NULL;
  }

  for (index_tau=0 ; index_tau < psp->ln_tau_size; index_tau++) {
    for (index_k=0; index_k<psp->ln_k_size; index_k++) {

      class_call(primordial_spectrum_at_k(ppm,index_md,logarithmic,psp->ln_k[index_k],primordial_pk),
                 ppm->error_message,
                 psp->error_message);

      ln_pk_tot =0;

      /* curvature primordial spectrum:
         P_R(k) = 1/(2pi^2) k^3 <R R>
         so, primordial curvature correlator:
         <R R> = (2pi^2) k^-3 P_R(k)
         so, delta_m correlator:
         P(k) = <delta_m delta_m> = (2pi^2) k^-3 (source_m)^2 P_R(k)

         For isocurvature or cross adiabatic-isocurvature parts,
         replace one or two 'R' by 'S_i's */

      /* part diagonal in initial conditions: store ln P(k,tau) */
      /* note: the tau offset (index_tau-ln_tau_size+tau_size) maps the
         spectra tau table onto the LAST ln_tau_size entries of the
         perturbation time sampling */

      for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {

        index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md]);

        source_ic1 = ppt->sources[index_md]
          [index_ic1 * ppt->tp_size[index_md] + ppt->index_tp_delta_m]
          [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

        psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2] =
          log(2.*_PI_*_PI_/exp(3.*psp->ln_k[index_k])
              *source_ic1*source_ic1
              *exp(primordial_pk[index_ic1_ic2]));

        ln_pk_tot += psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2];

      }

      /* part non-diagonal in initial conditions */
      /* NOTE(review): off-diagonal entries hold
         primordial_pk * SIGN(source_ic1) * SIGN(source_ic2), i.e. a
         signed correlation-like quantity, NOT a log as on the
         diagonal — downstream readers must treat the two cases
         differently; confirm against spectra_pk_at_z(). */

      for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
        for (index_ic2 = index_ic1+1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {

          index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);

          if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {

            source_ic1 = ppt->sources[index_md]
              [index_ic1 * ppt->tp_size[index_md] + ppt->index_tp_delta_m]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            source_ic2 = ppt->sources[index_md]
              [index_ic2 * ppt->tp_size[index_md] + ppt->index_tp_delta_m]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2] =
              primordial_pk[index_ic1_ic2]*SIGN(source_ic1)*SIGN(source_ic2);

            ln_pk_tot += psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2];

          }
          else {
            psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2] = 0.;
          }
        }
      }

      /* if non-linear corrections required, compute the total
         non-linear matter power spectrum */
      /* NOTE(review): for a single ic, ln_pk_tot equals ln P(k); with
         several ic's this accumulator mixes logs (diagonal) and
         correlation terms (off-diagonal) — looks valid only in the
         single-ic case; confirm intended behavior. */

      if (pnl->method != nl_none) {
        psp->ln_pk_nl[index_tau * psp->ln_k_size + index_k] =
          ln_pk_tot
          + 2.*log(pnl->nl_corr_density[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]);
      }
    }
  }

  /**- if interpolation of P(k,tau) will be needed (as a function of tau),
     compute array of second derivatives in view of spline interpolation */

  if (psp->ln_tau_size > 1) {

    class_alloc(psp->ddln_pk,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_ic_size[index_md],psp->error_message);

    class_call(array_spline_table_lines(psp->ln_tau,
                                        psp->ln_tau_size,
                                        psp->ln_pk,
                                        psp->ic_ic_size[index_md]*psp->ln_k_size,
                                        psp->ddln_pk,
                                        _SPLINE_EST_DERIV_,
                                        psp->error_message),
               psp->error_message,
               psp->error_message);
  }

  /* compute sigma8 (mean variance today in sphere of radius 8/h Mpc) */

  class_call(spectra_sigma(pba,ppm,psp,8./pba->h,0.,&(psp->sigma8)),
             psp->error_message,
             psp->error_message);

  if (psp->spectra_verbose>0)
    fprintf(stdout," -> sigma8=%g (computed till k = %g h/Mpc)\n",
            psp->sigma8,
            exp(psp->ln_k[psp->ln_k_size-1])/pba->h);

  /**- if interpolation of P_NL(k,tau) will be needed (as a function of tau),
     compute array of second derivatives in view of spline interpolation */

  if (pnl->method != nl_none) {
    if (psp->ln_tau_size > 1) {

      class_alloc(psp->ddln_pk_nl,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_ic_size[index_md],psp->error_message);

      class_call(array_spline_table_lines(psp->ln_tau,
                                          psp->ln_tau_size,
                                          psp->ln_pk_nl,
                                          psp->ln_k_size,
                                          psp->ddln_pk_nl,
                                          _SPLINE_EST_DERIV_,
                                          psp->error_message),
                 psp->error_message,
                 psp->error_message);
    }
  }

  free (primordial_pk);

  return _SUCCESS_;
}

/**
 * This routine computes sigma(R) given P(k) (does not check that k_max is large
 * enough)
 *
 * @param pba   Input: pointer to background structure
 * @param ppm   Input: pointer to primordial structure
 * @param psp   Input: pointer to spectra structure
 * @param R     Input: radius in Mpc
 * @param z     Input: redshift
 * @param sigma Output: variance in a sphere of radius R (dimensionless)
 */

int spectra_sigma(
                  struct background * pba,
                  struct primordial * ppm,
                  struct spectra * psp,
                  double R,
                  double z,
                  double * sigma
                  ) {

  double pk;
  double * pk_ic = NULL;
  double * array_for_sigma;
  int index_num;
  int index_k;
  int index_y;
  int index_ddy;
  int i;
  double k,W,x;

  /* with several initial conditions, spectra_pk_at_k_and_z also needs
     a per-ic output buffer */
  if (psp->ic_ic_size[psp->index_md_scalars]>1)
    class_alloc(pk_ic,
                psp->ic_ic_size[psp->index_md_scalars]*sizeof(double),
                psp->error_message);

  /* define the columns of the integration workspace:
     0 = k, 1 = integrand y(k), 2 = its second derivative */
  i=0;
  index_k=i;
  i++;
  index_y=i;
  i++;
  index_ddy=i;
  i++;
  index_num=i;

  class_alloc(array_for_sigma,
              psp->ln_k_size*index_num*sizeof(double),
              psp->error_message);

  /* integrand of sigma^2(R): k^2 P(k) W^2(kR), with W the Fourier
     transform of a spherical top-hat window */
  for (i=0;i<psp->ln_k_size;i++) {
    k=exp(psp->ln_k[i]);
    if (i == (psp->ln_k_size-1)) k *= 0.9999999; // to prevent rounding error leading to k being bigger than maximum value
    x=k*R;
    W=3./x/x/x*(sin(x)-x*cos(x));
    class_call(spectra_pk_at_k_and_z(pba,ppm,psp,k,z,&pk,pk_ic),
               psp->error_message,
               psp->error_message);
    array_for_sigma[i*index_num+index_k]=k;
    array_for_sigma[i*index_num+index_y]=k*k*pk*W*W;
  }

  class_call(array_spline(array_for_sigma,
                          index_num,
                          psp->ln_k_size,
                          index_k,
                          index_y,
                          index_ddy,
                          _SPLINE_EST_DERIV_,
                          psp->error_message),
             psp->error_message,
             psp->error_message);

  class_call(array_integrate_all_spline(array_for_sigma,
                                        index_num,
                                        psp->ln_k_size,
                                        index_k,
                                        index_y,
                                        index_ddy,
                                        sigma,
                                        psp->error_message),
             psp->error_message,
             psp->error_message);

  free(array_for_sigma);

  if (psp->ic_ic_size[psp->index_md_scalars]>1)
    free(pk_ic);

  /* normalize: sigma = sqrt[ 1/(2 pi^2) int dk k^2 P(k) W^2(kR) ] */
  *sigma = sqrt(*sigma/(2.*_PI_*_PI_));

  return _SUCCESS_;
}

/**
 * This routine computes a table of values for all matter transfer
 * functions T_i(k), given the source functions and primordial spectra.
 *
 * For every stored conformal time tau and wavenumber k, and for each initial
 * condition, the transfer function T_i(k,tau) of each species is copied from
 * the perturbation module's source arrays into the flat array
 * psp->matter_transfer, indexed as
 * ((index_tau*ln_k_size + index_k)*ic_size + index_ic)*tr_size + index_tr.
 * Density-weighted sums (delta_rho_tot/rho_tot and
 * rho_plus_p_theta_tot/rho_plus_p_tot) are accumulated on the fly to build
 * the total density and velocity transfer functions.
 *
 * @param pba Input : pointer to background structure (will provide density of each species)
 * @param ppt Input : pointer to perturbation structure (contain source functions)
 * @param psp Input/Output: pointer to spectra structure
 * @return the error status
 */

int spectra_matter_transfers(
                             struct background * pba,
                             struct perturbs * ppt,
                             struct spectra * psp
                             ) {

  /** Summary: */

  /** - define local variables */

  int index_md;
  int index_ic;
  int index_k;
  int index_tau;
  int last_index_back;
  double * pvecback_sp_long; /* array with argument pvecback_sp_long[pba->index_bg] */
  double delta_i,theta_i,rho_i;             /* per-species density contrast, velocity divergence, background density */
  double delta_rho_tot,rho_tot;             /* accumulators for the total density transfer function */
  double rho_plus_p_theta_tot,rho_plus_p_tot; /* accumulators for the total velocity transfer function */
  int n_ncdm;

  /** - check the presence of scalar modes */

  class_test((ppt->has_scalars == _FALSE_),
             psp->error_message,
             "you cannot ask for matter power spectrum since you turned off scalar modes");

  index_md = psp->index_md_scalars;

  /** - allocate and fill array of T_i(k,tau) values */

  class_alloc(psp->matter_transfer,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size,psp->error_message);

  /** - allocate temporary vector where the background quantities will be stored */

  class_alloc(pvecback_sp_long,pba->bg_size*sizeof(double),psp->error_message);

  for (index_tau=0 ; index_tau < psp->ln_tau_size; index_tau++) {

    /* sources are sampled on ppt->tau_sampling; the last psp->ln_tau_size
       entries of that grid are the times kept for the spectra, hence the
       (index_tau-psp->ln_tau_size+ppt->tau_size) offset used throughout */
    class_call(background_at_tau(pba,
                                 ppt->tau_sampling[index_tau-psp->ln_tau_size+ppt->tau_size],
                                 /* for this last argument we could have passed
                                    exp(psp->ln_tau[index_tau]) but we would then
                                    lose precision in the exp(log(x)) operation */
                                 pba->long_info,
                                 pba->inter_normal,
                                 &last_index_back,
                                 pvecback_sp_long),
               pba->error_message,
               psp->error_message);

    for (index_k=0; index_k<psp->ln_k_size; index_k++) {

      for (index_ic = 0; index_ic < psp->ic_size[index_md]; index_ic++) {

        delta_rho_tot=0.;
        rho_tot=0.;
        rho_plus_p_theta_tot=0.;
        rho_plus_p_tot=0.;

        /* T_g(k,tau) : photons (always present in the background) */

        rho_i = pvecback_sp_long[pba->index_bg_rho_g];

        if (ppt->has_source_delta_g == _TRUE_) {

          delta_i = ppt->sources[index_md]
            [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_g]
            [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_g] = delta_i;

          delta_rho_tot += rho_i * delta_i;

          /* NOTE(review): for photons, rho_tot (and rho_plus_p_tot below) are
             only accumulated when the corresponding source exists, whereas for
             every other species the accumulation happens unconditionally —
             confirm this asymmetry is intended */
          rho_tot += rho_i;
        }

        if (ppt->has_source_theta_g == _TRUE_) {

          theta_i = ppt->sources[index_md]
            [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_g]
            [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_g] = theta_i;

          /* radiation: rho + p = 4/3 rho */
          rho_plus_p_theta_tot += 4./3. * rho_i * theta_i;

          rho_plus_p_tot += 4./3. * rho_i;
        }

        /* T_b(k,tau) : baryons */

        rho_i = pvecback_sp_long[pba->index_bg_rho_b];

        if (ppt->has_source_delta_b == _TRUE_) {

          delta_i = ppt->sources[index_md]
            [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_b]
            [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_b] = delta_i;

          delta_rho_tot += rho_i * delta_i;
        }

        rho_tot += rho_i;

        if (ppt->has_source_theta_b == _TRUE_) {

          theta_i = ppt->sources[index_md]
            [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_b]
            [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_b] = theta_i;

          /* pressureless: rho + p = rho */
          rho_plus_p_theta_tot += rho_i * theta_i;
        }

        rho_plus_p_tot += rho_i;

        /* T_cdm(k,tau) : cold dark matter */

        if (pba->has_cdm == _TRUE_) {

          rho_i = pvecback_sp_long[pba->index_bg_rho_cdm];

          if (ppt->has_source_delta_cdm == _TRUE_) {

            delta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_cdm]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_cdm] = delta_i;

            delta_rho_tot += rho_i * delta_i;
          }

          rho_tot += rho_i;

          if (ppt->has_source_theta_cdm == _TRUE_) {

            theta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_cdm]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_cdm] = theta_i;

            rho_plus_p_theta_tot += rho_i * theta_i;
          }

          rho_plus_p_tot += rho_i;
        }

        /* T_dcdm(k,tau) : decaying cold dark matter */

        if (pba->has_dcdm == _TRUE_) {

          rho_i = pvecback_sp_long[pba->index_bg_rho_dcdm];

          if (ppt->has_source_delta_dcdm == _TRUE_) {

            delta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_dcdm]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_dcdm] = delta_i;

            delta_rho_tot += rho_i * delta_i;
          }

          rho_tot += rho_i;

          if (ppt->has_source_theta_dcdm == _TRUE_) {

            theta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_dcdm]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_dcdm] = theta_i;

            rho_plus_p_theta_tot += rho_i * theta_i;
          }

          rho_plus_p_tot += rho_i;
        }

        /* T_scf(k,tau) : scalar field (generic pressure, read from background) */

        if (pba->has_scf == _TRUE_) {

          rho_i = pvecback_sp_long[pba->index_bg_rho_scf];

          if (ppt->has_source_delta_scf == _TRUE_) {

            delta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_scf]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_scf] = delta_i;

            delta_rho_tot += rho_i * delta_i;
          }

          rho_tot += rho_i;

          if (ppt->has_source_theta_scf == _TRUE_) {

            theta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_scf]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_scf] = theta_i;

            rho_plus_p_theta_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_scf]) * theta_i;
          }

          rho_plus_p_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_scf]);
        }

        /* T_fld(k,tau) : dark-energy fluid with CPL equation of state
           w(a) = w0 + wa*(1-a/a_today), so rho+p = (1+w)*rho */

        if (pba->has_fld == _TRUE_) {

          rho_i = pvecback_sp_long[pba->index_bg_rho_fld];

          if (ppt->has_source_delta_fld == _TRUE_) {

            delta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_fld]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_fld] = delta_i;

            delta_rho_tot += rho_i * delta_i;
          }

          rho_tot += rho_i;

          if (ppt->has_source_theta_fld == _TRUE_) {

            theta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_fld]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_fld] = theta_i;

            rho_plus_p_theta_tot += (1. + pba->w0_fld + pba->wa_fld * (1. - pvecback_sp_long[pba->index_bg_a] / pba->a_today)) * rho_i * theta_i;
          }

          rho_plus_p_tot += (1. + pba->w0_fld + pba->wa_fld * (1. - pvecback_sp_long[pba->index_bg_a] / pba->a_today)) * rho_i;
        }

        /* T_ur(k,tau) : ultra-relativistic (massless neutrino) species */

        if (pba->has_ur == _TRUE_) {

          rho_i = pvecback_sp_long[pba->index_bg_rho_ur];

          if (ppt->has_source_delta_ur == _TRUE_) {

            delta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_ur]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_ur] = delta_i;

            delta_rho_tot += rho_i * delta_i;
          }

          rho_tot += rho_i;

          if (ppt->has_source_theta_ur == _TRUE_) {

            theta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_ur]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_ur] = theta_i;

            rho_plus_p_theta_tot += 4./3. * rho_i * theta_i;
          }

          rho_plus_p_tot += 4./3. * rho_i;
        }

        /* T_dr(k,tau) : dark radiation (decay product of dcdm) */

        if (pba->has_dr == _TRUE_) {

          rho_i = pvecback_sp_long[pba->index_bg_rho_dr];

          if (ppt->has_source_delta_dr == _TRUE_) {

            delta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_dr]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_dr] = delta_i;

            delta_rho_tot += rho_i * delta_i;
          }

          rho_tot += rho_i;

          if (ppt->has_source_theta_dr == _TRUE_) {

            theta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_dr]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_dr] = theta_i;

            rho_plus_p_theta_tot += 4./3. * rho_i * theta_i;
          }

          rho_plus_p_tot += 4./3. * rho_i;
        }

        /* T_ncdm_i(k,tau) : one transfer function per massive-neutrino species */

        if (pba->has_ncdm == _TRUE_) {

          for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {

            rho_i = pvecback_sp_long[pba->index_bg_rho_ncdm1+n_ncdm];

            if (ppt->has_source_delta_ncdm == _TRUE_) {

              delta_i = ppt->sources[index_md]
                [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_ncdm1+n_ncdm]
                [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

              psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_ncdm1+n_ncdm] = delta_i;

              delta_rho_tot += rho_i * delta_i;
            }

            rho_tot += rho_i;

            if (ppt->has_source_theta_ncdm == _TRUE_) {

              theta_i = ppt->sources[index_md]
                [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_ncdm1+n_ncdm]
                [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

              psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_ncdm1+n_ncdm] = theta_i;

              rho_plus_p_theta_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_ncdm1+n_ncdm]) * theta_i;
            }

            rho_plus_p_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_ncdm1+n_ncdm]);
          }
        }

        /* could include homogeneous component in rho_tot if uncommented
           (leave commented to match CMBFAST/CAMB definition) */

        /* if (pba->has_lambda == _TRUE_) { */
        /*   rho_i = pvecback_sp_long[pba->index_bg_rho_lambda]; */
        /*   rho_tot += rho_i; */
        /* } */

        /* T_tot(k,tau) : density-weighted totals over all species above */

        if (ppt->has_density_transfers == _TRUE_) {

          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_tot] = delta_rho_tot/rho_tot;
        }

        if (ppt->has_velocity_transfers == _TRUE_) {

          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_tot] = rho_plus_p_theta_tot/rho_plus_p_tot;
        }

      }
    }
  }

  /** - if interpolation of T_i(k,tau) will be needed (as a function of tau),
      compute array of second derivatives in view of spline interpolation */

  if (psp->ln_tau_size > 1) {

    class_alloc(psp->ddmatter_transfer,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size,psp->error_message);

    class_call(array_spline_table_lines(psp->ln_tau,
                                        psp->ln_tau_size,
                                        psp->matter_transfer,
                                        psp->ic_size[index_md]*psp->ln_k_size*psp->tr_size,
                                        psp->ddmatter_transfer,
                                        _SPLINE_EST_DERIV_,
                                        psp->error_message),
               psp->error_message,
               psp->error_message);
  }

  free (pvecback_sp_long);

  return _SUCCESS_;
}

/**
 * Build the list of column titles for transfer-function output files,
 * appending one title per stored column via class_store_columntitle.
 * The set of columns depends on the requested output format and on which
 * species/transfer types are active.
 *
 * @param pba           Input : pointer to background structure (species flags)
 * @param ppt           Input : pointer to perturbation structure (which transfers exist)
 * @param output_format Input : class_format or camb_format
 * @param titles        Output: concatenated column-title string
 * @return the error status
 */
int spectra_output_tk_titles(struct background *pba,
                             struct perturbs *ppt,
                             enum file_format output_format,
                             char titles[_MAXTITLESTRINGLENGTH_]
                             ){

  int n_ncdm;
  char tmp[40]; /* scratch buffer for per-species ncdm titles */

  if (output_format == class_format) {

    class_store_columntitle(titles,"k (h/Mpc)",_TRUE_);

    if (ppt->has_density_transfers == _TRUE_) {
      class_store_columntitle(titles,"d_g",_TRUE_);
      class_store_columntitle(titles,"d_b",_TRUE_);
      class_store_columntitle(titles,"d_cdm",pba->has_cdm);
      class_store_columntitle(titles,"d_fld",pba->has_fld);
      class_store_columntitle(titles,"d_ur",pba->has_ur);
      if (pba->has_ncdm == _TRUE_) {
        for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {
          sprintf(tmp,"d_ncdm[%d]",n_ncdm);
          class_store_columntitle(titles,tmp,_TRUE_);
        }
      }
      class_store_columntitle(titles,"d_dcdm",pba->has_dcdm);
      class_store_columntitle(titles,"d_dr",pba->has_dr);
      class_store_columntitle(titles,"d_scf",pba->has_scf);
      class_store_columntitle(titles,"d_tot",_TRUE_);
    }

    if (ppt->has_velocity_transfers == _TRUE_) {
      class_store_columntitle(titles,"t_g",_TRUE_);
      class_store_columntitle(titles,"t_b",_TRUE_);
      /* in synchronous gauge the cdm velocity vanishes by construction,
         hence the extra gauge condition */
      class_store_columntitle(titles,"t_cdm",((pba->has_cdm == _TRUE_) && (ppt->gauge != synchronous)));
      class_store_columntitle(titles,"t_fld",pba->has_fld);
      class_store_columntitle(titles,"t_ur",pba->has_ur);
      if (pba->has_ncdm == _TRUE_) {
        for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {
          sprintf(tmp,"t_ncdm[%d]",n_ncdm);
          class_store_columntitle(titles,tmp,_TRUE_);
        }
      }
      class_store_columntitle(titles,"t_dcdm",pba->has_dcdm);
class_store_columntitle(titles,"t_dr",pba->has_dr); class_store_columntitle(titles,"t__scf",pba->has_scf); class_store_columntitle(titles,"t_tot",_TRUE_); } } else if (output_format == camb_format) { class_store_columntitle(titles,"k (h/Mpc)",_TRUE_); class_store_columntitle(titles,"-T_cdm/k2",_TRUE_); class_store_columntitle(titles,"-T_b/k2",_TRUE_); class_store_columntitle(titles,"-T_g/k2",_TRUE_); class_store_columntitle(titles,"-T_ur/k2",_TRUE_); class_store_columntitle(titles,"-T_ncdm/k2",_TRUE_); class_store_columntitle(titles,"-T_tot/k2",_TRUE_); } return _SUCCESS_; } int spectra_output_tk_data( struct background * pba, struct perturbs * ppt, struct spectra * psp, enum file_format output_format, double z, int number_of_titles, double *data ) { int n_ncdm; double k, k_over_h, k2; double * tkfull=NULL; /* array with argument pk_ic[(index_k * psp->ic_size[index_md] + index_ic)*psp->tr_size+index_tr] */ double *tk; double *dataptr; int index_md=0; int index_ic; int index_k; int index_tr; int storeidx; if (psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size > 0){ class_alloc(tkfull, psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size*sizeof(double), psp->error_message); } /** - compute T_i(k) for each k (if several ic's, compute it for each ic; if z_pk = 0, this is done by directly reading inside the pre-computed table; if not, this is done by interpolating the table at the correct value of tau. */ /* if z_pk = 0, no interpolation needed */ if (z == 0.) 
  {
    /* read the last stored time slice (today) directly, no interpolation */
    for (index_k=0; index_k<psp->ln_k_size; index_k++) {
      for (index_tr=0; index_tr<psp->tr_size; index_tr++) {
        for (index_ic=0; index_ic<psp->ic_size[index_md]; index_ic++) {
          tkfull[(index_k * psp->ic_size[index_md] + index_ic) * psp->tr_size + index_tr] =
            psp->matter_transfer[(((psp->ln_tau_size-1)*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + index_tr];
        }
      }
    }
  }

  /* if 0 <= z_pk <= z_max_pk, interpolation needed, */
  else {

    class_call(spectra_tk_at_z(pba,
                               psp,
                               z,
                               tkfull),
               psp->error_message,
               psp->error_message);
  }

  /** - store data */

  for (index_ic = 0; index_ic < psp->ic_size[index_md]; index_ic++) {

    for (index_k=0; index_k<psp->ln_k_size; index_k++) {

      storeidx = 0;
      /* row of the output buffer for this (initial condition, wavenumber) pair */
      dataptr = data+index_ic*(psp->ln_k_size*number_of_titles)+index_k*number_of_titles;
      /* all transfer functions of this (ic,k) pair, contiguous in tkfull */
      tk = &(tkfull[(index_k * psp->ic_size[index_md] + index_ic) * psp->tr_size]);

      k = exp(psp->ln_k[index_k]);
      k2 = k*k;
      k_over_h = k/pba->h;

      class_store_double(dataptr, k_over_h, _TRUE_,storeidx);

      /* the ordering below must match the titles written by
         spectra_output_tk_titles for the same format */
      if (output_format == class_format) {

        if (ppt->has_density_transfers == _TRUE_) {

          class_store_double(dataptr,tk[psp->index_tr_delta_g],ppt->has_source_delta_g,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_b],ppt->has_source_delta_b,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_cdm],ppt->has_source_delta_cdm,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_fld],ppt->has_source_delta_fld,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_ur],ppt->has_source_delta_ur,storeidx);
          if (pba->has_ncdm == _TRUE_){
            for (n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){
              class_store_double(dataptr,tk[psp->index_tr_delta_ncdm1+n_ncdm],ppt->has_source_delta_ncdm,storeidx);
            }
          }
          class_store_double(dataptr,tk[psp->index_tr_delta_dcdm],ppt->has_source_delta_dcdm,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_dr],ppt->has_source_delta_dr,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_scf],ppt->has_source_delta_scf,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_tot],_TRUE_,storeidx);
        }

        if (ppt->has_velocity_transfers == _TRUE_) {

          class_store_double(dataptr,tk[psp->index_tr_theta_g],ppt->has_source_theta_g,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_b],ppt->has_source_theta_b,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_cdm],ppt->has_source_theta_cdm,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_fld],ppt->has_source_theta_fld,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_ur],ppt->has_source_theta_ur,storeidx);
          if (pba->has_ncdm == _TRUE_){
            for (n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){
              class_store_double(dataptr,tk[psp->index_tr_theta_ncdm1+n_ncdm],ppt->has_source_theta_ncdm,storeidx);
            }
          }
          class_store_double(dataptr,tk[psp->index_tr_theta_dcdm],ppt->has_source_theta_dcdm,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_dr],ppt->has_source_theta_dr,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_scf],ppt->has_source_theta_scf,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_tot],_TRUE_,storeidx);
        }

      }
      else if (output_format == camb_format) {

        /* rescale and reorder the matter transfer functions following
           the CMBFAST/CAMB convention; missing species default to 0.0 */
        class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_cdm]/k2,ppt->has_source_delta_cdm,storeidx,0.0);
        class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_b]/k2,ppt->has_source_delta_b,storeidx,0.0);
        class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_g]/k2,ppt->has_source_delta_g,storeidx,0.0);
        class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_ur]/k2,ppt->has_source_delta_ur,storeidx,0.0);
        class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_ncdm1]/k2,ppt->has_source_delta_ncdm,storeidx,0.0);
        class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_tot]/k2,_TRUE_,storeidx,0.0);
      }
    }
  }

  // Necessary because the size could be zero (if psp->tr_size is zero)
  if (tkfull != NULL)
    free(tkfull);

  return _SUCCESS_;
}

/**
 * Produce a human-readable first line and a filename suffix identifying
 * one initial condition (mode). Both outputs are empty strings if
 * index_ic matches none of the active modes.
 *
 * @param ppt        Input : pointer to perturbation structure (active ic flags/indices)
 * @param index_ic   Input : initial condition index to describe
 * @param first_line Output: descriptive sentence for file headers
 * @param ic_suffix  Output: short suffix ("ad", "bi", "cdi", "nid", "niv")
 * @return the error status
 */
int spectra_firstline_and_ic_suffix(struct perturbs *ppt,
                                    int index_ic,
                                    char first_line[_LINE_LENGTH_MAX_],
                                    FileName ic_suffix){

  first_line[0]='\0';
  ic_suffix[0]='\0';

  if ((ppt->has_ad == _TRUE_) && (index_ic == ppt->index_ic_ad)) {
    strcpy(ic_suffix,"ad");
    strcpy(first_line,"for adiabatic (AD) mode (normalized to initial curvature=1) ");
  }

  if ((ppt->has_bi == _TRUE_) && (index_ic == ppt->index_ic_bi)) {
    strcpy(ic_suffix,"bi");
    strcpy(first_line,"for baryon isocurvature (BI) mode (normalized to initial entropy=1)");
  }

  if ((ppt->has_cdi == _TRUE_) && (index_ic == ppt->index_ic_cdi)) {
    strcpy(ic_suffix,"cdi");
    strcpy(first_line,"for CDM isocurvature (CDI) mode (normalized to initial entropy=1)");
  }

  if ((ppt->has_nid == _TRUE_) && (index_ic == ppt->index_ic_nid)) {
    strcpy(ic_suffix,"nid");
    strcpy(first_line,"for neutrino density isocurvature (NID) mode (normalized to initial entropy=1)");
  }

  if ((ppt->has_niv == _TRUE_) && (index_ic == ppt->index_ic_niv)) {
    strcpy(ic_suffix,"niv");
    strcpy(first_line,"for neutrino velocity isocurvature (NIV) mode (normalized to initial entropy=1)");
  }

  return _SUCCESS_;
}
/* ==== file: GB_unop__identity_fc64_bool.c ==== */
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__identity_fc64_bool)
// op(A') function: GB (_unop_tran__identity_fc64_bool)

// C type: GxB_FC64_t
// A type: bool
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij

// input matrix entry type
#define GB_ATYPE \
    bool

// output matrix entry type
#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: z is the cast of x, unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting: bool -> double-precision complex with zero imaginary part
#define GB_CAST(z, aij) \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    bool aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fc64_bool)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries (or bitmap slots) to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every slot holds a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b,
        // so only slots with Ab [p] set need a value
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            bool aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fc64_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // shared transpose kernel, specialized via the GB_* macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ==== file: trsm_x_csc_u_lo_row.c ==== */
#include "alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"

/*
 * Solve y = alpha * inv(L) * x for a unit-diagonal lower-triangular matrix
 * stored in CSC format, applied simultaneously to `columns` right-hand sides
 * (x and y are dense, with leading dimensions ldx and ldy).
 *
 * Forward substitution by column: once y[col] is final, every nonzero
 * L[row,col] with row > col subtracts its contribution from y[row].
 * The unit diagonal means no division step is needed.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_CSC *A,
                           const ALPHA_Number *x,
                           const ALPHA_INT columns,
                           const ALPHA_INT ldx,
                           ALPHA_Number *y,
                           const ALPHA_INT ldy)
{
    const ALPHA_INT n_rows = A->rows;
    const ALPHA_INT n_cols = A->cols;
    ALPHA_INT nthreads = alpha_get_thread_num();

    /* Seed the solution: y = alpha * x. Rows are independent here,
       so the initialization parallelizes trivially. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(nthreads)
#endif
    for (int r = 0; r < n_rows; r++) {
        for (int cc = 0; cc < columns; cc++) {
            alpha_mul(y[index2(r, cc, ldy)], x[index2(r, cc, ldx)], alpha);
        }
    }

    /* Forward substitution, one CSC column at a time. A non-unit variant
       would first divide y[col] by the diagonal element here. */
    for (ALPHA_INT col = 0; col < n_cols; ++col) {
        for (ALPHA_INT idx = A->cols_start[col]; idx < A->cols_end[col]; ++idx) {
            const ALPHA_INT row = A->row_indx[idx];
            /* only strictly-lower entries participate in the elimination */
            if (row <= col)
                continue;
            /* y[row,:] -= A[row,col] * y[col,:] — each right-hand side is
               independent, so this update parallelizes across columns */
#ifdef _OPENMP
#pragma omp parallel for num_threads(nthreads)
#endif
            for (ALPHA_INT rhs = 0; rhs < columns; rhs++) {
                alpha_msube(y[index2(row, rhs, ldy)],
                            A->values[idx],
                            y[index2(col, rhs, ldy)]);
            }
        }
    }

    return ALPHA_SPARSE_STATUS_SUCCESS;
}