source
stringlengths
3
92
c
stringlengths
26
2.25M
hello_par.c
#include <stdio.h>
#include <omp.h>

/*
 * Minimal OpenMP "hello world": each thread of the parallel team prints its
 * id and the actual team size, then the initial thread prints a summary.
 *
 * Returns 0 on success.
 */
int main(void)
{
    int nthreads = 4;
    // omp_set_num_threads(nthreads);   // left disabled on purpose: the
                                        // runtime's default team size is used,
                                        // so the final message is only a hope.

    #pragma omp parallel
    {
        int id = omp_get_thread_num();
        // Emit the whole greeting with ONE printf call: with two separate
        // calls, output from other threads could be interleaved between them,
        // garbling the lines. A single call keeps each line intact on
        // line-buffered stdout.
        printf("Hello World from thread = %d with %d threads\n",
               id, omp_get_num_threads());
    }

    printf("all done, with hopefully %d threads\n", nthreads);
    return 0;
}
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;

/* Specialization for a row-major destination matrix
 * => simple transposition of the product: a row-major C = A*B is computed as
 *    the column-major C^T = B^T * A^T, so we swap lhs/rhs (and their
 *    conjugation flags) and forward to the col-major specialization below. */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  typedef gebp_traits<RhsScalar,LhsScalar> Traits;

  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};

/* Specialization for a col-major destination matrix
 * => Blocking algorithm following Goto's paper:
 *    C is computed panel by panel, packing a kc x nc panel of the rhs (B')
 *    and mc x kc panels of the lhs (A') into contiguous buffers that are fed
 *    to the register-blocked gebp micro-kernel. */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
  typedef gebp_traits<LhsScalar,RhsScalar> Traits;

  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  // Computes res += alpha * lhs * rhs, with res a (rows x cols) col-major
  // matrix and depth the inner dimension. `blocking` supplies the cache
  // block sizes and (optionally pre-allocated) packing buffers; `info`,
  // when non-null, selects the cooperative multi-threaded path (the call is
  // then made from inside an OpenMP parallel region, one call per thread).
  static void run(Index rows, Index cols, Index depth,
    const LhsScalar* _lhs, Index lhsStride,
    const RhsScalar* _rhs, Index rhsStride,
    ResScalar* _res, Index resStride,
    ResScalar alpha,
    level3_blocking<LhsScalar,RhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
    typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
    LhsMapper lhs(_lhs,lhsStride);
    RhsMapper rhs(_rhs,rhsStride);
    ResMapper res(_res, resStride);

    Index kc = blocking.kc();                   // cache block size along the K direction
    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
    Index nc = (std::min)(cols,blocking.nc());  // cache block size along the N direction

    gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
    gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
    if(info)
    {
      // this is the parallel version!
      int tid = omp_get_thread_num();
      int threads = omp_get_num_threads();

      // blockA is shared by all threads; each thread owns the slice
      // [info[tid].lhs_start, info[tid].lhs_start+info[tid].lhs_length).
      LhsScalar* blockA = blocking.blockA();
      eigen_internal_assert(blockA!=0);

      std::size_t sizeB = kc*nc;
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);

      // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
      for(Index k=0; k<depth; k+=kc)
      {
        const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

        // In order to reduce the chance that a thread has to wait for the other,
        // let's start by packing B'.
        pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);

        // Pack A_k to A' in a parallel fashion:
        // each thread packs the sub block A_k,i to A'_i where i is the thread id.

        // However, before copying to A'_i, we have to make sure that no other thread is still using it,
        // i.e., we test that info[tid].users equals 0.
        // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
        // NOTE(review): busy-wait spin — correctness presumably relies on
        // `users`/`sync` being atomic/volatile in GemmParallelInfo; confirm
        // against the parallelizer before touching this protocol.
        while(info[tid].users!=0) {}
        info[tid].users += threads;

        pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);

        // Notify the other threads that the part A'_i is ready to go.
        info[tid].sync = k;

        // Computes C_i += A' * B' per A'_i
        // Starting at our own slice (shift==0) spreads the threads over
        // different A'_i slices, reducing contention on the spin waits.
        for(int shift=0; shift<threads; ++shift)
        {
          int i = (tid+shift)%threads;

          // At this point we have to make sure that A'_i has been updated by the thread i,
          // we use testAndSetOrdered to mimic a volatile access.
          // However, no need to wait for the B' part which has been updated by the current thread!
          if (shift>0) {
            while(info[i].sync!=k) {
            }
          }

          gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
        }

        // Then keep going as usual with the remaining B'
        // (columns [nc, cols) of the rhs, against the WHOLE packed A').
        for(Index j=nc; j<cols; j+=nc)
        {
          const Index actual_nc = (std::min)(j+nc,cols)-j;

          // pack B_k,j to B'
          pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);

          // C_j += A' * B'
          gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
        }

        // Release all the sub blocks A'_i of A' for the current thread,
        // i.e., we simply decrement the number of users by 1
        for(Index i=0; i<threads; ++i)
#pragma omp atomic
          info[i].users -= 1;
      }
    }
    else
#endif // EIGEN_HAS_OPENMP
    {
      EIGEN_UNUSED_VARIABLE(info);

      // this is the sequential version!
      std::size_t sizeA = kc*mc;
      std::size_t sizeB = kc*nc;

      // Buffers come from `blocking` when pre-allocated, otherwise from the
      // (aligned) stack/heap via this macro.
      ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());

      // If the whole rhs fits in one kc x nc panel but the lhs does not fit
      // in mc rows, B' only needs to be packed once (on the first i2 sweep).
      const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;

      // For each horizontal panel of the rhs, and corresponding panel of the lhs...
      for(Index i2=0; i2<rows; i2+=mc)
      {
        const Index actual_mc = (std::min)(i2+mc,rows)-i2;

        for(Index k2=0; k2<depth; k2+=kc)
        {
          const Index actual_kc = (std::min)(k2+kc,depth)-k2;

          // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
          // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
          // Note that this panel will be read as many times as the number of blocks in the rhs's
          // horizontal panel which is, in practice, a very low number.
          pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);

          // For each kc x nc block of the rhs's horizontal panel...
          for(Index j2=0; j2<cols; j2+=nc)
          {
            const Index actual_nc = (std::min)(j2+nc,cols)-j2;

            // We pack the rhs's block into a sequential chunk of memory (L2 caching)
            // Note that this block will be read a very high number of times, which is equal to the number of
            // micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
            if((!pack_rhs_once) || i2==0)
              pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);

            // Everything is packed, we can now call the panel * block kernel:
            gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
          }
        }
      }
    }
  }
};

/*********************************************************************************
*  Specialization of generic_product_impl for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

// Functor binding a concrete GEMM call (operands, destination, alpha,
// blocking) so that parallelize_gemm can invoke it per horizontal slice
// [row, row+rows) of the destination.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  // Called once before spawning the parallel region: recompute blocking for
  // `num_threads` and pre-allocate the shared lhs packing buffer.
  void initParallelSession(Index num_threads) const
  {
    m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
    m_blocking.allocateA();
  }

  // Compute dst(row:row+rows, col:col+cols) += alpha * lhs * rhs;
  // cols==-1 means "all remaining columns".
  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              &m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              &m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  typedef typename Gemm::Traits Traits;

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};

template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

// Base class holding the cache block sizes (mc, nc, kc) and the pointers to
// the packed lhs/rhs buffers, shared by all blocking strategies below.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;

    Index m_mc;
    Index m_nc;
    Index m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline Index mc() const { return m_mc; }
    inline Index nc() const { return m_nc; }
    inline Index kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
};

// Blocking for matrices whose maximal sizes are all known at compile time:
// the packing buffers are fixed-size data members (no heap allocation).
// For a row-major destination (Transpose) lhs/rhs roles are swapped, hence
// the conditional scalar types in the base class.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth
    };

#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
    EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
    // Over-allocate raw bytes and align the pointers manually in the ctor.
    EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
    EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif

  public:

    gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
#else
      // Round the raw byte buffers up to the next aligned address.
      this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
      this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
    }

    void initParallel(Index, Index, Index, Index)
    {}

    // Buffers are static members: nothing to allocate.
    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateAll() {}
};

// Blocking for runtime-sized matrices: block sizes are computed from the
// problem dimensions and cache sizes, and the packing buffers are heap
// allocated lazily (allocateA/allocateB) and released in the destructor.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index m_sizeA;
    Index m_sizeB;

  public:

    gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      if(l3_blocking)
      {
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
      }
      else  // no l3 blocking
      {
        // Pass a copy of nc so the stored m_nc (full width) is not shrunk.
        Index n = this->m_nc;
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
      }

      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    // Recompute blocking for a multi-threaded session; must be called before
    // the buffers are allocated. mc is protected from shrinking via the copy.
    void initParallel(Index rows, Index cols, Index depth, Index num_threads)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
      Index m = this->m_mc;
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
    }
};

} // end namespace internal

namespace internal {

// High-level GEMM entry point: small products (heuristic: rhs.rows() +
// dst.rows() + dst.cols() < 20) fall back to the coefficient-based lazy
// product; larger ones go through the blocked (and possibly parallel) kernel
// via scaleAndAddTo.
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  typedef typename Lhs::Scalar LhsScalar;
  typedef typename Rhs::Scalar RhsScalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;

  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

  enum {
    MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
  };

  typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;

  // dst = lhs * rhs
  template<typename Dst>
  static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::evalTo(dst, lhs, rhs);
    else
    {
      dst.setZero();
      scaleAndAddTo(dst, lhs, rhs, Scalar(1));
    }
  }

  // dst += lhs * rhs
  template<typename Dst>
  static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::addTo(dst, lhs, rhs);
    else
      scaleAndAddTo(dst,lhs, rhs, Scalar(1));
  }

  // dst -= lhs * rhs
  template<typename Dst>
  static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::subTo(dst, lhs, rhs);
    else
      scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
  }

  // dst += alpha * a_lhs * a_rhs, dispatching to the blocked GEMM.
  // Scalar factors and transpose/conjugate wrappers are stripped from the
  // operands by blas_traits and folded into actualAlpha / the template
  // parameters of the product kernel.
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
  {
    eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
    if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
      return;

    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
                               * RhsBlasTraits::extractScalarFactor(a_rhs);

    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
            Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

    typedef internal::gemm_functor<
      Scalar, Index,
      internal::general_matrix_matrix_product<
        Index,
        LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
        RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
        (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
      ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;

    BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
    // Parallelization is only worthwhile (and compiled in) for destinations
    // with enough rows; tiny fixed-size products stay sequential.
    internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
        (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
LG_check_ktruss.c
//------------------------------------------------------------------------------
// LG_check_ktruss: construct the ktruss of a graph (simple method)
//------------------------------------------------------------------------------

// LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause
// See additional acknowledgments in the LICENSE file,
// or contact permission@sei.cmu.edu for the full terms.

// Contributed by Timothy A. Davis, Texas A&M University

//------------------------------------------------------------------------------

// A very slow, bare-bones ktruss method.  This method is for testing only, to
// check the result of other, faster methods.  Do not benchmark this method; it
// is slow and simple by design.  G->A must be symmetric, with no entries on
// its diagonal.

// Free the CSR arrays and the discarded value array (workspace only).
#define LG_FREE_WORK                            \
{                                               \
    LAGraph_Free ((void **) &Cp, NULL) ;        \
    LAGraph_Free ((void **) &Cj, NULL) ;        \
    LAGraph_Free ((void **) &Cx, NULL) ;        \
    LAGraph_Free ((void **) &Ax, NULL) ;        \
}

// Free workspace plus the result matrix (used on error paths by LG_TRY/GRB_TRY).
#define LG_FREE_ALL                             \
{                                               \
    LG_FREE_WORK ;                              \
    GrB_free (&C) ;                             \
}

#include "LG_internal.h"
#include "LG_test.h"

// Computes the k-truss of G->A: the maximal subgraph in which every edge is
// part of at least k-2 triangles.  Iterates: count triangles per edge, drop
// edges below k-2, repeat until no edge is dropped.
// Returns GrB_SUCCESS on success, or a GraphBLAS/LAGraph error code.
int LG_check_ktruss
(
    // output
    GrB_Matrix *C_handle,   // the ktruss of G->A, of type GrB_UINT32
    // input
    LAGraph_Graph G,        // the structure of G->A must be symmetric
    uint32_t k,
    char *msg
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    LG_CLEAR_MSG ;
    GrB_Matrix C = NULL ;
    GrB_Index *Cp = NULL, *Cj = NULL ;
    uint32_t *Cx = NULL ;
    void *Ax = NULL ;
    GrB_Index n, ncols, Cp_len, Cj_len, Cx_len, nvals1, nvals2 ;
    LG_ASSERT (C_handle != NULL, GrB_NULL_POINTER) ;
    LG_TRY (LAGraph_CheckGraph (G, msg)) ;
    LG_ASSERT_MSG (G->nself_edges == 0, -104, "G->nself_edges must be zero") ;
    if (G->kind == LAGraph_ADJACENCY_UNDIRECTED ||
       (G->kind == LAGraph_ADJACENCY_DIRECTED &&
        G->is_symmetric_structure == LAGraph_TRUE))
    {
        // the structure of A is known to be symmetric
        ;
    }
    else
    {
        // A is not known to be symmetric
        LG_ASSERT_MSG (false, -1005, "G->A must be symmetric") ;
    }
    GRB_TRY (GrB_Matrix_nrows (&n, G->A)) ;
    GRB_TRY (GrB_Matrix_ncols (&ncols, G->A)) ;
    LG_ASSERT_MSG (n == ncols, -1001, "A must be square") ;

    //--------------------------------------------------------------------------
    // export G->A in CSR form and discard its values
    //--------------------------------------------------------------------------

    // only the structure (Cp, Cj) is needed; the values (Ax) are freed.
    size_t typesize ;
    LG_TRY (LG_check_export (G, &Cp, &Cj, &Ax, &Cp_len, &Cj_len, &Cx_len,
        &typesize, msg)) ;
    LAGraph_Free ((void **) &Ax, NULL) ;

    //--------------------------------------------------------------------------
    // allocate Cx
    //--------------------------------------------------------------------------

    // Cx [p] will hold the triangle count of the edge at position p.
    LG_TRY (LAGraph_Malloc ((void **) &Cx, Cx_len, sizeof (uint32_t), msg)) ;

    //--------------------------------------------------------------------------
    // construct the k-truss of G->A
    //--------------------------------------------------------------------------

    while (true)
    {

        //----------------------------------------------------------------------
        // compute the # of triangles incident on each edge of C
        //----------------------------------------------------------------------

        // masked dot-product method: C{C}=C*C' using the PLUS_ONE semiring
        #if !defined ( COVERAGE )
        #pragma omp parallel for schedule(dynamic,1024)
        #endif
        for (int64_t i = 0 ; i < n ; i++)
        {
            // for each entry in C(i,:)
            for (int64_t p = Cp [i] ; p < Cp [i+1] ; p++)
            {
                const int64_t j = Cj [p] ;
                uint32_t cij = 0 ;
                // cij += C(i,:) * C(j,:)'
                // merge-style intersection of the two sorted index lists
                // C(i,:) and C(j,:); each common neighbor is one triangle
                // on the edge (i,j).
                int64_t p1 = Cp [i] ;
                int64_t p1_end = Cp [i+1] ;
                int64_t p2 = Cp [j] ;
                int64_t p2_end = Cp [j+1] ;
                while (p1 < p1_end && p2 < p2_end)
                {
                    int64_t j1 = Cj [p1] ;
                    int64_t j2 = Cj [p2] ;
                    if (j1 < j2)
                    {
                        // C(i,j1) appears before C(j,j2)
                        p1++ ;
                    }
                    else if (j2 < j1)
                    {
                        // C(j,j2) appears before C(i,j1)
                        p2++ ;
                    }
                    else // j1 == j2
                    {
                        // C(i,j1) and C(j,j1) are the next entries to merge
                        cij++ ;
                        p1++ ;
                        p2++ ;
                    }
                }
                Cx [p] = cij ;
            }
        }

        //----------------------------------------------------------------------
        // import C in CSR form
        //----------------------------------------------------------------------

        // NOTE(review): GrB_Matrix_import copies Cp/Cj/Cx (per the GraphBLAS
        // v2.0 spec), so the arrays remain owned by this function — confirm
        // for the GraphBLAS implementation in use.
        GRB_TRY (GrB_Matrix_import_UINT32 (&C, GrB_UINT32, n, n,
            Cp, Cj, Cx, Cp_len, Cj_len, Cx_len, GrB_CSR_FORMAT)) ;
        GRB_TRY (GrB_Matrix_nvals (&nvals1, C)) ;

        //----------------------------------------------------------------------
        // keep entries >= k-2 and check for convergence
        //----------------------------------------------------------------------

        GRB_TRY (GrB_select (C, NULL, NULL, GrB_VALUEGE_UINT32, C, k-2, NULL)) ;
        GRB_TRY (GrB_Matrix_nvals (&nvals2, C)) ;
        if (nvals1 == nvals2)
        {
            // C is now the k-truss of G->A
            LG_FREE_WORK ;
            (*C_handle) = C ;
            return (GrB_SUCCESS) ;
        }

        //----------------------------------------------------------------------
        // export C in CSR form for the next iteration and free it
        //----------------------------------------------------------------------

        // refill Cp/Cj/Cx with the pruned matrix, then free the GrB matrix;
        // the lengths are updated in place for the next pass.
        GRB_TRY (GrB_Matrix_export_UINT32 (Cp, Cj, Cx,
            &Cp_len, &Cj_len, &Cx_len, GrB_CSR_FORMAT, C)) ;
        GRB_TRY (GrB_free (&C)) ;
    }
}
edge_miner.h
#ifndef EDGE_MINER_H #define EDGE_MINER_H #include <mutex> #include <numeric> #include "miner.h" #include "domain_support.h" typedef std::pair<unsigned, unsigned> InitPattern; typedef QuickPattern<EdgeEmbedding, ElementType> QPattern; typedef CanonicalGraph<EdgeEmbedding, ElementType> CPattern; typedef std::unordered_map<QPattern, Frequency> QpMapFreq; // quick pattern map (mapping quick pattern to its frequency) typedef std::unordered_map<CPattern, Frequency> CgMapFreq; // canonical pattern map (mapping canonical pattern to its frequency) typedef std::map<InitPattern, DomainSupport*> InitMap; typedef std::unordered_map<QPattern, DomainSupport*> QpMapDomain; // quick pattern map (mapping quick pattern to its domain support) typedef std::unordered_map<CPattern, DomainSupport*> CgMapDomain; // canonical pattern map (mapping canonical pattern to its domain support) typedef std::unordered_map<unsigned, unsigned> FreqMap; typedef std::unordered_map<unsigned, bool> DomainMap; typedef PerThreadStorage<InitMap> LocalInitMap; typedef PerThreadStorage<QpMapFreq> LocalQpMapFreq; // PerThreadStorage: thread-local quick pattern map typedef PerThreadStorage<CgMapFreq> LocalCgMapFreq; // PerThreadStorage: thread-local canonical pattern map typedef PerThreadStorage<QpMapDomain> LocalQpMapDomain; typedef PerThreadStorage<CgMapDomain> LocalCgMapDomain; class EdgeMiner : public Miner { public: EdgeMiner(Graph *g, unsigned size = 3, int nthreads = 1) { graph = g; max_size = size; numThreads = nthreads; construct_edgemap(); init_localmaps.set_size(nthreads); qp_localmaps.set_size(nthreads); cg_localmaps.set_size(nthreads); } virtual ~EdgeMiner() {} void extend_edge(unsigned level, EmbeddingList& emb_list) { UintList num_new_emb(emb_list.size()); #pragma omp parallel for for (size_t pos = 0; pos < emb_list.size(); pos ++) { EdgeEmbedding emb(level+1); get_embedding(level, pos, emb_list, emb); num_new_emb[pos] = 0; unsigned n = emb.size(); std::set<VertexId> vert_set; if (n > 3) for 
(unsigned i = 0; i < n; i ++) vert_set.insert(emb.get_vertex(i)); for (unsigned i = 0; i < n; ++i) { VertexId src = emb.get_vertex(i); if (emb.get_key(i) == 0) { // TODO: need to fix this IndexT row_begin = graph->edge_begin(src); IndexT row_end = graph->edge_end(src); for (IndexT e = row_begin; e < row_end; e++) { IndexT dst = graph->getEdgeDst(e); BYTE existed = 0; //if (is_frequent_edge[e]) if (!is_edge_automorphism(n, emb, i, src, dst, existed, vert_set)) num_new_emb[pos] ++; } } } emb.clean(); } Ulong new_size = std::accumulate(num_new_emb.begin(), num_new_emb.end(), (Ulong)0); std::cout << "new_size = " << new_size << "\n"; assert(new_size < 4294967296); // TODO: currently do not support vector size larger than 2^32 UintList indices = parallel_prefix_sum(num_new_emb); new_size = indices[indices.size()-1]; emb_list.add_level(new_size); #pragma omp parallel for for (size_t pos = 0; pos < emb_list.size(level); pos ++) { EdgeEmbedding emb(level+1); get_embedding(level, pos, emb_list, emb); unsigned start = indices[pos]; unsigned n = emb.size(); std::set<VertexId> vert_set; if (n > 3) for (unsigned i = 0; i < n; i ++) vert_set.insert(emb.get_vertex(i)); for (unsigned i = 0; i < n; ++i) { IndexT src = emb.get_vertex(i); if (emb.get_key(i) == 0) { IndexT row_begin = graph->edge_begin(src); IndexT row_end = graph->edge_end(src); for (IndexT e = row_begin; e < row_end; e++) { IndexT dst = graph->getEdgeDst(e); BYTE existed = 0; //if (is_frequent_edge[e]) if (!is_edge_automorphism(n, emb, i, src, dst, existed, vert_set)) { emb_list.set_idx(level+1, start, pos); emb_list.set_his(level+1, start, i); emb_list.set_vid(level+1, start++, dst); } } } } } } inline unsigned init_aggregator() { init_map.clear(); for (IndexT src = 0; src < graph->num_vertices(); src ++) { InitMap *lmap = init_localmaps.getLocal(); auto src_label = graph->getData(src); IndexT row_begin = graph->edge_begin(src); IndexT row_end = graph->edge_end(src); for (IndexT e = row_begin; e < row_end; e++) { 
          // (tail of init_aggregator) Scan every out-edge of src and record
          // the single-edge pattern (src_label, dst_label) in this thread's
          // local map.
          IndexT dst = graph->getEdgeDst(e);
          auto dst_label = graph->getData(dst);
          // Keep only the canonical orientation (smaller label first) so each
          // undirected edge pattern is counted once.
          if (src_label <= dst_label) {
            InitPattern key = get_init_pattern(src_label, dst_label);
            if (lmap->find(key) == lmap->end()) {
              // DomainSupport(2): one vertex domain per endpoint.
              (*lmap)[key] = new DomainSupport(2);
              (*lmap)[key]->set_threshold(threshold);
            }
            (*lmap)[key]->add_vertex(0, src);
            (*lmap)[key]->add_vertex(1, dst);
          }
        }
      }
      // Combine the per-thread maps into the global init_map, then count
      // which single-edge patterns meet the support threshold.
      merge_init_map();
      std::cout << "Number of single-edge patterns: " << init_map.size() << "\n";
      unsigned count = 0;
      for (auto it = init_map.begin(); it != init_map.end(); ++it)
        if (it->second->get_support()) count ++;
      return count; // return number of frequent single-edge patterns
    }

    // Aggregate all embeddings at `level` into quick patterns, accumulating
    // per-domain vertex support in thread-local maps (merged later by
    // merge_qp_map).  Also tags each embedding with its quick-pattern id.
    inline void quick_aggregate(unsigned level, EmbeddingList& emb_list) {
      for (auto i = 0; i < numThreads; i++)
        qp_localmaps.getLocal(i)->clear();
      #pragma omp parallel for
      for (size_t pos = 0; pos < emb_list.size(); pos ++) {
        QpMapDomain *lmap = qp_localmaps.getLocal();
        EdgeEmbedding emb(level+1);
        get_embedding(level, pos, emb_list, emb);
        unsigned n = emb.size();
        QPattern qp(emb, true);
        bool qp_existed = false;
        auto it = lmap->find(qp);
        if (it == lmap->end()) {
          // First time this thread sees the pattern: one domain per position.
          (*lmap)[qp] = new DomainSupport(n);
          (*lmap)[qp]->set_threshold(threshold);
          emb_list.set_pid(pos, qp.get_id());
        } else {
          qp_existed = true;
          emb_list.set_pid(pos, (it->first).get_id());
        }
        // Add this embedding's vertices to every domain that has not yet
        // reached the support threshold (early-out optimization).
        for (unsigned i = 0; i < n; i ++) {
          if ((*lmap)[qp]->has_domain_reached_support(i) == false)
            (*lmap)[qp]->add_vertex(i, emb.get_vertex(i));
        }
        // Release the temporary pattern when the map already owned a copy.
        if (qp_existed) qp.clean();
      }
    }

    // Thread-safe insertion into the quick-pattern -> canonical-pattern
    // id map (canonical_aggregate may be called from parallel context).
    void insert_id_map(int qp_id, int cg_id) {
      std::unique_lock<std::mutex> lock(map_mutex);
      id_map.insert(std::make_pair(qp_id, cg_id));
    }

    // aggregate quick patterns into canonical patterns.
    // construct id_map from quick pattern ID (qp_id) to canonical pattern ID (cg_id)
    void canonical_aggregate() {
      id_map.clear();
      for (auto i = 0; i < numThreads; i++)
        cg_localmaps.getLocal(i)->clear();
      for (std::pair<QPattern, DomainSupport*> element : qp_map) {
        CgMapDomain *lmap = cg_localmaps.getLocal();
        unsigned num_domains = element.first.get_size();
        CPattern cg(element.first); // canonicalize the quick pattern
        int qp_id = element.first.get_id();
        int cg_id = cg.get_id();
        insert_id_map(qp_id, cg_id);
        auto it = lmap->find(cg);
        if (it == lmap->end()) {
          (*lmap)[cg] = new DomainSupport(num_domains);
          (*lmap)[cg]->set_threshold(threshold);
          element.first.set_cgid(cg.get_id());
        } else {
          element.first.set_cgid((it->first).get_id());
        }
        // Fold the quick pattern's per-position domains into the canonical
        // pattern's domains, following the vertex-position equivalences.
        VertexPositionEquivalences equivalences;
        element.first.get_equivalences(equivalences);
        for (unsigned i = 0; i < num_domains; i ++) {
          if ((*lmap)[cg]->has_domain_reached_support(i) == false) {
            unsigned qp_idx = cg.get_quick_pattern_index(i);
            assert(qp_idx >= 0 && qp_idx < num_domains);
            UintSet equ_set = equivalences.get_equivalent_set(qp_idx);
            for (unsigned idx : equ_set) {
              DomainSupport *support = element.second;
              if (support->has_domain_reached_support(idx) == false) {
                // add_vertices reports whether the threshold was reached.
                bool reached_threshold = (*lmap)[cg]->add_vertices(i, support->domain_sets[idx]);
                if (reached_threshold) break;
              } else {
                (*lmap)[cg]->set_domain_frequent(i);
                break;
              }
            }
          }
        }
        cg.clean();
      }
    }

    // Merge per-thread single-edge-pattern maps into init_map.
    // NOTE(review): the inner `unsigned i` shadows the outer thread index
    // `i`; behavior is correct but renaming one would aid readability.
    inline void merge_init_map() {
      init_map = *(init_localmaps.getLocal(0));
      for (auto i = 1; i < numThreads; i++) {
        for (auto element : *init_localmaps.getLocal(i)) {
          DomainSupport *support = element.second;
          if (init_map.find(element.first) == init_map.end()) {
            init_map[element.first] = support;
          } else {
            for (unsigned i = 0; i < 2; i ++) {
              if (!init_map[element.first]->has_domain_reached_support(i)) {
                if (support->has_domain_reached_support(i))
                  init_map[element.first]->set_domain_frequent(i);
                else
                  init_map[element.first]->add_vertices(i, support->domain_sets[i]);
              }
            }
          }
        }
      }
    }

    // Merge per-thread quick-pattern maps into qp_map.  First pass adopts
    // unseen patterns by pointer; second pass unions domain sets (skipping
    // the case where the global entry IS this thread's support object).
    inline void merge_qp_map(unsigned num_domains) {
      qp_map.clear();
      qp_map = *(qp_localmaps.getLocal(0));
      for (auto i = 1; i < numThreads; i++) {
        const QpMapDomain *lmap = qp_localmaps.getLocal(i);
        for (auto element : *lmap) {
          if (qp_map.find(element.first) == qp_map.end())
            qp_map[element.first] = element.second;
        }
        for (std::pair<QPattern, DomainSupport*> element : *lmap) {
          DomainSupport *support = element.second;
          for (unsigned i = 0; i < num_domains; i ++) {
            if (!qp_map[element.first]->has_domain_reached_support(i) && qp_map[element.first] != support) {
              if (support->has_domain_reached_support(i))
                qp_map[element.first]->set_domain_frequent(i);
              else
                qp_map[element.first]->add_vertices(i, support->domain_sets[i]);
            }
          }
        }
      }
    }

    // Merge per-thread canonical-pattern maps into cg_map (same two-pass
    // scheme as merge_qp_map).
    inline void merge_cg_map(unsigned num_domains) {
      cg_map.clear();
      cg_map = *(cg_localmaps.getLocal(0));
      for (auto i = 1; i < numThreads; i++) {
        const CgMapDomain *lmap = cg_localmaps.getLocal(i);
        for (auto element : *lmap) {
          if (cg_map.find(element.first) == cg_map.end())
            cg_map[element.first] = element.second;
        }
        for (std::pair<CPattern, DomainSupport*> element : *lmap) {
          DomainSupport *support = element.second;
          for (unsigned i = 0; i < num_domains; i ++) {
            if (!cg_map[element.first]->has_domain_reached_support(i) && cg_map[element.first] != support) {
              if (support->has_domain_reached_support(i))
                cg_map[element.first]->set_domain_frequent(i);
              else
                cg_map[element.first]->add_vertices(i, support->domain_sets[i]);
            }
          }
        }
      }
    }

    // Filtering for FSM
#ifdef ENABLE_LABEL
    // Drop level-1 (single-edge) embeddings whose pattern is infrequent,
    // mark frequent edges, and compact the embedding list in place.
    inline void init_filter(EmbeddingList& emb_list) {
      UintList is_frequent_emb(emb_list.size(), 0);
      #pragma omp parallel for
      for (size_t pos = 0; pos < emb_list.size(); pos ++) {
        VertexId src = emb_list.get_idx(1, pos);
        VertexId dst = emb_list.get_vid(1, pos);
        auto src_label = graph->getData(src);
        auto dst_label = graph->getData(dst);
        InitPattern key = get_init_pattern(src_label, dst_label);
        if (init_map[key]->get_support()) is_frequent_emb[pos] = 1;
      }
      //assert(emb_list.size()*2 == graph->num_edges()); // symmetric graph
      // Mark both directions of every frequent edge (CAS: several
      // embeddings may map to the same edge concurrently).
      is_frequent_edge.resize(graph->num_edges());
      std::fill(is_frequent_edge.begin(), is_frequent_edge.end(), 0);
      #pragma omp parallel for
      for (size_t pos = 0; pos < emb_list.size(); pos ++) {
        if (is_frequent_emb[pos]) {
          VertexId src = emb_list.get_idx(1, pos);
          VertexId dst = emb_list.get_vid(1, pos);
          unsigned eid0 = edge_map[OrderedEdge(src,dst)];
          unsigned eid1 = edge_map[OrderedEdge(dst,src)];
          __sync_bool_compare_and_swap(&is_frequent_edge[eid0], 0, 1);
          __sync_bool_compare_and_swap(&is_frequent_edge[eid1], 0, 1);
        }
      }
      std::cout << "Number of frequent edges: " << count(is_frequent_edge.begin(), is_frequent_edge.end(), 1) << "\n";
      // Stream-compact the surviving embeddings using a prefix sum of the
      // keep-flags to compute each survivor's destination slot.
      UintList indices = parallel_prefix_sum(is_frequent_emb);
      auto vid_list0 = emb_list.get_idx_list(1);
      auto vid_list1 = emb_list.get_vid_list(1);
      #pragma omp parallel for
      for (size_t pos = 0; pos < emb_list.size(); pos ++) {
        if (is_frequent_emb[pos]) {
          VertexId src = vid_list0[pos];
          VertexId dst = vid_list1[pos];
          unsigned start = indices[pos];
          emb_list.set_vid(1, start, dst);
          emb_list.set_idx(1, start, src);
        }
      }
      emb_list.remove_tail(indices.back());
    }
#endif

    // Drop embeddings whose canonical pattern is infrequent and compact the
    // level in place (same prefix-sum compaction as init_filter; the final
    // scatter loop here is serial).
    inline void filter(unsigned level, EmbeddingList &emb_list) {
      UintList is_frequent_emb(emb_list.size(), 0);
      #pragma omp parallel for
      for (size_t pos = 0; pos < emb_list.size(); pos ++) {
        unsigned qp_id = emb_list.get_pid(pos);
        unsigned cg_id = id_map.at(qp_id);
        if (domain_support_map.at(cg_id)) is_frequent_emb[pos] = 1;
      }
      UintList indices = parallel_prefix_sum(is_frequent_emb);
      VertexList vid_list = emb_list.get_vid_list(level);
      UintList idx_list = emb_list.get_idx_list(level);
      ByteList his_list = emb_list.get_his_list(level);
      for (size_t pos = 0; pos < emb_list.size(); pos ++) {
        if (is_frequent_emb[pos]) {
          unsigned start = indices[pos];
          VertexId vid = vid_list[pos];
          IndexTy idx = idx_list[pos];
          BYTE his = his_list[pos];
          emb_list.set_idx(level, start, idx);
          emb_list.set_vid(level, start, vid);
          emb_list.set_his(level, start, his);
        }
      }
      emb_list.remove_tail(indices.back());
    }

    // Set the minimum-support threshold used by all DomainSupport objects.
    inline void
    set_threshold(const unsigned minsup) { threshold = minsup; }

    // Print a frequency-based aggregation map (pattern -> count).
    inline void printout_agg(const CgMapFreq &cg_map) {
      for (auto it = cg_map.begin(); it != cg_map.end(); ++it)
        std::cout << "{" << it->first << " --> " << it->second << std::endl;
    }

    // Print the domain-support aggregation results for the member cg_map.
    inline void printout_agg() {
      std::cout << "num_patterns: " << cg_map.size() << " num_quick_patterns: " << qp_map.size() << "\n";
      BoolVec support(cg_map.size());
      int i = 0;
      for (auto it = cg_map.begin(); it != cg_map.end(); ++it) {
        support[i] = it->second->get_support();
        i ++;
      }
      i = 0;
      for (auto it = cg_map.begin(); it != cg_map.end(); ++it) {
        std::cout << "{" << it->first << " --> " << support[i] << std::endl;
        i ++;
      }
    }

    // Record each canonical pattern's frequent/infrequent verdict in
    // domain_support_map and return the number of frequent patterns.
    inline unsigned support_count() {
      domain_support_map.clear();
      unsigned count = 0;
      for (auto it = cg_map.begin(); it != cg_map.end(); ++it) {
        bool support = it->second->get_support();
        domain_support_map.insert(std::make_pair(it->first.get_id(), support));
        if (support) count ++;
      }
      return count;
    }

    // construct edge-map for later use. May not be necessary if Galois has this support
    // Maps each (src,dst) ordered edge to its edge index e.
    // NOTE(review): `auto src = 0` deduces int while num_vertices() is
    // likely unsigned — works, but an explicit IndexT would be cleaner.
    void construct_edgemap() {
      for (auto src = 0; src < graph->num_vertices(); src ++) {
        IndexT row_begin = graph->edge_begin(src);
        IndexT row_end = graph->edge_end(src);
        for (IndexT e = row_begin; e < row_end; e++) {
          auto dst = graph->getEdgeDst(e);
          OrderedEdge edge(src, dst);
          edge_map.insert(std::pair<OrderedEdge, unsigned>(edge, e));
        }
      }
    }

  private:
    unsigned threshold;           // minimum support (minsup)
    InitMap init_map;             // single-edge pattern -> support
    UintMap id_map;               // quick pattern id -> canonical pattern id
    unsigned max_size;            // maximum pattern size to mine
    int numThreads;               // number of worker threads
    FreqMap freq_support_map;
    DomainMap domain_support_map; // canonical pattern id -> is frequent
    std::map<OrderedEdge, unsigned> edge_map; // (src,dst) -> edge index
    std::set<std::pair<VertexId,VertexId> > freq_edge_set;
    std::vector<unsigned> is_frequent_edge;   // per-edge frequent flag
    LocalInitMap init_localmaps; // initialization map, only used for once, no need to clear
    LocalQpMapDomain qp_localmaps; // quick pattern local map for each thread
    LocalCgMapDomain cg_localmaps; // canonical pattern local map for each thread
    QpMapDomain qp_map; // quick pattern map
    CgMapDomain cg_map; // canonical graph map
    std::mutex map_mutex; // guards id_map (see insert_id_map)

    // Canonicalize a label pair so the smaller label comes first.
    inline InitPattern get_init_pattern(BYTE src_label, BYTE dst_label) {
      if (src_label <= dst_label) return std::make_pair(src_label, dst_label);
      else return std::make_pair(dst_label, src_label);
    }

    // Reconstruct the full embedding at (level, pos) by walking the
    // parent-index chain back to level 0.  At level 0 the stored index is
    // itself the vertex id.
    inline void get_embedding(unsigned level, unsigned pos, const EmbeddingList& emb_list, EdgeEmbedding &emb) {
      VertexId vid = emb_list.get_vid(level, pos);
      IndexTy idx = emb_list.get_idx(level, pos);
      BYTE his = emb_list.get_his(level, pos);
      BYTE lab = graph->getData(vid);
      ElementType ele(vid, 0, lab, his);
      emb.set_element(level, ele);
      for (unsigned l = 1; l < level; l ++) {
        vid = emb_list.get_vid(level-l, idx);
        his = emb_list.get_his(level-l, idx);
        lab = graph->getData(vid);
        ElementType ele(vid, 0, lab, his);
        emb.set_element(level-l, ele);
        idx = emb_list.get_idx(level-l, idx);
      }
      lab = graph->getData(idx); // at level 0, idx is the vertex id
      ElementType ele0(idx, 0, lab, 0);
      emb.set_element(0, ele0);
    }

    // Fast automorphism check for embeddings of size <= 3; sets `existed`
    // when dst is already in the embedding.  Returns true if the extension
    // would be a duplicate (automorphic) and should be rejected.
    bool is_quick_automorphism(unsigned size, const EdgeEmbedding& emb, BYTE history, VertexId src, VertexId dst, BYTE& existed) {
      if (dst <= emb.get_vertex(0)) return true;
      if (dst == emb.get_vertex(1)) return true;
      if (history == 0 && dst < emb.get_vertex(1)) return true;
      if (size == 2) {
      } else if (size == 3) {
        if (history == 0 && emb.get_history(2) == 0 && dst <= emb.get_vertex(2)) return true;
        if (history == 0 && emb.get_history(2) == 1 && dst == emb.get_vertex(2)) return true;
        if (history == 1 && emb.get_history(2) == 1 && dst <= emb.get_vertex(2)) return true;
        if (dst == emb.get_vertex(2)) existed = 1;
        //if (!existed && max_size < 4) return true;
      } else {
        std::cout << "Error: should go to detailed check\n";
      }
      return false;
    }

    // Full automorphism check for embeddings of size >= 3: rejects the
    // extension (src,dst) if it duplicates an edge already ordered later
    // in the embedding's canonical edge sequence.
    bool is_edge_automorphism(unsigned size, const EdgeEmbedding& emb, BYTE history, VertexId src, VertexId dst, BYTE& existed, const std::set<VertexId>& vertex_set) {
      if (size < 3) return is_quick_automorphism(size, emb, history, src, dst, existed);
      // check with the first element
      if (dst <= emb.get_vertex(0)) return true;
      if (history == 0 && dst <= emb.get_vertex(1)) return true;
      // check loop edge
      if (dst == emb.get_vertex(emb.get_history(history))) return true;
      if (vertex_set.find(dst) != vertex_set.end()) existed = 1;
      // check to see if there already exists the vertex added;
      // if so, just allow to add edge which is (smaller id -> bigger id)
      if (existed && src > dst) return true;
      std::pair<VertexId, VertexId> added_edge(src, dst);
      for (unsigned index = history + 1; index < emb.size(); ++index) {
        std::pair<VertexId, VertexId> edge;
        edge.first = emb.get_vertex(emb.get_history(index));
        edge.second = emb.get_vertex(index);
        //assert(edge.first != edge.second);
        int cmp = compare(added_edge, edge);
        if(cmp <= 0) return true;
      }
      return false;
    }

    // Normalize a pair in place so first <= second.
    inline void swap(std::pair<VertexId, VertexId>& pair) {
      if (pair.first > pair.second) {
        VertexId tmp = pair.first;
        pair.first = pair.second;
        pair.second = tmp;
      }
    }

    // Lexicographic comparison of two (normalized) edges.
    // NOTE(review): mutates BOTH arguments via swap() — callers in
    // is_edge_automorphism pass locals, so this is currently harmless.
    inline int compare(std::pair<VertexId, VertexId>& oneEdge, std::pair<VertexId, VertexId>& otherEdge) {
      swap(oneEdge);
      swap(otherEdge);
      if(oneEdge.first == otherEdge.first) return oneEdge.second - otherEdge.second;
      else return oneEdge.first - otherEdge.first;
    }
  };

#endif // EDGE_MINER_HPP_
GB_binop__le_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__le_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__le_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__le_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__le_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__le_uint16) // A*D function (colscale): GB (_AxD__le_uint16) // D*A function (rowscale): GB (_DxB__le_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__le_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__le_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_uint16) // C=scalar+B GB (_bind1st__le_uint16) // C=scalar+B' GB (_bind1st_tran__le_uint16) // C=A+scalar GB (_bind2nd__le_uint16) // C=A'+scalar GB (_bind2nd_tran__le_uint16) // C type: bool // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 
0 // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LE || GxB_NO_UINT16 || GxB_NO_LE_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__le_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__le_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__le_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__le_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__le_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__le_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__le_uint16) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__le_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__le_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__le_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__le_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for 
(p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__le_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__le_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__le_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
seq_multivector.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE. See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision: 1.9 $
 ***********************************************************************EHEADER*/

/******************************************************************************
 *
 * Member functions for hypre_Vector class.
 *
 *****************************************************************************/

#include "seq_multivector.h"
#include "_hypre_utilities.h"

#include <stdlib.h>
#include <string.h>
#include <assert.h>

/*--------------------------------------------------------------------------
 * hypre_SeqMultivectorCreate
 *
 * Allocate a multivector header for `num_vectors` vectors of length `size`.
 * Data storage is NOT allocated here (see hypre_SeqMultivectorInitialize).
 * NOTE(review): hypre_MAlloc's result is not checked — presumably hypre
 * aborts internally on allocation failure; confirm.
 *--------------------------------------------------------------------------*/

hypre_Multivector *
hypre_SeqMultivectorCreate( HYPRE_Int size, HYPRE_Int num_vectors )
{
   hypre_Multivector *mvector;

   mvector = (hypre_Multivector *) hypre_MAlloc(sizeof(hypre_Multivector));

   hypre_MultivectorNumVectors(mvector) = num_vectors;
   hypre_MultivectorSize(mvector) = size;

   hypre_MultivectorOwnsData(mvector) = 1;
   hypre_MultivectorData(mvector) = NULL;

   /* active-vector mask is created lazily in Initialize/SetMask */
   mvector->num_active_vectors = 0;
   mvector->active_indices = NULL;

   return mvector;
}

/*--------------------------------------------------------------------------
 * hypre_SeqMultivectorInitialize
 *
 * Allocate the (size x num_vectors) data array if not already present, and
 * create the "active" mask with all vectors active.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqMultivectorInitialize( hypre_Multivector *mvector )
{
   HYPRE_Int ierr = 0, i, size, num_vectors;

   size        = hypre_MultivectorSize(mvector);
   num_vectors = hypre_MultivectorNumVectors(mvector);

   if (NULL==hypre_MultivectorData(mvector))
      hypre_MultivectorData(mvector) =
         (double *) hypre_MAlloc(sizeof(double)*size*num_vectors);

   /* now we create a "mask" of "active" vectors; initially all active */
   if (NULL==mvector->active_indices)
   {
      mvector->active_indices = hypre_CTAlloc(HYPRE_Int, num_vectors);

      for (i=0; i<num_vectors; i++)
         mvector->active_indices[i] = i;

      mvector->num_active_vectors = num_vectors;
   }

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqMultivectorSetDataOwner
 *
 * Set whether the multivector owns (and must free) its data array.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqMultivectorSetDataOwner(hypre_Multivector *mvector, HYPRE_Int owns_data)
{
   HYPRE_Int ierr = 0;

   hypre_MultivectorOwnsData(mvector) = owns_data;

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqMultivectorDestroy
 *
 * Free the data array (if owned), the active-index mask, and the header.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqMultivectorDestroy(hypre_Multivector *mvector)
{
   HYPRE_Int ierr = 0;

   if (NULL!=mvector)
   {
      if (hypre_MultivectorOwnsData(mvector) && NULL!=hypre_MultivectorData(mvector))
         hypre_TFree( hypre_MultivectorData(mvector) );

      if (NULL!=mvector->active_indices)
         hypre_TFree(mvector->active_indices);

      hypre_TFree(mvector);
   }
   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqMultivectorSetMask
 * (this routine accepts mask in "zeros and ones format, and converts it to
 * the one used in the structure "hypre_Multivector")
 * A NULL mask activates all vectors.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqMultivectorSetMask(hypre_Multivector *mvector, HYPRE_Int * mask)
{
   HYPRE_Int i, num_vectors = mvector->num_vectors;

   if (mvector->active_indices != NULL)
      hypre_TFree(mvector->active_indices);

   mvector->active_indices = hypre_CTAlloc(HYPRE_Int, num_vectors);

   mvector->num_active_vectors = 0;

   if (mask!=NULL)
      for (i=0; i<num_vectors; i++)
      {
         if ( mask[i] )
            mvector->active_indices[mvector->num_active_vectors++]=i;
      }
   else
      for (i=0; i<num_vectors; i++)
         mvector->active_indices[mvector->num_active_vectors++]=i;

   return 0;
}

/*--------------------------------------------------------------------------
 * hypre_SeqMultivectorSetConstantValues
 *
 * Set every entry of every ACTIVE vector to `value`.  The all-active case
 * uses one flat loop over the whole data array.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqMultivectorSetConstantValues(hypre_Multivector *v, double value)
{
   HYPRE_Int i, j, start_offset, end_offset;
   HYPRE_Int size = hypre_MultivectorSize(v);
   double *vector_data = hypre_MultivectorData(v);

   if (v->num_active_vectors == v->num_vectors)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < v->num_vectors*size; j++)
         vector_data[j] = value;
   }
   else
   {
      for (i = 0; i < v->num_active_vectors; i++)
      {
         start_offset = v->active_indices[i]*size;
         end_offset = start_offset+size;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for (j = start_offset; j < end_offset; j++)
            vector_data[j]= value;
      }
   }
   return 0;
}

/*--------------------------------------------------------------------------
 * hypre_SeqMultivectorSetRandomValues
 *
 * returns vector of values randomly distributed between -1.0 and +1.0
 * Sequential on purpose: see the note about hypre_Rand() below.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqMultivectorSetRandomValues(hypre_Multivector *v, HYPRE_Int seed)
{
   HYPRE_Int i, j, start_offset, end_offset;
   HYPRE_Int size = hypre_MultivectorSize(v);
   double *vector_data = hypre_MultivectorData(v);

   hypre_SeedRand(seed);

   /* comment from vector.c: RDF: threading this loop may cause problems
      because of hypre_Rand() */

   if (v->num_active_vectors == v->num_vectors)
   {
      for (j = 0; j < v->num_vectors*size; j++)
         vector_data[j] = 2.0 * hypre_Rand() - 1.0;
   }
   else
   {
      for (i = 0; i < v->num_active_vectors; i++)
      {
         start_offset = v->active_indices[i]*size;
         end_offset = start_offset+size;

         for (j = start_offset; j < end_offset; j++)
            vector_data[j]= 2.0 * hypre_Rand() - 1.0;
      }
   }
   return 0;
}
/*-------------------------------------------------------------------------- * hypre_SeqMultivectorCopy * copies data from x to y * y should have already been initialized at the same size as x *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SeqMultivectorCopy(hypre_Multivector *x, hypre_Multivector *y) { HYPRE_Int i, size, num_bytes, num_active_vectors, *x_active_ind, * y_active_ind; double *x_data, *y_data, *dest, * src; hypre_assert (x->size == y->size && x->num_active_vectors == y->num_active_vectors); num_active_vectors = x->num_active_vectors; size = x->size; x_data = x->data; y_data = y->data; x_active_ind=x->active_indices; y_active_ind=y->active_indices; if (x->num_active_vectors == x->num_vectors && y->num_active_vectors == y->num_vectors) { num_bytes = x->num_vectors * size * sizeof(double); memcpy(y_data, x_data, num_bytes); } else { num_bytes = size*sizeof(double); for (i=0; i < num_active_vectors; i++) { src=x_data + size * x_active_ind[i]; dest = y_data + size * y_active_ind[i]; memcpy(dest,src,num_bytes); } } return 0; } HYPRE_Int hypre_SeqMultivectorCopyWithoutMask(hypre_Multivector *x , hypre_Multivector *y) { HYPRE_Int byte_count; hypre_assert (x->size == y->size && x->num_vectors == y->num_vectors); byte_count = sizeof(double) * x->size * x->num_vectors; memcpy(y->data,x->data,byte_count); return 0; } /*-------------------------------------------------------------------------- * hypre_SeqMultivectorAxpy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SeqMultivectorAxpy(double alpha, hypre_Multivector *x, hypre_Multivector *y) { HYPRE_Int i, j, size, num_active_vectors, *x_active_ind, *y_active_ind; double *x_data, *y_data, *src, *dest; hypre_assert (x->size == y->size && x->num_active_vectors == y->num_active_vectors); x_data = x->data; y_data = y->data; size = x->size; num_active_vectors = x->num_active_vectors; x_active_ind = x->active_indices; 
y_active_ind = y->active_indices; if (x->num_active_vectors == x->num_vectors && y->num_active_vectors == y->num_vectors) { for(i = 0; i < x->num_vectors*size; i++) dest[i] += alpha * src[i]; } else { for(i = 0; i < num_active_vectors; i++) { src = x_data + x_active_ind[i]*size; dest = y_data + y_active_ind[i]*size; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < size; j++) dest[j] += alpha * src[j]; } } return 0; } /*-------------------------------------------------------------------------- * hypre_SeqMultivectorByDiag: " y(<y_mask>) = alpha(<mask>) .* x(<x_mask>) " *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SeqMultivectorByDiag(hypre_Multivector *x, HYPRE_Int *mask, HYPRE_Int n, double *alpha, hypre_Multivector *y) { HYPRE_Int i, j, size, num_active_vectors, *x_active_ind, *y_active_ind; HYPRE_Int *al_active_ind, num_active_als; double *x_data, *y_data, *dest, *src, current_alpha; hypre_assert (x->size == y->size && x->num_active_vectors == y->num_active_vectors); /* build list of active indices in alpha */ al_active_ind = hypre_TAlloc(HYPRE_Int,n); num_active_als = 0; if (mask!=NULL) for (i=0; i<n; i++) { if (mask[i]) al_active_ind[num_active_als++]=i; } else for (i=0; i<n; i++) al_active_ind[num_active_als++]=i; hypre_assert (num_active_als==x->num_active_vectors); x_data = x->data; y_data = y->data; size = x->size; num_active_vectors = x->num_active_vectors; x_active_ind = x->active_indices; y_active_ind = y->active_indices; for(i = 0; i < num_active_vectors; i++) { src = x_data + x_active_ind[i]*size; dest = y_data + y_active_ind[i]*size; current_alpha=alpha[ al_active_ind[i] ]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < size; j++) dest[j] = current_alpha*src[j]; } hypre_TFree(al_active_ind); return 0; } /*-------------------------------------------------------------------------- * 
hypre_SeqMultivectorInnerProd
 *
 * Computes all pairwise dot products between the active vectors of x and y.
 * `results` must point to a contiguous array of
 * (x_num_active_vectors X y_num_active_vectors) doubles; it is filled in
 * column-wise order (inner loop over x).  Returns 0.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorInnerProd(hypre_Multivector *x, hypre_Multivector *y,
                              double *results )
{
   HYPRE_Int i, j, k, size, *x_active_ind, *y_active_ind;
   HYPRE_Int x_num_active_vectors, y_num_active_vectors;
   double *x_data, *y_data, *y_ptr, *x_ptr, current_product;

   hypre_assert (x->size==y->size);

   x_data = x->data;
   y_data = y->data;
   size = x->size;
   x_num_active_vectors = x->num_active_vectors;
   y_num_active_vectors = y->num_active_vectors;

   /* we assume that "results" points to contiguous array of
      (x_num_active_vectors X y_num_active_vectors) doubles */

   x_active_ind = x->active_indices;
   y_active_ind = y->active_indices;

   for (j = 0; j < y_num_active_vectors; j++)
   {
      y_ptr = y_data + y_active_ind[j]*size;
      for (i = 0; i < x_num_active_vectors; i++)
      {
         x_ptr = x_data + x_active_ind[i]*size;
         current_product = 0.0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) reduction(+:current_product) HYPRE_SMP_SCHEDULE
#endif
         for (k = 0; k < size; k++)
            current_product += x_ptr[k]*y_ptr[k];

         /* column-wise storage for results */
         *results++ = current_product;
      }
   }
   return 0;
}

/*--------------------------------------------------------------------------
 * hypre_SeqMultivectorInnerProdDiag
 *
 * Computes only the diagonal of the inner-product matrix: the dot product
 * of each active vector of x with the matching active vector of y, written
 * to diagResults in order (body continues past this line).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorInnerProdDiag(hypre_Multivector *x, hypre_Multivector *y,
                                  double *diagResults)
{
   double *x_data, *y_data, *y_ptr, *x_ptr, current_product;
   HYPRE_Int i, k, size, num_active_vectors, *x_active_ind, *y_active_ind;

   hypre_assert(x->size==y->size &&
                x->num_active_vectors == y->num_active_vectors);

   x_data = x->data;
   y_data = y->data;
   size = x->size;
   num_active_vectors = x->num_active_vectors;
   x_active_ind = x->active_indices;
   y_active_ind = y->active_indices;

   for (i=0; i<num_active_vectors; i++)
   {
      x_ptr = x_data + x_active_ind[i]*size;
      y_ptr = y_data + y_active_ind[i]*size;
current_product = 0.0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) reduction(+:current_product) HYPRE_SMP_SCHEDULE
#endif
      for (k=0; k<size; k++)
         current_product += x_ptr[k]*y_ptr[k];

      *diagResults++ = current_product;
   }
   return 0;
}

/* y = x * R:  each active column j of y becomes the linear combination of
 * the active columns of x with coefficients from column j of the dense
 * rHeight-by-rWidth matrix rVal (stored with leading dimension rGHeight;
 * `gap` skips the unused tail of each column).  Returns 0. */
HYPRE_Int
hypre_SeqMultivectorByMatrix(hypre_Multivector *x, HYPRE_Int rGHeight,
                             HYPRE_Int rHeight, HYPRE_Int rWidth,
                             double* rVal, hypre_Multivector *y)
{
   HYPRE_Int i, j, k, size, gap, *x_active_ind, *y_active_ind;
   double *x_data, *y_data, *x_ptr, *y_ptr, current_coef;

   hypre_assert(rHeight>0);
   hypre_assert (rHeight==x->num_active_vectors &&
                 rWidth==y->num_active_vectors);

   x_data = x->data;
   y_data = y->data;
   size = x->size;
   x_active_ind = x->active_indices;
   y_active_ind = y->active_indices;
   gap = rGHeight - rHeight;

   for (j=0; j<rWidth; j++)
   {
      y_ptr = y_data + y_active_ind[j]*size;

      /* ------ set current "y" to first member in a sum ------ */
      x_ptr = x_data + x_active_ind[0]*size;
      current_coef = *rVal++;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
      for (k=0; k<size; k++)
         y_ptr[k] = current_coef * x_ptr[k];

      /* ------ now add all other members of a sum to "y" ----- */
      for (i=1; i<rHeight; i++)
      {
         x_ptr = x_data + x_active_ind[i]*size;
         current_coef = *rVal++;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
         for (k=0; k<size; k++)
            y_ptr[k] += current_coef * x_ptr[k];
      }
      /* skip the unused tail of this coefficient column */
      rVal += gap;
   }
   return 0;
}

/* y += x * R: like ByMatrix but accumulates into y instead of overwriting
 * (body continues past this line). */
HYPRE_Int
hypre_SeqMultivectorXapy (hypre_Multivector *x, HYPRE_Int rGHeight,
                          HYPRE_Int rHeight, HYPRE_Int rWidth,
                          double* rVal, hypre_Multivector *y)
{
   double *x_data, *y_data, *x_ptr, *y_ptr, current_coef;
   HYPRE_Int i, j, k, size, gap, *x_active_ind, *y_active_ind;

   hypre_assert (rHeight==x->num_active_vectors &&
                 rWidth==y->num_active_vectors);

   x_data = x->data;
   y_data = y->data;
   size = x->size;
   x_active_ind = x->active_indices;
   y_active_ind = y->active_indices;
   gap = rGHeight - rHeight;

   for (j=0; j<rWidth; j++)
   {
      y_ptr = y_data + y_active_ind[j]*size;
      for
(i=0; i<rHeight; i++)
      {
         x_ptr = x_data + x_active_ind[i]*size;
         current_coef = *rVal++;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
         /* accumulate coefficient * column into y */
         for (k=0; k<size; k++)
            y_ptr[k] += current_coef * x_ptr[k];
      }
      /* skip the unused tail of this coefficient column */
      rVal += gap;
   }
   return 0;
}
GB_unaryop__ainv_uint16_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint16_fp32
// op(A') function:  GB_tran__ainv_uint16_fp32

// C type:   uint16_t
// A type:   float
// cast:     uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16)
// unaryop:  cij = -aij

#define GB_ATYPE \
    float

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, x) \
    uint16_t z ; GB_CAST_UNSIGNED(z,x,16) ;

// cij = op (cast (aij))
// Note the order: the float is first cast to uint16_t via GB_CASTING, then
// negated by GB_OP in the unsigned domain (i.e. the result wraps mod 2^16).
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT16 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply over the anz entries of Ax, parallelized statically
// over nthreads.  Returns GrB_NO_VALUE when the operator is compiled out.
GrB_Info GB_unop__ainv_uint16_fp32
(
    uint16_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the shared template
// GB_unaryop_transpose.c, specialized here via the macros above.
GrB_Info GB_tran__ainv_uint16_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
cg_aux.h
//MIT License // //Copyright (c) 2018 Sicong Zhuang // //Permission is hereby granted, free of charge, to any person obtaining a copy //of this software and associated documentation files (the "Software"), to deal //in the Software without restriction, including without limitation the rights //to use, copy, modify, merge, publish, distribute, sublicense, and/or sell //copies of the Software, and to permit persons to whom the Software is //furnished to do so, subject to the following conditions: // //The above copyright notice and this permission notice shall be included in all //copies or substantial portions of the Software. // //THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR //IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, //FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE //AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER //LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, //OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE //SOFTWARE. 
#ifndef __CG_AUX_H__
#define __CG_AUX_H__

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>

#include "vector.h"
#include "csparse.h"
#include "hb_io.h"

/* Double-precision bindings for the scalar math helpers used by the solver. */
#define FP_SQRT sqrt
#define FP_RAND drand48
#define FP_SEED srand48
#define FP_ABS fabs
#define FP_EXP frexp
#define FP_LOG10 log10
#define FP_POW pow
#define FP_SCANSPEC scan_dconspec

/* BLAS backend selection: MKL, plain CBLAS+hand-rolled csrmv, or nothing. */
#ifdef INTEL_MKL
#include "mkl.h"
#define BLAS_cp(n, dx, incx, dy, incy) cblas_dcopy(n, dx, incx, dy, incy)
#define BLAS_dot(n, dx, incx, dy, incy) cblas_ddot(n, dx, incx, dy, incy)
#define BLAS_axpy(n, da, dx, incx, dy, incy) cblas_daxpy(n, da, dx, incx, dy, incy)
#define SBLAS_csrmv(trans, m, n, alpha, matdescra, avval, avpos, avptr, avptr1, Bptr, beta, Cptr) \
	mkl_dcsrmv(trans, &m, &n, &alpha, matdescra, avval, avpos, avptr, avptr1, Bptr, &beta, Cptr)
#elif defined LAPACK
#include "cblas.h"
#define BLAS_cp(n, dx, incx, dy, incy) cblas_dcopy(n, dx, incx, dy, incy)
#define BLAS_dot(n, dx, incx, dy, incy) cblas_ddot(n, dx, incx, dy, incy)
#define BLAS_axpy(n, da, dx, incx, dy, incy) cblas_daxpy(n, da, dx, incx, dy, incy)
/* NOTE: matdescra/avptr1 are accepted for signature compatibility but
 * ignored by manual_csrmv. */
#define SBLAS_csrmv(trans, m, n, alpha, matdescra, avval, avpos, avptr, avptr1, Bptr, beta, Cptr) \
	manual_csrmv(trans, m, n, alpha, avval, avpos, avptr, Bptr, beta, Cptr)
#endif

/* Sparse matrix in Harwell-Boeing / CSR-like storage; also used as a
 * hyper-matrix whose vval entries are pointers to sub-blocks (see
 * hbsblas_dcsrmv below, which iterates vval as hbmat_t**). */
typedef struct strhbmat {
	int m, n;                 /* matrix dimensions */
	int elemc;                /* number of stored elements */
	int *vptr;                /* row (or block-row) start offsets */
	int *vpos;                /* column (or block-column) indices */
	void *vval;               /* values: double* for leaf blocks,
	                             hbmat_t** for hyper-matrices */
	int *vdiag;
	int *udiagc;
	int b;                    /* block size -- TODO confirm */
	int type;
	struct strhbmat *orig;
	struct strhbmat *trans;
	struct strhbmat *hyper;
	int orig_row;
	int orig_col;
	int *e_tree;              /* elimination tree -- presumably; confirm */
	int FACT;
	/*
	 * The following for hyper-matrix only
	 */
	int *vptr_pool;
	int *vpos_pool;
	void *vval_pool;
	int vptr_unit;
	int vpos_unit;
	int vval_unit;
	int vptr_pp;
	int vpos_pp;
	int vval_pp;
	// pthread_mutex_t* mtx;
} hbmat_t;

extern const char *scan_dconspec;
extern const char *scan_sconspec;

/* Harwell-Boeing I/O and conversion helpers (defined elsewhere). */
void hb_read_double(char *input_file, int *m, int *n, int *elemc, int **vptr, int **vpos, double **vval);
void hb_reset(hbmat_t *A);
void one2zero(hbmat_t* in_matrix);
void hb_sym_expand(hbmat_t *A, hbmat_t *B);
void hb_init_basic(hbmat_t *A, hbmat_t *B);
void hb_free(hbmat_t *A);
void* __hb2hbh_block(int I, int J, hbmat_t *A, int b, hbmat_t *Bp) ;
hbmat_t* hb2hbh(hbmat_t *A, int b, int is_csr);
void hb_sym_diag_block(hbmat_t *src_mat, int bsze, hbmat_t *diagb);
int read_mm2dense(FILE *f, int m, int n, double *A);
void print_dense2mm(FILE *f, const char *name, int m, int n, const double *A, int lda);
void fprint_dense2mm(const char *fname, const char *name, int m, int n, const double *A, int lda);

/* Blocked copy of an m-by-n column-major array X into Y, tiled bm-by-bn;
 * each tile becomes one __t_copy task (see task prototype below). */
static inline void __attribute__((always_inline)) bblas_dcopy(int p, int bm, int bn, int m, int n, double *X, double *Y)
{
	int i;
	for ( i=0; i<m; i+=bm ) {
		int cs = m - i;
		int c = cs < bm ? cs : bm;
		int j;
		for ( j=0; j<n; j+=bn ) {
			int ds = n - j;
			int d = ds < bn ? ds : bn;
			__t_copy(p, c, d, m, n, X, Y, j*m+i, j*m+i);
//			__t_copy(p, c, d, m, n, &X[j*m+i], &Y[j*m+i]);
		}
	}
}

/* C = alpha*A*B (+beta*C on the first block of each row) for a hyper-matrix
 * A whose leaf blocks are CSR; one OmpSs task per block csrmv.  The in/out
 * array-section clauses are OmpSs dependence annotations. */
static inline void __attribute__((always_inline)) hbsblas_dcsrmv(int p, int b, double alpha, hbmat_t *Ahbh, double *B, double beta, double *C)
{
	int M = Ahbh->m;
	int N = Ahbh->n;
	int *vptr = Ahbh->vptr;
	int *vpos = Ahbh->vpos;
	hbmat_t **vval = Ahbh->vval;
	int offs = vptr[0] == 0 ? 0 : 1; //Detect zero/one based
	int cmaj = 1;

	char *trans = "N";
	char *matdescra = "GLNC";
	double fp_one = 1.0;

	int I;
	for ( I = 0; I < M; ++I ) {
		double *Cptr = &C[I*b];
		int first = 1;
		int J;
		for ( J = vptr[I]; J < vptr[I+1]; J++ ) {
			hbmat_t *A = vval[J];
			int icol = vpos[J];
			double *Bptr = &B[icol*b];
			double *avval = A->vval;
			int *avpos = A->vpos;
			int *avptr = A->vptr;
			int m = A->m;
			int n = A->n;
			if ( first ) {
				/* first block in the row applies beta to C */
				#pragma omp task in(B[icol*b:icol*b+n-1]) out(C[I*b:I*b+m-1]) no_copy_deps label(csrmv_hbh) priority(p)
				SBLAS_csrmv(trans, m, n, alpha, matdescra, avval, avpos, avptr, avptr+1, Bptr, beta, Cptr);
//				mkl_dcsrmv(trans, &m, &n, &alpha, matdescra, avval, avpos, avptr, avptr+1, Bptr, &beta, Cptr);
				first = 0;
			} else {
				/* subsequent blocks accumulate (beta = 1.0) */
				#pragma omp task in(B[icol*b:icol*b+n-1]) out(C[I*b:I*b+m-1]) no_copy_deps label(csrmv_hbh) priority(p)
//				#pragma omp task in(B[icol*b;n]) out(C[I*b;m]) no_copy_deps label(csrmv_hbh) priority(p)
				SBLAS_csrmv(trans, m, n, alpha, matdescra, avval, avpos, avptr, avptr+1, Bptr, fp_one, Cptr);
//				mkl_dcsrmv(trans, &m, &n, &alpha, matdescra, avval, avpos, avptr, avptr+1, Bptr, &fp_one, Cptr);
			}
		}
	}
}

/* Blocked Cholesky solve: one cs_cholsol2 task per b-sized chunk of B/x,
 * using the per-block symbolic (S) and numeric (N) factorizations. */
static inline void __attribute__((always_inline)) bsblas_dcholsolv2(int p, int b, int m, css **S, csn **N, double *B, double *x)
{
	int idx;
	int i;
	for ( i = 0, idx = 0; i < m; i+=b, idx++) {
		int bs = b < m-i ? b : m-i;
		css *sptr = S[idx];
		csn *nptr = N[idx];
		double *bptr = &B[i];
		double *xptr = &x[i];
		#pragma omp task in(B[i:i+bs-1]) out(x[i:i+bs-1]) label(dcholsolv2)
		cs_cholsol2(bs, sptr, nptr, bptr, xptr);
	}
}

/* Sequential variant of bsblas_dcholsolv2 (no tasks). */
static inline void __attribute__((always_inline)) bsblas_dcholsolv2_seq(int p, int b, int m, css **S, csn **N, double *B, double *x)
{
	int idx;
	int i;
	for ( i = 0, idx = 0; i < m; i+=b, idx++) {
		int bs = b < m-i ? b : m-i;
		css *sptr = S[idx];
		csn *nptr = N[idx];
		double *bptr = &B[i];
		double *xptr = &x[i];
		cs_cholsol2(bs, sptr, nptr, bptr, xptr);
	}
}

/* Whole-vector Cholesky solve wrapped in a single task. */
static inline void __attribute__((always_inline)) dcholsolv2_blk(int p, int m, css *S, csn *N, double *B, double *x)
{
	#pragma omp task in(B[0:m-1]) out(x[0:m-1]) label(dcholsolv2_blk)
	cs_cholsol2(m, S, N, B, x);
}

/* Blocked Cholesky solve nested inside an outer task; inner tasks carry no
 * dependences and are joined with taskwait before the outer task completes. */
static inline void __attribute__((always_inline)) dcholsolv2_nested(int p, int b, int m, css **S, csn **N, double *B, double *x)
{
	#pragma omp task in(B[0:m-1]) out(x[0:m-1]) label(dcholsolv2_nested)
	{
		int idx;
		int i;
		for ( i = 0, idx = 0; i < m; i+=b, idx++) {
			int bs = b < m-i ? b : m-i;
			css *sptr = S[idx];
			csn *nptr = N[idx];
			double *bptr = &B[i];
			double *xptr = &x[i];
			#pragma omp task label(dcholsolv2_in) //in([bs]bptr) out([bs]xptr) label(dcholsolv2_in)
			cs_cholsol2(bs, sptr, nptr, bptr, xptr);
		}
		#pragma omp taskwait
	}
}

/* Blocked column-wise dot products: result[j-block] accumulates the dot of
 * the corresponding columns of X and Y, one __t_dot task per tile. */
static inline void __attribute__((always_inline)) bblas_ddot(int p, int bm, int bn, int m, int n, double *X, double *Y, double *result)
{
	int j;
	for ( j=0; j<n; j+=bn ) {
		int ds = n - j;
		int d = ds < bn ? ds : bn;
		int idx;
		int i;
		for ( i=0, idx=0; i<m; i+=bm, ++idx ) {
			int cs = m - i;
			int c = cs < bm ? cs : bm;
			__t_dot(p, c, d, m, n, X, Y, j*m+i, j*m+i, result);
//			__t_dot(p, c, d, m, n, &X[j*m+i], &Y[j*m+i], result);
		}
		result += bn;
	}
}

/* Blocked combined copy-axpy over tile pairs (see __t_cpaxpy_comb task). */
static inline void __attribute__((always_inline)) bblas_dcpaxpy_comb(int bm, int bn, int m, int n, double alpha, double *Anum, double *Aden, double *X1, double *X2, double *Y1, double *Y2, double *Z1, double *Z2)
{
	int i;
	for ( i=0; i<m; i+=bm ) {
		int cs = m - i;
		int c = cs < bm ? cs : bm;
		int j;
		for ( j=0; j<n; j+=bn ) {
			int ds = n - j;
			int d = ds < bn ? ds : bn;
			__t_cpaxpy_comb(c, d, m, n, alpha, &Anum[j], &Aden[j], &X1[j*m+i], &X2[j*m+i], &Y1[j*m+i], &Y2[j*m+i], &Z1[j*m+i], &Z2[j*m+i]);
		}
	}
}

/* Blocked axpy with per-column scale SAnum/SAden (see __t_extm_axpy task). */
static inline void __attribute__((always_inline)) bblas_extm_daxpy(int p, int bm, int bn, int m, int n, double *SAnum, double *SAden, double *X, double *Y, double *Z)
{
	int i;
	for ( i=0; i<m; i+=bm ) {
		int cs = m - i;
		int c = cs < bm ? cs : bm;
		int j;
		for ( j=0; j<n; j+=bn) {
			int ds = n - j;
			int d = ds < bn ? ds : bn;
			__t_extm_axpy(c, d, m, n, &SAnum[j], &SAden[j], &X[j*m+i], &Y[j*m+i], &Z[j*m+i], p);
		}
	}
}

/* Fused pair of blocked dot products (X.Y -> result, A.B -> result2),
 * one _cg_dot2 task per tile. */
static inline __attribute__((always_inline)) void cg_ddot2(int p, int bm, int bn, int m, int n, double *X, double *Y, double *result, double *A, double *B, double *result2)
{
	int j;
	for ( j=0; j<n; j+=bn ) {
		int ds = n - j;
		int d = ds < bn ? ds : bn;
		int idx;
		int i;
		for ( i=0, idx=0; i<m; i+=bm, ++idx ) {
			int cs = m - i;
			int c = cs < bm ? cs : bm;
			_cg_dot2(p, c, d, m, n, X, Y, j*m+i, j*m+i, result, A, B, j*m+i, j*m+i, result2);
		}
		result += bn;
		result2 += bn;
	}
}

/* Like bblas_ddot but each row-tile writes its own slot (idx) of result. */
static inline void __attribute__((always_inline)) bblas_ddot_array(int p, int bm, int bn, int m, int n, double *X, double *Y, double *result)
{
	int j;
	for ( j=0; j<n; j+=bn ) {
		int ds = n - j;
		int d = ds < bn ? ds : bn;
		int idx;
		int i;
		for ( i=0, idx=0; i<m; i+=bm, ++idx ) {
			int cs = m - i;
			int c = cs < bm ? cs : bm;
			__t_dot_array(p, c, d, m, n, X, Y, j*m+i, j*m+i, result, idx);
		}
		result += bn;
	}
}

/* Fused pair of blocked dot products with per-tile result slots. */
static inline __attribute__((always_inline)) void cg_ddot2_array(int p, int bm, int bn, int m, int n, double *X, double *Y, double *result, double *A, double *B, double *result2)
{
	int j;
	for ( j=0; j<n; j+=bn ) {
		int ds = n - j;
		int d = ds < bn ? ds : bn;
		int idx;
		int i;
		for (i=0, idx=0; i<m; i+=bm, idx++) {
			int cs = m - i;
			int c = cs < bm ? cs : bm;
			_cg_dot2_array(p, c, d, m, n, X, Y, j*m+i, j*m+i, result, idx, A, B, j*m+i, j*m+i, result2, idx);
		}
		result += bn;
		result2 += bn;
	}
}

/* Task prototypes (implemented elsewhere).  The pragmas declare OmpSs task
 * dependences over array sections of the pointer arguments. */
#pragma omp task in(X[initx:initx+bm-1]) out(Y[inity:inity+bm-1]) priority(p) label(dcopy) no_copy_deps
void __t_copy(int p, int bm, int bn, int m, int n, double *X, double *Y, int initx, int inity);

#pragma omp task in(X[initx:initx+bm-1], Y[inity:inity+bm-1]) concurrent(result[0:bn-1]) no_copy_deps priority(p) label(ddot)
void __t_dot(int p, int bm, int bn, int m, int n, double *X, double *Y, int initx, int inity, double *result);

#pragma omp task in(X[initx:initx+bm-1], Y[inity:inity+bm-1]) concurrent(result[0:(m+bm-1)/bm-1]) no_copy_deps priority(p) label(ddot_array)
void __t_dot_array(int p, int bm, int bn, int m, int n, double *X, double *Y, int initx, int inity, double *result, int initr);

#pragma omp task in(X1[0:bm-1], X2[0:bm-1], Anum[0:bn-1], Aden[0:bn-1], Y1[0:bm-1], Y2[0:bm-1]) out(Z1[0:bm-1], Z2[0:bm-1]) no_copy_deps priority(1) label(dcpaxpy_comb)
void __t_cpaxpy_comb(int bm, int bn, int m, int n, double alpha, double *Anum, double *Aden, double *X1, double *X2, double *Y1, double *Y2, double *Z1, double *Z2);

#pragma omp task in(X[0:bm-1], Y[0:bm-1], SAnum[0:bn-1], SAden[0:bn-1]) out(Z[0:bm-1]) no_copy_deps priority(p) label(extm_axpy)
void __t_extm_axpy(int bm, int bn, int m, int n, double *SAnum, double *SAden, double *X, double *Y, double *Z, int p);

#pragma omp task in(X[initx:initx+bm-1], Y[inity:inity+bm-1], A[inita:inita+bm-1], B[initb:initb+bm-1]) concurrent([bn]result, [bn]result2) no_copy_deps priority(p) label(cg_dot2)
void _cg_dot2(int p, int bm, int bn, int m, int n, double *X, double *Y, int initx, int inity, double *result, double *A, double *B, int inita, int initb, double *result2);

#pragma omp task in(X[initx:initx+bm-1], Y[inity:inity+bm-1], A[inita:inita+bm-1], B[initb:initb+bm-1]) concurrent(result[0:(m+bm-1)/bm-1], result2[0:(m+bm-1)/bm-1]) no_copy_deps priority(p) label(ddot2_array)
void _cg_dot2_array(int p, int bm, int bn, int m, int n, double *X, double *Y, int initx, int inity, double *result, int initr, double *A, double *B, int inita, int initb, double *result2, int initr2);

/* Reference CSR matrix-vector product used when MKL is unavailable. */
void manual_csrmv(char *trans, int m, int n, double alpha, double *avval, int *avpos, int *avptr, double *Bptr, double beta, double *Cptr);

#endif //__CG_AUX_H__
integral.c
/*3456789012345678901234567890123456789012345678901234567890123456789012345678*/
/* Reference value the computed integral is compared against in final_output.
 * NOTE(review): presumably the analytic integral of f over [xmin,xmax] used
 * in main() -- confirm before changing the integration range. */
#define GAE 114474615732576576.000000
/*******************************************************************
 2015-02-10 intcurve

 Calculate an approximation of the integral of

   x^8-x^6+x^4-x^2+1

 from min to max

 This is a single file program without a makefile. It is built:

   mpicc -lm -g intcurve.c -o intcurve
*******************************************************************/

/*******************************************************************
 included headers
*******************************************************************/
#include <mpi.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <limits.h>
#include <omp.h>

/* One solver's final result: its name, the summed area, and the number of
 * entries in the results array it belongs to. */
typedef struct result_record result_record;
struct result_record {
   char * solver;
   double area;
   int size;
};

/*******************************************************************
 pdiv:
 In:  x1  - a double for the first point
      x2  - a double for the second point
      div - a divisor
 Out: return - a double for (x1+x2)/divisor
*******************************************************************/
double pdiv(double x1, double x2, double div)
{
   return ((x1+x2)/div);
}

/*******************************************************************
 line:
 Print a line of 78 = signs to set off program output
 In:  none
 Out: return - none
      stdout - to screen
*******************************************************************/
void line()
{
   int m;
   for (m=0; m<=78; m++) printf("=");
   printf("\n");
}

/*******************************************************************
 startup_message:
 Print a message at program startup
 In:  none
 Out: return - none
      stdout - to screen
*******************************************************************/
void startup_message()
{
   line();
   printf("\n\tStarting calculation\n\n");
   line();
}

/*******************************************************************
 final_output:
 Print the final output
 In:  the calculated area
 Out: return - none
      stdout - to screen
*******************************************************************/
void final_output(result_record * results)
{
   int i;
   double error;
   double offset;

   line();
   /* results[0].size holds the number of records to print */
   for (i=0;i<results[0].size;i++) {
      /* absolute and relative error against the reference value GAE */
      error=results[i].area-GAE;
      offset=((GAE-results[i].area)/GAE)*100.0;
      printf("\n");
      printf("\tFinal area (%8s):\t%30.6lf\n",results[i].solver,results[i].area);
      printf("\tError (s-A):\t\t%30.6f\n",error);
      printf("\tError (a/A)%%:\t\t%30.10lG\n",offset);
   }
   printf("\n");
   line();
}

/*******************************************************************
 f:
 Return the value of the polynomial equation:
   f(x)=x^8-x^6+x^4-x^2+1
 In:  x - a double precision value to evaluate the equation for
 Out: return - a double for the value of the equation at x
*******************************************************************/
double f( double x )
{
   return (pow(x,8)-pow(x,6)+pow(x,4)-pow(x,2)+1);
}

/*******************************************************************
 pavg:
 Return the average value of an equation at a point
 In:  f  - a double precision function
      x1 - a double for the first point
      x2 - a double for the second point
 Out: return - a double for average of the points
*******************************************************************/
double pavg( double(*f)(const double), double x1, double x2 )
{
   return pdiv(f(x1),f(x2),2);
}

/*******************************************************************
 trule:
 Trapezoid rule for one interval: average of f at the endpoints
 times the interval width.
 In:  x1 - a double for the first point
      x2 - a double for the second point
 Out: return - a double for the area of the trapezoid
*******************************************************************/
double trule(double x1, double x2 )
{
   return pavg(&f,x1,x2)*(x2-x1);
}

/*******************************************************************
 default_solve:
 Approximate solver
 In:  xmin - a double for lowest point in range
      xmax - a double for highest point in range
      rank - integer for the rank or thread
      nrank - total number of ranks or
threads samples - total number of samples Out: return - a double for average of the points *******************************************************************/ double default_solve(double xmin, double xmax, int rank, int nranks, long int samples) { long int localsamples=0; double ss=0.0; double lxmin=0.0; double lfxs=0.0; double range=0.0; /* Setup for local evaluation */ localsamples=samples/nranks; range=pdiv(xmax,-xmin,nranks); ss=range/localsamples; lxmin=(range*rank)+xmin; /* Caculate rank-local values */ #pragma omp parallel /* loop counter*/ long int j; for (j=0; j<localsamples; j++ ) { lfxs=pavg(&f,lxmin,lxmin+ss)+lfxs; lxmin=lxmin+ss; } return (lfxs*((xmax-xmin)/samples)); } /******************************************************************* simpson: Solve via simpson's method... donuts! In: xmin - a double for lowest point in range xmax - a double for highest point in range rank - integer for the rank or thread nrank - total number of ranks or threads samples - total number of samples Out: return - a double for average of the points *******************************************************************/ double simpson( double xmin, double xmax, int rank, int nranks, long int samples) { long int localsamples=0; double lxmin=0.00; double lxmax=0.00; double xdiff=(xmax-xmin); double xoffset=0.0; double lsect=0.0; /* make values evenly divisible */ do { localsamples=samples/nranks; samples++; } while((localsamples%2)!=0); xoffset=xdiff/samples; lxmax=(xoffset*localsamples)*(rank+1)+xmin; lxmin=(xoffset*localsamples)*(rank)+xmin; xoffset=(lxmax-lxmin)/localsamples; lsect=f(lxmin)+f(lxmax); #pragma omp parallel { long int i; for(i=1;i<localsamples;i=i+2) lsect=( 4 * f(lxmin+(i*xoffset)))+lsect; for(i=2;i<localsamples-1;i=i+2) lsect=( 2 * f(lxmin+(i*xoffset)))+lsect; } lsect=(xoffset/3)*lsect; return lsect; } /******************************************************************* main: Main routine In: argv and argc - both unused Out: return - an integer value for 
the return code
*******************************************************************/
int main( int argc, char *argv[] )
{
   /* range across which the integral will be evaluated */
   double xmax=100.3333333;
   double xmin=-10.2666667;

   /* set the number of samples used in the approximation */
   long int samples=pow(10,9);

   /* values used in the solve: local (l*) and reduced final (f*) areas */
   double fdfs=0;
   double ldfs=0;
   double fsimpson=0;
   double lsimpson=0;

   result_record results[2];

   /* variables for process MPI information */
   int nranks=1;
   int rank=0;

   MPI_Init(&argc,&argv);
   MPI_Comm_size(MPI_COMM_WORLD,&nranks);
   MPI_Comm_rank(MPI_COMM_WORLD,&rank);

   /* Status to let us know things have started */
   if (rank == 0) {
      startup_message();
   }
   MPI_Barrier(MPI_COMM_WORLD);

   /* Each rank evaluates its own slice of the integral.  (The original
    * wrapped these calls in `for (i=0;i<nranks;i++) if (rank==i)` loops,
    * which executed the body exactly once per process -- equivalent.) */
   ldfs=default_solve(xmin,xmax,rank,nranks,samples);
   lsimpson=simpson(xmin,xmax,rank,nranks,samples);

   /* Output local values for each rank (unsynchronized; order arbitrary,
    * exactly as in the original) */
   printf("rank %3d deflt: % 26.6f simpson: % 26.6f\n",rank,ldfs,lsimpson);

   /* Sum for default solver */
   MPI_Reduce(&ldfs,&fdfs,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
   if (rank == 0) {
      results[0].solver="default";
      results[0].area=fdfs;
      results[0].size=2;
   }

   /* Sum for simpson */
   MPI_Reduce(&lsimpson,&fsimpson,1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
   if (rank == 0) {
      results[1].solver="simpson";
      results[1].area=fsimpson;
      results[1].size=2;
   }

   /* Output final results.
    * BUG FIX: the original passed `&results` (type result_record (*)[2])
    * where final_output() expects result_record*; pass the array itself. */
   if (rank == 0) {
      final_output(results);
   }

   MPI_Finalize();
   return 0;
}
checkpoint-synchronization.c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <omp.h>

/* Checkpoint-synchronization demo: 5 worker threads pull jobs from a shared
 * counter, rendezvousing at a barrier ("checkpoint") before each round.
 *
 * BUG FIXES over the original:
 *  - `jobs--` was unsynchronized, so two threads could take the same job
 *    (and corrupt the counter).  Taking a job now happens in a critical
 *    section.
 *  - the `while (jobs > 0)` exit test raced with the decrements, so some
 *    threads could leave the loop (reaching the trailing barrier) while
 *    others were still at the in-loop barrier -- mismatched barrier regions
 *    are undefined behavior in OpenMP.  All threads now snapshot `jobs`
 *    between two barriers, so every thread exits on the same round and no
 *    trailing barrier is needed.
 */
int main()
{
    int jobs = 41, tid;

    omp_set_num_threads(5);
#pragma omp parallel shared(jobs) private(tid)
    {
        int remaining, myjob;

        tid = omp_get_thread_num();
        for (;;) {
            /* this is the checkpoint: all workers rendezvous here */
#pragma omp barrier
            remaining = jobs;   /* consistent snapshot: nobody writes jobs
                                   between these two barriers */
#pragma omp barrier
            if (remaining <= 0)
                break;          /* every thread breaks on the same round */

            myjob = 0;
#pragma omp critical
            {
                if (jobs > 0)
                    myjob = jobs--;   /* claim one job atomically */
            }
            if (myjob > 0) {
                printf("%d: taking job %d\n", tid, myjob);
                /* NOTE(review): rand() is not guaranteed thread-safe; only
                   used to randomize the sleep, so kept as in the original */
                usleep(100000 + rand() / (double) RAND_MAX * 3000000);
                printf("%d: done job\n", tid);
            }
        }
        printf("[%d] leaving\n", tid);
    }
    return 0;
}
gen_input.ref.c
#include <sys/time.h> #include <time.h> #include <stdio.h> static unsigned long long current_time_ns() { #ifdef __MACH__ clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); unsigned long long s = 1000000000ULL * (unsigned long long)mts.tv_sec; return (unsigned long long)mts.tv_nsec + s; #else struct timespec t ={0,0}; clock_gettime(CLOCK_MONOTONIC, &t); unsigned long long s = 1000000000ULL * (unsigned long long)t.tv_sec; return (((unsigned long long)t.tv_nsec)) + s; #endif } #include <time.h> #include <stdlib.h> #include <stdio.h> #ifdef FP_NUMBER typedef double FP_NUMBER; #else typedef float FP_NUMBER; #endif #define GET_RAND_FP ((FP_NUMBER)rand()/((FP_NUMBER)(RAND_MAX)+(FP_NUMBER)(1))) char L_FNAME[32], U_FNAME[32], A_FNAME[32]; int main (int argc, char **argv){ int i,j,k,MatrixDim; FP_NUMBER sum, *L, *U, *A; FILE *fl,*fu,*fa; if ( argc < 2) { printf("./gen_input [Matrix_Dimension_size]\n"); return 1; } MatrixDim = atoi(argv[1]); L = (FP_NUMBER *) malloc(sizeof(FP_NUMBER*)*MatrixDim*MatrixDim); U = (FP_NUMBER *) malloc(sizeof(FP_NUMBER*)*MatrixDim*MatrixDim); A = (FP_NUMBER *) malloc(sizeof(FP_NUMBER*)*MatrixDim*MatrixDim); if ( !L || !U || !A){ printf("Can not allocate memory\n"); if (L) free(L); if (U) free(U); if (A) free(A); return 1; } srand(time(NULL)); sprintf(L_FNAME, "l-%d.dat", MatrixDim); fl = fopen(L_FNAME, "wb"); if (fl == NULL) { printf("Cannot open file %s\n", L_FNAME); return 1; } sprintf(U_FNAME, "u-%d.dat", MatrixDim); fu = fopen(U_FNAME, "wb"); if (fu == NULL) { printf("Cannot open file %s\n", U_FNAME); return 1; } sprintf(A_FNAME, "%d.dat", MatrixDim); fa = fopen(A_FNAME, "wb"); if (!fa) { printf("Cannot open file %s\n", A_FNAME); return 1; } { const unsigned long long parallel_for_start = current_time_ns(); #pragma omp parallel for default(none) private(i,j) shared(L,U,MatrixDim) for (i=0; i < MatrixDim; i 
++){ for (j=0; j < MatrixDim; j++){ if ( i == j) { L[i * MatrixDim + j] = 1.0; U[i * MatrixDim + j] = GET_RAND_FP; } else if (i < j){ L[i * MatrixDim + j] = 0; U[i * MatrixDim + j] = GET_RAND_FP; } else { // i > j L[i * MatrixDim + j] = GET_RAND_FP; U[i * MatrixDim + j] = 0; } } } ; const unsigned long long parallel_for_end = current_time_ns(); printf("pragma62_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); } { const unsigned long long parallel_for_start = current_time_ns(); #pragma omp parallel for default(none) private(i,j,k,sum) shared(L,U,A,MatrixDim) for (i=0; i < MatrixDim; i++ ) { for (j=0; j < MatrixDim; j++){ sum = 0; for(k=0; k < MatrixDim; k++) sum += L[i * MatrixDim + k]*U[k * MatrixDim + j]; A[i * MatrixDim + j] = sum; } } ; const unsigned long long parallel_for_end = current_time_ns(); printf("pragma79_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start); } for (i=0; i < MatrixDim; i ++) { for (j=0; j < MatrixDim; j++) fprintf(fl, "%f ", L[i * MatrixDim + j]); fprintf(fl, "\n"); } fclose(fl); for (i=0; i < MatrixDim; i ++) { for (j=0; j < MatrixDim; j++) fprintf(fu, "%f ", U[i * MatrixDim + j]); fprintf(fu, "\n"); } fclose(fu); fprintf(fa, "%d\n", MatrixDim); for (i=0; i < MatrixDim; i ++) { for (j=0; j < MatrixDim; j++) fprintf(fa, "%f ", A[i * MatrixDim + j]); fprintf(fa, "\n"); } fclose(fa); free(L); free(U); free(A); return 0; }
BCAssem.c
/*
 * BCAssem.c
 *
 *  Created on: Oct 6, 2014
 *      Author: lurker
 */
#include "mex.h"
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#ifdef SINGLE
#define REAL float
#else /* not SINGLE */
#define REAL double
#endif /* not SINGLE */

/*
 * Boundary Condition Assembly.
 *
 * Do integral of test function and trial function on boundary.
 *
 * Input : Edges
 *
 * Output: [I, J, V] for building sparse matrix
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    REAL *np = mxGetPr(prhs[0]);            /* unused here; kept for the documented call signature */
    REAL *ne = mxGetPr(prhs[1]);            /* number of boundary edges (as a double scalar) */
    REAL *nodes = mxGetPr(prhs[2]);         /* 2 x nnodes coordinate array */
    /* assumes prhs[3] holds int32 data -- TODO(review): confirm in callers */
    int *edges = (int *)mxGetData(prhs[3]);

    /* each edge contributes a 2x2 local matrix => 4 triplets per edge */
    int nzmax = 4*(int)*ne;

    plhs[0] = mxCreateNumericMatrix(nzmax, 1, mxDOUBLE_CLASS, mxREAL);
    plhs[1] = mxCreateNumericMatrix(nzmax, 1, mxDOUBLE_CLASS, mxREAL);
    plhs[2] = mxCreateNumericMatrix(nzmax, 1, mxDOUBLE_CLASS, mxREAL);

    REAL *pI = mxGetPr(plhs[0]);
    REAL *pJ = mxGetPr(plhs[1]);
    REAL *pV = mxGetPr(plhs[2]);

    mwSize ne_count = (mwSize)*ne;
    mwSize i;

    /* BUG FIX: the original wrote through shared post-incremented pointers
     * (*pI++ etc.) inside the parallel loop -- a data race that also made
     * the triplet order nondeterministic.  Each iteration now writes to a
     * disjoint slot computed from its own index, and all scratch variables
     * are loop-local (they were shared across threads before). */
#pragma omp parallel for
    for (i = 0; i < ne_count; i++) {
        mwSize node_1 = edges[2*i];
        mwSize node_2 = edges[2*i + 1];
        REAL length = sqrt(pow(nodes[2*node_1] - nodes[2*node_2], 2) +
                           pow(nodes[2*node_1 + 1] - nodes[2*node_2 + 1], 2));
        mwSize j, k;
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                mwSize idx = 4*i + 2*j + k;
                pI[idx] = edges[2*i + j] + 1;   /* +1: MATLAB is 1-indexed */
                pJ[idx] = edges[2*i + k] + 1;
                /* NOTE(review): the standard 1-D linear mass matrix has
                 * length/3 on the diagonal and length/6 off it; this code
                 * has them swapped.  Preserved as-is -- verify intent. */
                if (j != k) {
                    pV[idx] = length/3.0;
                } else {
                    pV[idx] = length/6.0;
                }
            }
        }
    }
}
sink-3.c
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* Test that we can handle multiple undeclared sink variables
   gracefully.  */

void bar (int *);

void
foo ()
{
  int i,j;
#pragma omp parallel for ordered(1)
  for (i=0; i < 100; ++i)
    {
/* `poo' and `paa' are deliberately undeclared; the dg-error pattern below
   checks that the compiler's diagnostic mentions both of them.  */
#pragma omp ordered depend(sink:poo-1,paa+1) /* { dg-error "poo.*declared.*paa.*declared" } */
      bar(&i);
#pragma omp ordered depend(source)
    }
}
GB_binop__plus_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): all function bodies come from the #included *_template.c
// files, which expand in terms of the GB_* macros defined below.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__plus_int8)
// A.*B function (eWiseMult):       GB (_AemultB_08__plus_int8)
// A.*B function (eWiseMult):       GB (_AemultB_02__plus_int8)
// A.*B function (eWiseMult):       GB (_AemultB_04__plus_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__plus_int8)
// A*D function (colscale):         GB (_AxD__plus_int8)
// D*A function (rowscale):         GB (_DxB__plus_int8)
// C+=B function (dense accum):     GB (_Cdense_accumB__plus_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__plus_int8)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__plus_int8)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__plus_int8)
// C=scalar+B                       GB (_bind1st__plus_int8)
// C=scalar+B'                      GB (_bind1st_tran__plus_int8)
// C=A+scalar                       GB (_bind2nd__plus_int8)
// C=A'+scalar                      GB (_bind2nd_tran__plus_int8)

// C type:   int8_t
// A type:   int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij + bij)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x + y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (set via GraphBLAS compile-time controls in GB_control.h)
#define GB_DISABLE \
    (GxB_NO_PLUS || GxB_NO_INT8 || GxB_NO_PLUS_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__plus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__plus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__plus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__plus_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // (generated code: this second return is unreachable; kept as generated)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__plus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__plus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__plus_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__plus_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__plus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__plus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__plus_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__plus_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t   x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x + bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__plus_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t   y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij + y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int8_t aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = (x + aij) ;               \
}

GrB_Info GB (_bind1st_tran__plus_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int8_t aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = (aij + y) ;               \
}

GrB_Info GB (_bind2nd_tran__plus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
trmm_x_dia_n_hi_col.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Multi-column triangular multiply for the DIA format, upper triangle:
 * y(:,c) = alpha * U * x(:,c) + beta * y(:,c) for every column c, where U
 * is the upper-triangular part (diagonals with distance >= 0) of `mat`.
 * Columns are processed independently, so the outer loop is parallelised.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_DIA *mat,
                           const ALPHA_Number *x,
                           const ALPHA_INT columns,
                           const ALPHA_INT ldx,
                           const ALPHA_Number beta,
                           ALPHA_Number *y,
                           const ALPHA_INT ldy)
{
    ALPHA_INT num_threads = alpha_get_thread_num();

#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT col = 0; col < columns; ++col)
    {
        ALPHA_Number *Y = &y[index2(col, 0, ldy)];
        const ALPHA_Number *X = &x[index2(col, 0, ldx)];

        /* first scale the output column: y(:,col) *= beta */
        for (ALPHA_INT r = 0; r < mat->rows; r++)
            alpha_mul(Y[r], Y[r], beta);

        /* then accumulate each stored diagonal of the upper triangle */
        for (ALPHA_INT t = 0; t < mat->ndiag; ++t)
        {
            ALPHA_INT dist = mat->distance[t];
            if (dist < 0)
                continue;   /* lower-triangle diagonal: not used here */

            ALPHA_INT row_start = alpha_max(0, -dist);
            ALPHA_INT col_start = alpha_max(0, dist);
            ALPHA_INT len = alpha_min(mat->rows - row_start, mat->cols - col_start);

            for (ALPHA_INT k = 0; k < len; ++k)
            {
                ALPHA_INT r = row_start + k;
                ALPHA_INT c = col_start + k;
                ALPHA_Number scaled;
                /* y(r) += (alpha * mat(r,c)) * x(c) */
                alpha_mul(scaled, mat->values[index2(t, r, mat->lval)], alpha);
                alpha_madde(Y[r], scaled, X[c]);
            }
        }
    }

    return ALPHA_SPARSE_STATUS_SUCCESS;
}
residualbased_elimination_builder_and_solver_with_constraints.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Vicente Mataix Ferrandiz // // #if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_WITH_CONSTRAINTS) #define KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_WITH_CONSTRAINTS /* System includes */ #include <unordered_set> #include <unordered_map> /* External includes */ /* Project includes */ #include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h" #include "utilities/sparse_matrix_multiplication_utility.h" #include "utilities/constraint_utilities.h" #include "input_output/logger.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedEliminationBuilderAndSolverWithConstraints * @ingroup KratosCore * @brief Current class provides an implementation for standard builder and solving operations. * @details The RHS is constituted by the unbalanced loads (residual) * Degrees of freedom are reordered putting the restrained degrees of freedom at * the end of the system ordered in reverse order with respect to the DofSet. * Imposition of the dirichlet conditions is naturally dealt with as the residual already contains * this information. * Calculation of the reactions involves a cost very similiar to the calculation of the total residual * The system is build in the following manner. A T matrix is assembled and constant vector g is assembled too. The T matrix contains the relations of all the dofs of the system, even the nodes with no master/slave relation. Then the size is n_total x n_red * The relation u = T u_red * Then: * A_red = T^t A T * b_red = T^t (b - A g) * @todo There is a more efficient way to asemble the system, but more costly, which is the following. 
In this case T will be only a relation matrix between master and slave dofs. Then n_slave x n_master: us = T um + g * Separating into independent dofs, master ans slave dofs: * u = uu * um * us * A = Auu Aum Aus * Amu Amm Ams * Asu Asm Ass * b = bu * bm * bs * Finally: * A_red = Auu Aum + Aus T * Amu + T^t Asu Amm + T^t Ams^t + Ams T + T^t Ass T * b_red = bu - Aus g * bm - Ams g * * This system requires extra care and is more complicated and requires to compute the blocks properly * @author Vicente Mataix Ferrandiz */ template <class TSparseSpace, class TDenseSpace, class TLinearSolver > class ResidualBasedEliminationBuilderAndSolverWithConstraints : public ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ /// Pointer definition of ResidualBasedEliminationBuilderAndSolverWithConstraints KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedEliminationBuilderAndSolverWithConstraints); /// Definition of the base class typedef ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; // The size_t types typedef std::size_t SizeType; typedef std::size_t IndexType; /// Definition of the classes from the base class typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef typename BaseType::NodeType NodeType; typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename 
BaseType::ConditionsArrayType ConditionsArrayType; /// Additional definitions typedef PointerVectorSet<Element, IndexedObject> ElementsContainerType; typedef Element::EquationIdVectorType EquationIdVectorType; typedef Element::DofsVectorType DofsVectorType; typedef boost::numeric::ublas::compressed_matrix<double> CompressedMatrixType; /// DoF types definition typedef typename NodeType::DofType DofType; typedef typename DofType::Pointer DofPointerType; /// Set definition typedef std::unordered_set<IndexType> IndexSetType; /// Map definition typedef std::unordered_map<IndexType, IndexType> IndexMapType; /// MPC definitions typedef MasterSlaveConstraint MasterSlaveConstraintType; typedef typename MasterSlaveConstraint::Pointer MasterSlaveConstraintPointerType; typedef std::vector<IndexType> VectorIndexType; typedef Vector VectorType; ///@} ///@name Enum's ///@{ ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor. (with parameters) */ explicit ResidualBasedEliminationBuilderAndSolverWithConstraints( typename TLinearSolver::Pointer pNewLinearSystemSolver, Parameters ThisParameters ) : BaseType(pNewLinearSystemSolver) { // Validate default parameters Parameters default_parameters = Parameters(R"( { "name" : "ResidualBasedEliminationBuilderAndSolverWithConstraints", "check_constraint_relation" : true, "reset_relation_matrix_each_iteration" : true })" ); ThisParameters.ValidateAndAssignDefaults(default_parameters); mCheckConstraintRelation = ThisParameters["check_constraint_relation"].GetBool(); mResetRelationMatrixEachIteration = ThisParameters["reset_relation_matrix_each_iteration"].GetBool(); } /** * @brief Default constructor */ explicit ResidualBasedEliminationBuilderAndSolverWithConstraints( typename TLinearSolver::Pointer pNewLinearSystemSolver, const bool CheckConstraintRelation = true, const bool ResetRelationMatrixEachIteration = false ) : BaseType(pNewLinearSystemSolver), mCheckConstraintRelation(CheckConstraintRelation), 
mResetRelationMatrixEachIteration(ResetRelationMatrixEachIteration) { } /** Destructor. */ ~ResidualBasedEliminationBuilderAndSolverWithConstraints() override { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ void SetUpSystem(ModelPart& rModelPart) override { if(rModelPart.MasterSlaveConstraints().size() > 0) SetUpSystemWithConstraints(rModelPart); else BaseType::SetUpSystem(rModelPart); } /** * @brief Function to perform the build of the RHS. The vector could be sized as the total number * of dofs or as the number of unrestrained ones * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param rA The LHS matrix * @param rb The RHS vector */ void Build( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rb ) override { if(rModelPart.MasterSlaveConstraints().size() > 0) BuildWithConstraints(pScheme, rModelPart, rA, rb); else BaseType::Build(pScheme, rModelPart, rA, rb); } /** * @brief Function to perform the building and solving phase at the same time. * @details It is ideally the fastest and safer function to use when it is possible to solve * just after building * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void BuildAndSolve( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { if(rModelPart.MasterSlaveConstraints().size() > 0) BuildAndSolveWithConstraints(pScheme, rModelPart, A, Dx, b); else BaseType::BuildAndSolve(pScheme, rModelPart, A, Dx, b); } /** * @brief Function to perform the build of the RHS. 
* @details The vector could be sized as the total number of dofs or as the number of unrestrained ones * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void BuildRHS( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& b) override { KRATOS_TRY if(rModelPart.MasterSlaveConstraints().size() > 0) BuildRHSWithConstraints(pScheme, rModelPart, b); else BaseType::BuildRHS(pScheme, rModelPart, b); KRATOS_CATCH("") } /** * @brief Builds the list of the DofSets involved in the problem by "asking" to each element * and condition its Dofs. * @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the * way the matrix and RHS are built * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart ) override { if(rModelPart.MasterSlaveConstraints().size() > 0) SetUpDofSetWithConstraints(pScheme, rModelPart); else BaseType::SetUpDofSet(pScheme, rModelPart); } /** * @brief It applies certain operations at the system of equations at the begining of the solution step * @param rModelPart The model part to compute * @param rA The LHS matrix of the system of equations * @param rDx The vector of unkowns * @param rb The RHS vector of the system of equations */ void InitializeSolutionStep( ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) override { KRATOS_TRY BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb); // Getting process info const ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // Computing constraints const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size()); auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin(); #pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin) for (int k = 0; 
k < n_constraints; ++k) { auto it = constraints_begin + k; it->InitializeSolutionStep(r_process_info); // Here each constraint constructs and stores its T and C matrices. Also its equation slave_ids. } KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints failed to initialize solution step.") } /** * @brief It applies certain operations at the system of equations at the end of the solution step * @param rModelPart The model part to compute * @param rA The LHS matrix of the system of equations * @param rDx The vector of unkowns * @param rb The RHS vector of the system of equations */ void FinalizeSolutionStep( ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) override { KRATOS_TRY BaseType::FinalizeSolutionStep(rModelPart, rA, rDx, rb); // Getting process info const ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // Computing constraints const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size()); const auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin(); #pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin) for (int k = 0; k < n_constraints; ++k) { auto it = constraints_begin + k; it->FinalizeSolutionStep(r_process_info); } KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints failed to finalize solution step.") } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualBasedEliminationBuilderAndSolverWithConstraints"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. 
    void PrintData(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    TSystemMatrixPointerType mpTMatrix = NULL;             /// This is matrix containing the global relation for the constraints
    TSystemMatrixPointerType mpOldAMatrix = NULL;          /// This is matrix containing the old LHS structure
    TSystemVectorPointerType mpConstantVector = NULL;      /// This is vector containing the rigid movement of the constraint
    TSystemVectorPointerType mpDeltaConstantVector = NULL; /// This is vector contains the effective constant displacement
    DofsArrayType mDoFMasterFixedSet;                      /// The set containing the fixed master DoF of the system
    DofsArrayType mDoFSlaveSet;                            /// The set containing the slave DoF of the system
    SizeType mDoFToSolveSystemSize = 0;                    /// Number of degrees of freedom of the problem to actually be solved
    IndexMapType mReactionEquationIdMap;                   /// In order to know the corresponding EquationId for each component of the reaction vector
    bool mCheckConstraintRelation = false;                 /// If we do a constraint check relation
    bool mResetRelationMatrixEachIteration = false;        /// If we reset the relation matrix at each iteration
    bool mComputeConstantContribution = false;             /// If we compute the constant contribution of the MPC
    bool mCleared = true;                                  /// If the system has been reset

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief This method assembles the global relation matrix (T matrix used to impose the MPC)
     * @param rT The global relation matrix
     * @param rTransformationMatrix The local transformation contribution
     * @param rSlaveEquationId The equation id of the slave dofs
     * @param rMasterEquationId The equation id of the master dofs
     */
    void AssembleRelationMatrix(
        TSystemMatrixType& rT,
        const LocalSystemMatrixType& rTransformationMatrix,
        const EquationIdVectorType& rSlaveEquationId,
        const EquationIdVectorType& rMasterEquationId
        )
    {
        const SizeType local_size_1 = rTransformationMatrix.size1();

        for (IndexType i_local = 0; i_local < local_size_1; ++i_local) {
            IndexType i_global = rSlaveEquationId[i_local];

            // Only rows belonging to free (non-Dirichlet) dofs are assembled
            if (i_global < BaseType::mEquationSystemSize) {
                BaseType::AssembleRowContributionFreeDofs(rT, rTransformationMatrix, i_global, i_local, rMasterEquationId);
            }
        }
    }

    /**
     * @brief This method constructs the relationship between the DoF
     * @param pScheme The integration scheme
     * @param rA The LHS of the system
     * @param rModelPart The model part which defines the problem
     */
    void ConstructMatrixStructure(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixType& rA,
        ModelPart& rModelPart
        ) override
    {
        // Dispatch: use the constraint-aware graph construction only when MPC constraints are present
        if(rModelPart.MasterSlaveConstraints().size() > 0)
            ConstructMatrixStructureWithConstraints(pScheme, rA, rModelPart);
        else
            BaseType::ConstructMatrixStructure(pScheme, rA, rModelPart);
    }

    /**
     * @brief The same methods as the base class but with constraints
     * @param pScheme The pointer to the integration scheme
     * @param rModelPart The model part to compute
     * @param rA The LHS matrix of the system of equations
     * @param rDx The vector of unknowns
     * @param rb The RHS vector of the system of equations
     */
    void BuildAndSolveWithConstraints(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        )
    {
        KRATOS_TRY

        Timer::Start("Build");

        // We apply the master/slave relationship before build
        ApplyMasterSlaveRelation(pScheme, rModelPart, rA, rDx, rb);

        // We compute the effective constant vector
        TSystemVectorType dummy_Dx(mDoFToSolveSystemSize);
        TSparseSpace::SetToZero(dummy_Dx);
        ComputeEffectiveConstant(pScheme, rModelPart, dummy_Dx);

        // We do the build (after that we resize the solution vector to avoid problems)
        BuildWithConstraints(pScheme, rModelPart, rA, rb);
        Timer::Stop("Build");

        // Now we apply the BC (the unknowns vector is resized to the reduced "solvable" size first)
        rDx.resize(mDoFToSolveSystemSize, false);
        ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb);
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;

        // We solve the system of equations
        const double start_solve = OpenMPUtils::GetCurrentTime();
        Timer::Start("Solve");
        SystemSolveWithPhysics(rA, rDx, rb, rModelPart);
        Timer::Stop("Solve");
        const double stop_solve = OpenMPUtils::GetCurrentTime();

        // We compute the effective constant vector
        ComputeEffectiveConstant(pScheme, rModelPart, rDx);

        // We reconstruct the Unknowns vector and the residual
        const double start_reconstruct_slaves = OpenMPUtils::GetCurrentTime();
        ReconstructSlaveSolutionAfterSolve(pScheme, rModelPart, rA, rDx, rb);
        const double stop_reconstruct_slaves = OpenMPUtils::GetCurrentTime();
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Reconstruct slaves time: " << stop_reconstruct_slaves - start_reconstruct_slaves << std::endl;

        // Some verbosity
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;

        KRATOS_CATCH("")
    }

    /**
     * @brief The same methods as the base class but with constraints
     * @param pScheme The pointer to the integration scheme
     * @param rModelPart The model part to compute
     * @param rb The RHS vector of the system of equations
     */
    void BuildRHSWithConstraints(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemVectorType& rb
        )
    {
        Timer::Start("Build RHS");

        // Resetting to zero the vector of reactions
        if(BaseType::mCalculateReactionsFlag) {
            TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));
        }

        // Building without BC
        BuildRHSNoDirichlet(pScheme,rModelPart,rb);

        Timer::Stop("Build RHS");

        ApplyDirichletConditionsRHS(pScheme, rModelPart, rb);

        // We get the global T matrix
        const TSystemMatrixType& rTMatrix = *mpTMatrix;

        // Reconstruct the RHS in the full (unconstrained) system size: rb = T * rb_reduced
        TSystemVectorType rb_copy = rb;
        rb.resize(BaseType::mEquationSystemSize, false);
        TSparseSpace::Mult(rTMatrix, rb_copy, rb);

        // Adding contribution to reactions
        TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector;
        if (BaseType::mCalculateReactionsFlag) {
            for (auto& r_dof : BaseType::mDofSet) {
                const bool is_master_fixed = mDoFMasterFixedSet.find(r_dof) == mDoFMasterFixedSet.end() ? false : true;
                const bool is_slave = mDoFSlaveSet.find(r_dof) == mDoFSlaveSet.end() ? false : true;
                if (is_master_fixed || is_slave) { // Fixed or MPC dof
                    const IndexType equation_id = r_dof.EquationId();
                    r_reactions_vector[mReactionEquationIdMap[equation_id]] += rb[equation_id];
                }
            }
        }

        // Some verbosity
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nRHS vector = " << rb << std::endl;
    }

    /**
     * @brief Builds the list of the DofSets involved in the problem by "asking" to each element and condition its Dofs.
     * @details Equivalent to the ResidualBasedEliminationBuilderAndSolver but with constraints. The list of dofs
     * is stored inside the BuilderAndSolver as it is closely connected to the way the matrix and RHS are built
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     */
    void SetUpDofSetWithConstraints(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart
        )
    {
        KRATOS_TRY;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Setting up the dofs" << std::endl;

        DofsVectorType dof_list, second_dof_list; // NOTE: The second dof list is only used on constraints to include master/slave relations

        typedef std::unordered_set < DofPointerType, DofPointerHasher> set_type;

        // Declaring temporal variables
        DofsArrayType dof_temp_all, dof_temp_solvable, dof_temp_slave;

        // We assign an empty dof array to our dof sets
        BaseType::mDofSet = DofsArrayType(); /// This corresponds with all the DoF of the system
        mDoFSlaveSet = DofsArrayType();      /// This corresponds with the slave (the ones not solved after compacting the system using MPC)

        /**
         * Here we declare three sets.
         * - The global set: Contains all the DoF of the system
         * - The slave set: The DoF that are not going to be solved, due to MPC formulation
         */
        set_type dof_global_set, dof_global_slave_set;

        #pragma omp parallel firstprivate(dof_list, second_dof_list)
        {
            ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

            // We create the temporal set and we reserve some space on them
            set_type dofs_tmp_set, dof_temp_slave_set;
            dofs_tmp_set.reserve(20000);
            dof_temp_slave_set.reserve(200);

            // Gets the array of elements from the modeler
            ElementsArrayType& r_elements_array = rModelPart.Elements();
            const int number_of_elements = static_cast<int>(r_elements_array.size());
            #pragma omp for schedule(guided, 512) nowait
            for (int i = 0; i < number_of_elements; ++i) {
                auto it_elem = r_elements_array.begin() + i;

                // Gets list of Dof involved on every element
                pScheme->GetElementalDofList(*(it_elem.base()), dof_list, r_current_process_info);
                dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
            }

            // Gets the array of conditions from the modeler
            ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
            const int number_of_conditions = static_cast<int>(r_conditions_array.size());
            #pragma omp for schedule(guided, 512) nowait
            for (int i = 0; i < number_of_conditions; ++i) {
                auto it_cond = r_conditions_array.begin() + i;

                // Gets list of Dof involved on every element
                pScheme->GetConditionDofList(*(it_cond.base()), dof_list, r_current_process_info);
                dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
            }

            // Gets the array of constraints from the modeler
            auto& r_constraints_array = rModelPart.MasterSlaveConstraints();
            const int number_of_constraints = static_cast<int>(r_constraints_array.size());
            #pragma omp for schedule(guided, 512) nowait
            for (int i = 0; i < number_of_constraints; ++i) {
                auto it_const = r_constraints_array.begin() + i;

                // Gets list of Dof involved on every element
                it_const->GetDofList(dof_list, second_dof_list, r_current_process_info);
                dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
                dofs_tmp_set.insert(second_dof_list.begin(), second_dof_list.end());
                // The first (slave) dof list also feeds the slave set
                dof_temp_slave_set.insert(dof_list.begin(), dof_list.end());
            }

            // We merge all the sets in one thread
            #pragma omp critical
            {
                dof_global_set.insert(dofs_tmp_set.begin(), dofs_tmp_set.end());
                dof_global_slave_set.insert(dof_temp_slave_set.begin(), dof_temp_slave_set.end());
            }
        }

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n" << std::endl;

        /// We transfer the temporal sets to our DoF set
        dof_temp_all.reserve(dof_global_set.size());
        for (auto p_dof : dof_global_set) {
            dof_temp_all.push_back( p_dof );
        }
        dof_temp_all.Sort();
        BaseType::mDofSet = dof_temp_all;

        dof_temp_slave.reserve(dof_global_slave_set.size());
        for (auto p_dof : dof_global_slave_set) {
            dof_temp_slave.push_back( p_dof );
        }
        dof_temp_slave.Sort();
        mDoFSlaveSet = dof_temp_slave;

        // Throws an exception if there are no Degrees Of Freedom involved in the analysis
        KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
        KRATOS_WARNING_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", mDoFSlaveSet.size() == 0) << "No slave degrees of freedom to solve!" << std::endl;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << BaseType::mDofSet.size() << std::endl;

        BaseType::mDofSetIsInitialized = true;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl;

#ifdef USE_LOCKS_IN_ASSEMBLY
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2)) << "Initializing lock array" << std::endl;

        // Destroy any previous locks before resizing the lock array
        if (BaseType::mLockArray.size() != 0) {
            for (int i = 0; i < static_cast<int>(BaseType::mLockArray.size()); ++i) {
                omp_destroy_lock(&BaseType::mLockArray[i]);
            }
        }
        BaseType::mLockArray.resize(BaseType::mDofSet.size());

        for (int i = 0; i < static_cast<int>(BaseType::mLockArray.size()); ++i) {
            omp_init_lock(&BaseType::mLockArray[i]);
        }

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2)) << "End of setup dof set\n" << std::endl;
#endif

        // If reactions are to be calculated, we check if all the dofs have reactions defined
        // This is to be done only in debug mode
#ifdef KRATOS_DEBUG
        if(BaseType::GetCalculateReactionsFlag()) {
            for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) {
                KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl
                    << "Node : " << dof_iterator->Id()<< std::endl
                    << "Dof : " << (*dof_iterator) << std::endl << "Not possible to calculate reactions."
                    << std::endl;
            }
        }
#endif

        KRATOS_CATCH("");
    }

    /**
     * @brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
     * @param rA The LHS matrix
     * @param rDx The Unknowns vector
     * @param rb The RHS vector
     * @param rModelPart The model part of the problem to solve
     */
    void SystemSolveWithPhysics(
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb,
        ModelPart& rModelPart
        )
    {
        KRATOS_TRY

        double norm_b = 0.0;
        if (TSparseSpace::Size(rb) > 0)
            norm_b = TSparseSpace::TwoNorm(rb);

        if (norm_b > 0.0) {
            // Create the auxiliar dof set: only the dofs actually solved (free and non-slave)
            DofsArrayType aux_dof_set;
            aux_dof_set.reserve(mDoFToSolveSystemSize);
            for (auto& r_dof : BaseType::mDofSet) {
                if (r_dof.EquationId() < BaseType::mEquationSystemSize) {
                    auto it = mDoFSlaveSet.find(r_dof);
                    if (it == mDoFSlaveSet.end())
                        aux_dof_set.push_back( &r_dof );
                }
            }
            aux_dof_set.Sort();

            // Sanity checks on the reduced system size
            KRATOS_ERROR_IF_NOT(aux_dof_set.size() == mDoFToSolveSystemSize) << "Inconsistency (I) in system size: " << mDoFToSolveSystemSize << " vs " << aux_dof_set.size() << "\n Size dof set " << BaseType::mDofSet.size() << " vs Size slave dof set " << mDoFSlaveSet.size() << std::endl;
            KRATOS_ERROR_IF_NOT(aux_dof_set.size() == rA.size1()) << "Inconsistency (II) in system size: " << rA.size1() << " vs " << aux_dof_set.size() << "\n Size dof set " << BaseType::mDofSet.size() << " vs Size slave dof set " << mDoFSlaveSet.size() << std::endl;

            // Provide physical data as needed
            if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
                BaseType::mpLinearSystemSolver->ProvideAdditionalData(rA, rDx, rb, aux_dof_set, rModelPart);

            // Do solve
            BaseType::mpLinearSystemSolver->Solve(rA, rDx, rb);
        } else {
            TSparseSpace::SetToZero(rDx);
            KRATOS_WARNING_IF("ResidualBasedEliminationBuilderAndSolver", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!"
                << std::endl;
        }

        // Prints informations about the current time
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;

        KRATOS_CATCH("")
    }

    /**
     * @brief This function is exactly same as the ConstructMatrixStructure() function in base class except that the function
     * @details Has the call to ApplyConstraints function call once the element and conditions compute their equation ids
     * @todo Move this method to a common class with block builder and solver with constraints
     */
    virtual void ConstructMatrixStructureWithConstraints(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixType& rA,
        ModelPart& rModelPart
        )
    {
        // Filling with zero the matrix (creating the structure)
        Timer::Start("MatrixStructure");

        // The total number of dof of the system
        const SizeType equation_size = BaseType::mEquationSystemSize;

        // This vector contains the indexes sets for all rows
        std::vector<IndexSetType> indices(equation_size);

        // We reserve some indexes on each row
        #pragma omp parallel for firstprivate(equation_size)
        for (int index = 0; index < static_cast<int>(equation_size); ++index)
            indices[index].reserve(40);

        /// Definition of the equation id vector type
        EquationIdVectorType ids(3, 0);
        EquationIdVectorType second_ids(3, 0); // NOTE: Used only on the constraints to take into account the master dofs

        #pragma omp parallel firstprivate(ids, second_ids)
        {
            // The process info
            ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

            // We repeat the same declaration for each thread
            std::vector<IndexSetType> temp_indexes(equation_size);

            #pragma omp for
            for (int index = 0; index < static_cast<int>(equation_size); ++index)
                temp_indexes[index].reserve(30);

            // Getting the size of the array of elements from the model
            const int number_of_elements = static_cast<int>(rModelPart.Elements().size());

            // Element initial iterator
            const auto el_begin = rModelPart.ElementsBegin();

            // We iterate over the elements
            #pragma omp for schedule(guided, 512) nowait
            for (int i_elem = 0; i_elem<number_of_elements; ++i_elem) {
                auto it_elem = el_begin + i_elem;
                pScheme->EquationId( *(it_elem.base()), ids, r_current_process_info);

                for (auto& id_i : ids) {
                    if (id_i < BaseType::mEquationSystemSize) {
                        auto& row_indices = temp_indexes[id_i];
                        for (auto& id_j : ids) {
                            if (id_j < BaseType::mEquationSystemSize) {
                                row_indices.insert(id_j);
                            }
                        }
                    }
                }
            }

            // Getting the size of the array of the conditions
            const int number_of_conditions = static_cast<int>(rModelPart.Conditions().size());

            // Condition initial iterator
            const auto cond_begin = rModelPart.ConditionsBegin();

            // We iterate over the conditions
            #pragma omp for schedule(guided, 512) nowait
            for (int i_cond = 0; i_cond<number_of_conditions; ++i_cond) {
                auto it_cond = cond_begin + i_cond;
                pScheme->Condition_EquationId( *(it_cond.base()), ids, r_current_process_info);
                for (auto& id_i : ids) {
                    if (id_i < BaseType::mEquationSystemSize) {
                        auto& row_indices = temp_indexes[id_i];
                        for (auto& id_j : ids) {
                            if (id_j < BaseType::mEquationSystemSize) {
                                row_indices.insert(id_j);
                            }
                        }
                    }
                }
            }

            // Getting the size of the array of the constraints
            const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());

            // Constraint initial iterator
            const auto const_begin = rModelPart.MasterSlaveConstraints().begin();

            // We iterate over the constraints
            #pragma omp for schedule(guided, 512) nowait
            for (int i_const = 0; i_const < number_of_constraints; ++i_const) {
                auto it_const = const_begin + i_const;

                // Detect if the constraint is active or not. If the user did not make any choice the constraint
                // It is active by default
                bool constraint_is_active = true;
                if( it_const->IsDefined(ACTIVE) ) {
                    constraint_is_active = it_const->Is(ACTIVE);
                }

                if(constraint_is_active) {
                    it_const->EquationIdVector(ids, second_ids, r_current_process_info);

                    // Slave DoFs
                    for (auto& id_i : ids) {
                        if (id_i < BaseType::mEquationSystemSize) {
                            auto& row_indices = temp_indexes[id_i];
                            for (auto& id_j : ids) {
                                if (id_j < BaseType::mEquationSystemSize) {
                                    row_indices.insert(id_j);
                                }
                            }
                        }
                    }

                    // Master DoFs
                    for (auto& id_i : second_ids) {
                        if (id_i < BaseType::mEquationSystemSize) {
                            auto& row_indices = temp_indexes[id_i];
                            for (auto& id_j : second_ids) {
                                if (id_j < BaseType::mEquationSystemSize) {
                                    row_indices.insert(id_j);
                                }
                            }
                        }
                    }
                }
            }

            // Merging all the temporal indexes
            #pragma omp critical
            {
                for (int i = 0; i < static_cast<int>(temp_indexes.size()); ++i) {
                    indices[i].insert(temp_indexes[i].begin(), temp_indexes[i].end());
                }
            }
        }

        // Count the row sizes
        SizeType nnz = 0;
        for (IndexType i = 0; i < indices.size(); ++i)
            nnz += indices[i].size();

        rA = CompressedMatrixType(indices.size(), indices.size(), nnz);

        double *Avalues = rA.value_data().begin();
        IndexType *Arow_indices = rA.index1_data().begin();
        IndexType *Acol_indices = rA.index2_data().begin();

        // Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
        Arow_indices[0] = 0;
        for (int i = 0; i < static_cast<int>(rA.size1()); i++)
            Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();

        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(rA.size1()); ++i) {
            const IndexType row_begin = Arow_indices[i];
            const IndexType row_end = Arow_indices[i + 1];
            IndexType k = row_begin;
            for (auto it = indices[i].begin(); it != indices[i].end(); ++it) {
                Acol_indices[k] = *it;
                Avalues[k] = 0.0;
                k++;
            }

            indices[i].clear(); //deallocating the memory

            std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
        }

        rA.set_filled(indices.size() + 1, nnz);

        Timer::Stop("MatrixStructure");
    }

    /**
     * @brief This function is exactly same as the ConstructMatrixStructure() function in base class except that the function has the call to ApplyConstraints function call once the element and conditions compute their equation slave_ids
     * @param pScheme The pointer to the integration scheme
     * @param rT The global relation matrix
     * @param rModelPart The model part to compute
     */
    virtual void ConstructRelationMatrixStructure(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixType& rT,
        ModelPart& rModelPart
        )
    {
        // Filling with zero the matrix (creating the structure)
        Timer::Start("RelationMatrixStructure");

        IndexMapType solvable_dof_reorder;
        std::unordered_map<IndexType, IndexSetType> master_indices;

        // Filling with "ones"
        typedef std::pair<IndexType, IndexType> IndexIndexPairType;
        typedef std::pair<IndexType, IndexSetType> IndexIndexSetPairType;
        IndexType counter = 0;
        for (auto& dof : BaseType::mDofSet) {
            if (dof.EquationId() < BaseType::mEquationSystemSize) {
                const IndexType equation_id = dof.EquationId();
                auto it = mDoFSlaveSet.find(dof);
                if (it == mDoFSlaveSet.end()) {
                    // Solvable dof: gets a compacted column index and a diagonal "one" entry
                    solvable_dof_reorder.insert(IndexIndexPairType(equation_id, counter));
                    master_indices.insert(IndexIndexSetPairType(equation_id, IndexSetType({counter})));
                    ++counter;
                } else {
                    // Slave dof: its row will be filled from the master columns of its constraints
                    master_indices.insert(IndexIndexSetPairType(equation_id, IndexSetType({})));
                }
            }
        }

        // The process info
        ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        /// Definition of the equation id vector type
        EquationIdVectorType ids(3, 0);
        EquationIdVectorType second_ids(3, 0); // NOTE: Used only on the constraints to take into account the master dofs

        const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
        const auto it_const_begin = rModelPart.MasterSlaveConstraints().begin();
        // TODO: OMP
        for (int i_const = 0; i_const < number_of_constraints; ++i_const) {
            auto it_const = it_const_begin + i_const;

            // Detect if the constraint is active or not. If the user did not make any choice the constraint
            // It is active by default
            bool constraint_is_active = true;
            if( it_const->IsDefined(ACTIVE) ) {
                constraint_is_active = it_const->Is(ACTIVE);
            }

            if(constraint_is_active) {
                it_const->EquationIdVector(ids, second_ids, r_current_process_info);
                for (auto& slave_id : ids) {
                    if (slave_id < BaseType::mEquationSystemSize) {
                        auto it_slave = solvable_dof_reorder.find(slave_id);
                        if (it_slave == solvable_dof_reorder.end()) {
                            for (auto& master_id : second_ids) {
                                if (master_id < BaseType::mEquationSystemSize) {
                                    auto& master_row_indices = master_indices[slave_id];
                                    master_row_indices.insert(solvable_dof_reorder[master_id]);
                                }
                            }
                        }
                    }
                }
            }
        }

        KRATOS_DEBUG_ERROR_IF_NOT(BaseType::mEquationSystemSize == master_indices.size()) << "Inconsistency in the dofs size: " << BaseType::mEquationSystemSize << "\t vs \t" << master_indices.size() << std::endl;

        // Count the row sizes
        SizeType nnz = 0;
        for (IndexType i = 0; i < BaseType::mEquationSystemSize; ++i) {
            nnz += master_indices[i].size();
        }

        rT = CompressedMatrixType(BaseType::mEquationSystemSize, mDoFToSolveSystemSize, nnz);

        double *Tvalues = rT.value_data().begin();
        IndexType *Trow_indices = rT.index1_data().begin();
        IndexType *Tcol_indices = rT.index2_data().begin();

        // Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
        Trow_indices[0] = 0;
        for (IndexType i = 0; i < BaseType::mEquationSystemSize; ++i)
            Trow_indices[i + 1] = Trow_indices[i] + master_indices[i].size();

        KRATOS_DEBUG_ERROR_IF_NOT(Trow_indices[BaseType::mEquationSystemSize] == nnz) << "Nonzero values does not coincide with the row index definition: " << Trow_indices[BaseType::mEquationSystemSize] << " vs " << nnz << std::endl;

        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(rT.size1()); ++i) {
            const IndexType row_begin = Trow_indices[i];
            const IndexType row_end = Trow_indices[i + 1];
            IndexType k = row_begin;
            for (auto it = master_indices[i].begin(); it != master_indices[i].end(); ++it) {
                Tcol_indices[k] = *it;
                Tvalues[k] = 0.0;
                k++;
            }

            master_indices[i].clear(); //deallocating the memory

            std::sort(&Tcol_indices[row_begin], &Tcol_indices[row_end]);
        }

        rT.set_filled(BaseType::mEquationSystemSize + 1, nnz);

        // Setting ones
        for (auto& solv_dof : solvable_dof_reorder) {
            rT(solv_dof.first, solv_dof.second) = 1.0;
        }

        Timer::Stop("RelationMatrixStructure");
    }

    /**
     * @brief This function is exactly same as the Build() function in base class except that the function
     * @details It has the call to ApplyConstraints function call once the LHS or RHS are computed by elements and conditions
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param rA The LHS matrix
     * @param rb The RHS vector
     * @param UseBaseBuild If the base Build function will be used
     */
    void BuildWithConstraints(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rb,
        const bool UseBaseBuild = true
        )
    {
        KRATOS_TRY

        // We build the original system
        if (UseBaseBuild)
            BaseType::Build(pScheme, rModelPart, rA, rb);
        else
            BuildWithoutConstraints(pScheme, rModelPart, rA, rb);

        // Assemble the constraints
        const double start_build = OpenMPUtils::GetCurrentTime();

        // We get the global T matrix
        const TSystemMatrixType& rTMatrix = *mpTMatrix;

        // We compute only once (or if cleared)
        if (mCleared) {
            mCleared = false;
            ComputeConstraintContribution(pScheme, rModelPart, true, mComputeConstantContribution);
        } else if (mResetRelationMatrixEachIteration) {
            ResetConstraintSystem();
            ComputeConstraintContribution(pScheme, rModelPart, mResetRelationMatrixEachIteration, mComputeConstantContribution);
        }

        // We compute the transposed matrix of the global relation matrix
        TSystemMatrixType T_transpose_matrix(mDoFToSolveSystemSize, BaseType::mEquationSystemSize);
        SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(T_transpose_matrix, rTMatrix, 1.0);

        // The proper way to include the constants is in the RHS as T^t(f - A * g)
        TSystemVectorType rb_copy = rb;
        if (mComputeConstantContribution) {
            // We get the g constant vector
            TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector;
            TSystemVectorType aux_constant_vector(rDeltaConstantVector);
            TSparseSpace::Mult(rA, rDeltaConstantVector, aux_constant_vector);
            TSparseSpace::UnaliasedAdd(rb_copy, -1.0, aux_constant_vector);
        }

        // The auxiliar matrix to store the intermediate matrix multiplication (T^t * A)
        TSystemMatrixType auxiliar_A_matrix(mDoFToSolveSystemSize, BaseType::mEquationSystemSize);
        SparseMatrixMultiplicationUtility::MatrixMultiplication(T_transpose_matrix, rA, auxiliar_A_matrix);

        // We do a backup of the matrix before apply the constraints
        if (mpOldAMatrix == NULL) { // If the pointer is not initialized initialize it to an empty matrix
            TSystemMatrixPointerType pNewOldAMatrix = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
            mpOldAMatrix.swap(pNewOldAMatrix);
        }
        (*mpOldAMatrix).swap(rA);

        // We resize of system of equations to the reduced (solvable) size
        rA.resize(mDoFToSolveSystemSize, mDoFToSolveSystemSize, false);
        rb.resize(mDoFToSolveSystemSize, false);

        // Final multiplication: rA = T^t * A * T, rb = T^t * (f - A * g)
        SparseMatrixMultiplicationUtility::MatrixMultiplication(auxiliar_A_matrix, rTMatrix, rA);
        TSparseSpace::Mult(T_transpose_matrix, rb_copy, rb);

        // Cleaning up memory
        auxiliar_A_matrix.resize(0, 0, false);
        T_transpose_matrix.resize(0, 0, false);

        const double stop_build = OpenMPUtils::GetCurrentTime();
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Constraint relation build time and multiplication: " << stop_build - start_build << std::endl;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building with constraints" << std::endl;

        KRATOS_CATCH("")
    }

    /**
     * @brief Function to perform the build of the RHS.
     * @details The vector could be sized as the total number of dofs or as the number of unrestrained ones
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param rb The RHS of the system
     */
    void BuildRHSNoDirichlet(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemVectorType& rb
        )
    {
        KRATOS_TRY

        // Assemble the constraints
        const double start_build = OpenMPUtils::GetCurrentTime();

        // We get the global T matrix
        const TSystemMatrixType& rTMatrix = *mpTMatrix;

        // We compute only once (or if cleared)
        if (mCleared) {
            mCleared = false;
            ComputeConstraintContribution(pScheme, rModelPart, true, mComputeConstantContribution);
        } else if (mResetRelationMatrixEachIteration) {
            ResetConstraintSystem();
            ComputeConstraintContribution(pScheme, rModelPart, mResetRelationMatrixEachIteration, mComputeConstantContribution);
        }

        // We compute the transposed matrix of the global relation matrix
        TSystemMatrixType T_transpose_matrix(mDoFToSolveSystemSize, BaseType::mEquationSystemSize);
        SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(T_transpose_matrix, rTMatrix, 1.0);

        // We build the original system
        TSystemMatrixType A; // Dummy auxiliar matrix we need to build anyway because are needed to impose the rigid displacements
        if (mComputeConstantContribution)
        {
            A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
            ConstructMatrixStructure(pScheme, A, rModelPart);
            BuildWithoutConstraints(pScheme, rModelPart, A, rb);
        } else {
            BuildRHSNoDirichletWithoutConstraints(pScheme, rModelPart, rb);
        }

        // The proper way to include the constants is in the RHS as T^t(f - A * g)
        TSystemVectorType rb_copy = rb;
        if (mComputeConstantContribution) {
            // We get the g constant vector
            TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector;
            TSystemVectorType aux_constant_vector(rDeltaConstantVector);
            TSparseSpace::Mult(A, rDeltaConstantVector, aux_constant_vector);
            TSparseSpace::UnaliasedAdd(rb_copy, -1.0, aux_constant_vector);
        }

        rb.resize(mDoFToSolveSystemSize, false);

        // Final multiplication: rb = T^t * (f - A * g)
        TSparseSpace::Mult(T_transpose_matrix, rb_copy, rb);

        const double stop_build = OpenMPUtils::GetCurrentTime();
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Constraint relation build time and multiplication: " << stop_build - start_build << std::endl;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building with constraints" << std::endl;

        KRATOS_CATCH("")
    }

    /**
     * @brief This method resizes and initializes the system of equations
     * @details Additionally to what is done in the base class the constraints are initialized
     * @param pA The pointer to the LHS matrix
     * @param pDx The pointer to the vector of Unknowns
     * @param pb The pointer to the RHS vector
     * @param rModelPart The model part to be computed
     */
    void ResizeAndInitializeVectors(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixPointerType& pA,
        TSystemVectorPointerType& pDx,
        TSystemVectorPointerType& pb,
        ModelPart& rModelPart
        ) override
    {
        // We resize the basic system
        BaseType::ResizeAndInitializeVectors(pScheme, pA, pDx, pb, rModelPart);

        // If needed resize the vector for the calculation of reactions
        if (BaseType::mCalculateReactionsFlag) {
            const SizeType reactions_vector_size = BaseType::mDofSet.size() - mDoFToSolveSystemSize + mDoFMasterFixedSet.size();
            TSystemVectorType& rReactionsVector = *(BaseType::mpReactionsVector);
            if (rReactionsVector.size() != reactions_vector_size)
                rReactionsVector.resize(reactions_vector_size, false);
        }

        // Now we resize the relation matrix used on the MPC solution
        if(rModelPart.MasterSlaveConstraints().size() > 0) {
            if (mpTMatrix == NULL) { // If the pointer is not initialized initialize it to an empty matrix
                TSystemMatrixPointerType pNewT = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
                mpTMatrix.swap(pNewT);
            }

            // The rigid movement
            if (mpConstantVector == NULL) { // If the pointer is not initialized initialize it to an empty vector
                TSystemVectorPointerType pNewConstantVector = TSystemVectorPointerType(new TSystemVectorType(0));
                mpConstantVector.swap(pNewConstantVector);
            }

            // The effective rigid movement
            if (mpDeltaConstantVector == NULL) { // If the pointer is not initialized initialize it to an empty vector
                TSystemVectorPointerType pNewConstantVector = TSystemVectorPointerType(new TSystemVectorType(0));
                mpDeltaConstantVector.swap(pNewConstantVector);
            }

            // System matrices/vectors
            TSystemMatrixType& rTMatrix = *mpTMatrix;
            TSystemVectorType& rConstantVector = *mpConstantVector;
            TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector;

            // Resizing the system matrix
            if (rTMatrix.size1() == 0 || BaseType::GetReshapeMatrixFlag() || mCleared) { // If the matrix is not initialized
                rTMatrix.resize(BaseType::mEquationSystemSize, mDoFToSolveSystemSize, false);
                ConstructRelationMatrixStructure(pScheme, rTMatrix, rModelPart);
            } else {
                if (rTMatrix.size1() != BaseType::mEquationSystemSize || rTMatrix.size2() != mDoFToSolveSystemSize) {
                    KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
                    rTMatrix.resize(BaseType::mEquationSystemSize, mDoFToSolveSystemSize, false);
                    ConstructRelationMatrixStructure(pScheme, rTMatrix, rModelPart);
                }
            }

            // Resizing the system vector
            // The rigid movement
            if (rConstantVector.size() != BaseType::mEquationSystemSize || BaseType::GetReshapeMatrixFlag() || mCleared) {
                rConstantVector.resize(BaseType::mEquationSystemSize, false);
                mComputeConstantContribution = ComputeConstraintContribution(pScheme, rModelPart);
            } else {
                if (rConstantVector.size() != BaseType::mEquationSystemSize) {
                    KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
                    rConstantVector.resize(BaseType::mEquationSystemSize, false);
                    mComputeConstantContribution = ComputeConstraintContribution(pScheme, rModelPart);
                }
            }

            // The effective rigid movement
            if (mComputeConstantContribution) {
                if (rDeltaConstantVector.size() != BaseType::mEquationSystemSize || BaseType::GetReshapeMatrixFlag() || mCleared) {
                    rDeltaConstantVector.resize(BaseType::mEquationSystemSize, false);
                } else {
                    if (rDeltaConstantVector.size() != BaseType::mEquationSystemSize) {
                        KRATOS_ERROR <<"The equation system size has changed during the simulation.
This is not permited."<<std::endl;
                    rDeltaConstantVector.resize(BaseType::mEquationSystemSize, false);
                }
            }
        }
    }
}

/**
 * @brief It computes the reactions of the system
 * @details The RHS is rebuilt first so the reaction values correspond to the current solution;
 * only fixed DoFs and slave (MPC) DoFs receive a reaction, addressed through the
 * EquationId -> reaction-vector-position map (mReactionEquationIdMap)
 * @param pScheme The pointer to the integration scheme
 * @param rModelPart The model part to compute
 * @param rA The LHS matrix of the system of equations
 * @param rDx The vector of unknowns
 * @param rb The RHS vector of the system of equations
 */
void CalculateReactions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    ) override
{
    KRATOS_TRY

    // Refresh RHS to have the correct reactions
    BuildRHS(pScheme, rModelPart, rb);

    // Adding contribution to reactions
    TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector;

    // Updating variables: a DoF gets a reaction if it is fixed or if it is a slave of a constraint.
    // The sign is flipped because the assembled residual is the negative of the reaction.
    for (auto& r_dof : BaseType::mDofSet) {
        if ((r_dof.IsFixed()) || mDoFSlaveSet.find(r_dof) != mDoFSlaveSet.end()) {
            r_dof.GetSolutionStepReactionValue() = -r_reactions_vector[mReactionEquationIdMap[r_dof.EquationId()]];
        }
    }

    KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints::CalculateReactions failed ..");
}

/**
 * @brief Applies the dirichlet conditions. This operation may be very heavy or completely unexpensive depending on the implementation choosen and on how the System Matrix is built.
 * @details In the base ResidualBasedEliminationBuilderAndSolver does nothing, due to the fact that the BC are automatically managed with the elimination.
But in the constrints approach the slave DoF depending on fixed DoFs must be reconstructed * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param rA The LHS matrix * @param rDx The Unknowns vector * @param rb The RHS vector */ void ApplyDirichletConditions( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) override { KRATOS_TRY; if (mDoFMasterFixedSet.size() > 0) { // We apply the same method as in the block builder and solver but instead of fixing the fixed Dofs, we just fix the master fixed Dofs std::vector<double> scaling_factors (mDoFToSolveSystemSize, 0.0); // NOTE: Dofs are assumed to be numbered consecutively const auto it_dof_begin = BaseType::mDofSet.begin(); IndexType counter = 0; for (IndexType i = 0; i < BaseType::mDofSet.size(); ++i) { auto it_dof = it_dof_begin + i; const IndexType equation_id = it_dof->EquationId(); if (equation_id < BaseType::mEquationSystemSize ) { auto it_first_check = mDoFSlaveSet.find(*it_dof); if (it_first_check == mDoFSlaveSet.end()) { auto it_second_check = mDoFSlaveSet.find(*it_dof); if (it_second_check == mDoFSlaveSet.end()) { if(mDoFMasterFixedSet.find(*it_dof) == mDoFMasterFixedSet.end()) { scaling_factors[counter] = 1.0; } } counter += 1; } } } double* Avalues = rA.value_data().begin(); IndexType* Arow_indices = rA.index1_data().begin(); IndexType* Acol_indices = rA.index2_data().begin(); // Detect if there is a line of all zeros and set the diagonal to a 1 if this happens #pragma omp parallel for for(int k = 0; k < static_cast<int>(mDoFToSolveSystemSize); ++k) { const IndexType col_begin = Arow_indices[k]; const IndexType col_end = Arow_indices[k+1]; bool empty = true; for (IndexType j = col_begin; j < col_end; ++j) { if(Avalues[j] != 0.0) { empty = false; break; } } if(empty) { rA(k,k) = 1.0; rb[k] = 0.0; } } #pragma omp parallel for for (int k = 0; k < 
static_cast<int>(mDoFToSolveSystemSize); ++k) {
            // CSR row bounds for row k of the system matrix
            const IndexType col_begin = Arow_indices[k];
            const IndexType col_end = Arow_indices[k+1];
            const double k_factor = scaling_factors[k];
            if (k_factor == 0) {
                // Zero out the whole row, except the diagonal
                for (IndexType j = col_begin; j < col_end; ++j)
                    if (static_cast<int>(Acol_indices[j]) != k ) Avalues[j] = 0.0;

                // Zero out the RHS
                rb[k] = 0.0;
            } else {
                // Zero out the column which is associated with the zero'ed row
                for (IndexType j = col_begin; j < col_end; ++j) {
                    if(scaling_factors[ Acol_indices[j] ] == 0 ) {
                        Avalues[j] = 0.0;
                    }
                }
            }
        }
    }

    KRATOS_CATCH("");
}

/**
 * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
 * @details Resets the auxiliary DoF sets, the reaction map and the constraint system
 * (relation matrix and constant vectors), and flags the system as cleared so it is rebuilt on next use
 */
void Clear() override
{
    BaseType::Clear();

    // Reseting auxiliar set of dofs
    mDoFMasterFixedSet = DofsArrayType();
    mDoFSlaveSet = DofsArrayType();

    // Clearing the relation map
    mReactionEquationIdMap.clear();

    // Clear constraint system
    if (mpTMatrix != nullptr)
        TSparseSpace::Clear(mpTMatrix);
    if (mpConstantVector != nullptr)
        TSparseSpace::Clear(mpConstantVector);
    if (mpDeltaConstantVector != nullptr)
        TSparseSpace::Clear(mpDeltaConstantVector);

    // Set the flag so that lazily-built structures are reconstructed on the next build
    mCleared = true;

    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
}

///@}
///@name Protected Access
///@{

///@}
///@name Protected Inquiry
///@{

///@}
///@name Protected LifeCycle
///@{

///@}

private:
///@name Static Member Variables
///@{

///@}
///@name Member Variables
///@{

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

/**
 * @brief This method computes the equivalent counter part of the SetUpSystem when using constraints
 * @param rModelPart The model part of the problem to solve
 */
void SetUpSystemWithConstraints(ModelPart& rModelPart)
{
    KRATOS_TRY

    // First we set up the system of equations without constraints
    // Set equation id for degrees of freedom the free
degrees of freedom are positioned at the beginning of the system, while the fixed one are at the end (in opposite order). // // That means that if the EquationId is greater than "mEquationSystemSize" the pointed degree of freedom is restrained // This is almost the same SetUpSystem from ResidualBasedEliminationBuilderAndSolver, but we don't discard from the system the fixed dofs that are part of a constraint at the same time /// First we detect the master fixed DoFs /// // The current process info ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // Vector containing the localization in the system of the different terms DofsVectorType slave_dof_list, master_dof_list; // Declaring temporal variables DofsArrayType dof_temp_fixed_master; typedef std::unordered_set < DofPointerType, DofPointerHasher> set_type; set_type dof_global_fixed_master_set; // Iterate over constraints const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size()); const auto it_const_begin = rModelPart.MasterSlaveConstraints().begin(); #pragma omp parallel firstprivate(slave_dof_list, master_dof_list) { // We cleate the temporal set and we reserve some space on them set_type dof_temp_fixed_master_set; dof_temp_fixed_master_set.reserve(2000); #pragma omp for schedule(guided, 512) nowait for (int i_const = 0; i_const < number_of_constraints; ++i_const) { auto it_const = it_const_begin + i_const; // Detect if the constraint is active or not. 
If the user did not make any choice the constraint // It is active by default bool constraint_is_active = true; if (it_const->IsDefined(ACTIVE)) constraint_is_active = it_const->Is(ACTIVE); if (constraint_is_active) { it_const->GetDofList(slave_dof_list, master_dof_list, r_current_process_info); // Filling the set of dofs master and fixed at the same time for (auto& master_dof : master_dof_list) { if (master_dof->IsFixed()) { dof_temp_fixed_master_set.insert(master_dof); } } } } // We merge all the sets in one thread #pragma omp critical { dof_global_fixed_master_set.insert(dof_temp_fixed_master_set.begin(), dof_temp_fixed_master_set.end()); } } dof_temp_fixed_master.reserve(dof_global_fixed_master_set.size()); for (auto p_dof : dof_global_fixed_master_set) { dof_temp_fixed_master.push_back( p_dof ); } dof_temp_fixed_master.Sort(); mDoFMasterFixedSet = dof_temp_fixed_master; /// Now we compute as expected /// int free_id = 0; int fix_id = BaseType::mDofSet.size(); for (auto& dof : BaseType::mDofSet) { if (dof.IsFixed()) { auto it = mDoFMasterFixedSet.find(dof); if (it == mDoFMasterFixedSet.end()) { dof.SetEquationId(--fix_id); } else { dof.SetEquationId(free_id++); } } else { dof.SetEquationId(free_id++); } } BaseType::mEquationSystemSize = fix_id; // Add the computation of the global ids of the solvable dofs IndexType counter = 0; for (auto& dof : BaseType::mDofSet) { if (dof.EquationId() < BaseType::mEquationSystemSize) { auto it = mDoFSlaveSet.find(dof); if (it == mDoFSlaveSet.end()) { ++counter; } } } // The total system of equations to be solved mDoFToSolveSystemSize = counter; // Finally we build the relation between the EquationID and the component of the reaction counter = 0; for (auto& r_dof : BaseType::mDofSet) { const bool is_master_fixed = mDoFMasterFixedSet.find(r_dof) == mDoFMasterFixedSet.end() ? false : true; const bool is_slave = mDoFSlaveSet.find(r_dof) == mDoFSlaveSet.end() ? 
false : true;
        if (is_master_fixed || is_slave) { // Fixed or MPC dof
            // Map the global EquationId to its compact position in the reactions vector
            mReactionEquationIdMap.insert({r_dof.EquationId(), counter});
            ++counter;
        }
    }

    KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints::SetUpSystemWithConstraints failed ..");
}

/**
 * @brief This method initializes the DoF using the master/slave relationship
 * @details Only rModelPart is used; the remaining arguments keep the common builder signature
 * @param pScheme The pointer to the integration scheme
 * @param rModelPart The model part to compute
 * @param rA The LHS matrix of the system of equations
 * @param rDx The vector of unknowns
 * @param rb The RHS vector of the system of equations
 */
void ApplyMasterSlaveRelation(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    )
{
    KRATOS_TRY

    // First we reset the slave dofs
    ConstraintUtilities::ResetSlaveDofs(rModelPart);

    // Now we apply the constraints
    ConstraintUtilities::ApplyConstraints(rModelPart);

    KRATOS_CATCH("");
}

/**
 * @brief This method checks that the master/slave relation is properly set
 * @param pScheme The pointer to the integration scheme
 * @param rModelPart The model part to compute
 * @param rDx The vector of unknowns
 * @param rDxSolved The vector of unknowns actually solved
 * @return true when the reconstructed solution satisfies the constraint relation (residual below machine epsilon), false otherwise
 */
bool CheckMasterSlaveRelation(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemVectorType& rDx,
    TSystemVectorType& rDxSolved
    )
{
    KRATOS_TRY

    // Auxiliar values
    const auto it_dof_begin = BaseType::mDofSet.begin();
    TSystemVectorType current_solution(mDoFToSolveSystemSize);      // solvable (non-slave) DoFs only
    TSystemVectorType updated_solution(BaseType::mEquationSystemSize);
    TSystemVectorType residual_solution(BaseType::mEquationSystemSize);

    // Get current values of the solvable DoFs, packed consecutively
    IndexType counter = 0;
    for (IndexType i = 0; i < BaseType::mDofSet.size(); ++i) {
        auto it_dof = it_dof_begin + i;
        const IndexType equation_id = it_dof->EquationId();
        if (equation_id < BaseType::mEquationSystemSize ) {
            auto it = mDoFSlaveSet.find(*it_dof);
            if (it == mDoFSlaveSet.end()) {
                current_solution[counter] =
it_dof->GetSolutionStepValue() + rDxSolved[counter];
                counter += 1;
            }
        }
    }

    // Full-size solution (all non-restrained DoFs) from the complete update rDx
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(BaseType::mDofSet.size()); ++i) {
        auto it_dof = it_dof_begin + i;
        const IndexType equation_id = it_dof->EquationId();
        if (equation_id < BaseType::mEquationSystemSize ) {
            residual_solution[equation_id] = it_dof->GetSolutionStepValue() + rDx[equation_id];
        }
    }

    // Apply master slave constraints: expand the solvable solution through the relation matrix T
    const TSystemMatrixType& rTMatrix = *mpTMatrix;
    TSparseSpace::Mult(rTMatrix, current_solution, updated_solution);
    if (mComputeConstantContribution) {
        // Refresh and add the constant (rigid-movement) vector g
        ComputeConstraintContribution(pScheme, rModelPart, false, true);
        const TSystemVectorType& rConstantVector = *mpConstantVector;
        TSparseSpace::UnaliasedAdd(updated_solution, 1.0, rConstantVector);
    }
    TSparseSpace::UnaliasedAdd(residual_solution, -1.0, updated_solution);

    // Check database: any residual entry above machine epsilon means the relation is violated
    for(int k = 0; k < static_cast<int>(BaseType::mEquationSystemSize); ++k) {
        if (std::abs(residual_solution[k]) > std::numeric_limits<double>::epsilon()) return false;
    }

    return true;

    KRATOS_CATCH("");
}

/**
 * @brief This method reconstructs the slave solution after Solving.
 * @param pScheme The pointer to the integration scheme
 * @param rModelPart Reference to the ModelPart containing the problem.
* @param rA System matrix * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void ReconstructSlaveSolutionAfterSolve( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) { KRATOS_TRY // We get the global T matrix and the constant vector const TSystemMatrixType& rTMatrix = *mpTMatrix; // We reconstruct the complete vector of Unknowns TSystemVectorType Dx_copy = rDx; rDx.resize(BaseType::mEquationSystemSize); TSparseSpace::Mult(rTMatrix, Dx_copy, rDx); // Add the constant vector if (mComputeConstantContribution) { const TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector; TSparseSpace::UnaliasedAdd(rDx, 1.0, rDeltaConstantVector); } // We check the solution if (mCheckConstraintRelation) { KRATOS_ERROR_IF_NOT(CheckMasterSlaveRelation(pScheme, rModelPart, rDx, Dx_copy)) << "The relation between master/slave dofs is not respected" << std::endl; } // Simply restore old LHS (rA).swap(*mpOldAMatrix); mpOldAMatrix = NULL; // Reconstruct the RHS TSystemVectorType rb_copy = rb; rb.resize(BaseType::mEquationSystemSize, false); TSparseSpace::Mult(rTMatrix, rb_copy, rb); KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints::ReconstructSlaveSolutionAfterSolve failed .."); } /** * @brief Function to perform the build the system without constraints * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param rA The LHS matrix * @param rb The RHS vector */ void BuildWithoutConstraints( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rb ) { // The current process info ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // Getting the array of elements ElementsArrayType& r_elements_array = rModelPart.Elements(); // Getting the array of the conditions ConditionsArrayType& r_conditons_array = 
rModelPart.Conditions(); // Contributions to the system LocalSystemMatrixType lhs_contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType rhs_contribution = LocalSystemVectorType(0); // Vector containing the localization in the system of the different terms Element::EquationIdVectorType equation_id; // Assemble all elements and conditions #pragma omp parallel firstprivate( lhs_contribution, rhs_contribution, equation_id) { // Elements const auto it_elem_begin = r_elements_array.begin(); const int nelements = static_cast<int>(r_elements_array.size()); #pragma omp for schedule(guided, 512) nowait for (int i = 0; i<nelements; ++i) { auto it_elem = it_elem_begin + i; // Detect if the element is active or not. If the user did not make any choice the element is active by default bool element_is_active = true; if (it_elem->IsDefined(ACTIVE)) element_is_active = it_elem->Is(ACTIVE); if (element_is_active) { // Calculate elemental contribution pScheme->CalculateSystemContributions(*(it_elem.base()), lhs_contribution, rhs_contribution, equation_id, r_current_process_info); // Assemble the elemental contribution AssembleWithoutConstraints(rA, rb, lhs_contribution, rhs_contribution, equation_id); // Clean local elemental memory pScheme->CleanMemory(*(it_elem.base())); } } // Conditions const auto it_cond_begin = r_conditons_array.begin(); const int nconditions = static_cast<int>(r_conditons_array.size()); #pragma omp for schedule(guided, 512) for (int i = 0; i<nconditions; ++i) { auto it_cond = it_cond_begin + i; // Detect if the element is active or not. 
If the user did not make any choice the element is active by default bool condition_is_active = true; if (it_cond->IsDefined(ACTIVE)) condition_is_active = it_cond->Is(ACTIVE); if (condition_is_active) { // Calculate elemental contribution pScheme->Condition_CalculateSystemContributions(*(it_cond.base()), lhs_contribution, rhs_contribution, equation_id, r_current_process_info); // Assemble the elemental contribution AssembleWithoutConstraints(rA, rb, lhs_contribution, rhs_contribution, equation_id); // Clean local elemental memory pScheme->CleanMemory(*(it_cond.base())); } } } } /** * @brief Function to perform the build of the RHS without constraints * @details The vector could be sized as the total number of dofs or as the number of unrestrained ones * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param rb The RHS of the system */ void BuildRHSNoDirichletWithoutConstraints( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& rb ) { // The current process info ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // Getting the array of elements ElementsArrayType& r_elements_array = rModelPart.Elements(); // Getting the array of the conditions ConditionsArrayType& r_conditons_array = rModelPart.Conditions(); // Contributions to the system LocalSystemVectorType rhs_contribution = LocalSystemVectorType(0); // Vector containing the localization in the system of the different terms Element::EquationIdVectorType equation_id; // Assemble all elements and conditions #pragma omp parallel firstprivate( rhs_contribution, equation_id) { // Elements const auto it_elem_begin = r_elements_array.begin(); const int nelements = static_cast<int>(r_elements_array.size()); #pragma omp for schedule(guided, 512) nowait for (int i = 0; i<nelements; ++i) { auto it_elem = it_elem_begin + i; // Detect if the element is active or not. 
If the user did not make any choice the element is active by default bool element_is_active = true; if (it_elem->IsDefined(ACTIVE)) element_is_active = it_elem->Is(ACTIVE); if (element_is_active) { // Calculate elemental Right Hand Side Contribution pScheme->Calculate_RHS_Contribution(*(it_elem.base()), rhs_contribution, equation_id, r_current_process_info); // Assemble the elemental contribution AssembleRHSWithoutConstraints(rb, rhs_contribution, equation_id); } } // Conditions const auto it_cond_begin = r_conditons_array.begin(); const int nconditions = static_cast<int>(r_conditons_array.size()); #pragma omp for schedule(guided, 512) for (int i = 0; i<nconditions; ++i) { auto it_cond = it_cond_begin + i; // Detect if the element is active or not. If the user did not make any choice the element is active by default bool condition_is_active = true; if (it_cond->IsDefined(ACTIVE)) condition_is_active = it_cond->Is(ACTIVE); if (condition_is_active) { // Calculate elemental contribution pScheme->Condition_Calculate_RHS_Contribution(*(it_cond.base()), rhs_contribution, equation_id, r_current_process_info); // Assemble the elemental contribution AssembleRHSWithoutConstraints(rb, rhs_contribution, equation_id); } } } } /** * @brief This function does the assembling of the LHS and RHS * @note The main difference respect the block builder and solver is the fact that the fixed DoFs are not considered on the assembling */ void AssembleWithoutConstraints( TSystemMatrixType& rA, TSystemVectorType& rb, const LocalSystemMatrixType& rLHSContribution, const LocalSystemVectorType& rRHSContribution, const Element::EquationIdVectorType& rEquationId ) { const SizeType local_size = rLHSContribution.size1(); // Assemble RHS AssembleRHSWithoutConstraints(rb, rRHSContribution, rEquationId); // Assemble LHS for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { 
BaseType::AssembleRowContributionFreeDofs(rA, rLHSContribution, i_global, i_local, rEquationId); } } } /** * @brief Assembling local contribution of nodes and elements in the RHS * @param rb The RHS vector */ void AssembleRHSWithoutConstraints( TSystemVectorType& rb, const LocalSystemVectorType& rRHSContribution, const Element::EquationIdVectorType& rEquationId ) { const SizeType local_size = rRHSContribution.size(); if (!BaseType::mCalculateReactionsFlag) { for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { // free dof // ASSEMBLING THE SYSTEM VECTOR double& r_b_value = rb[i_global]; const double rhs_value = rRHSContribution[i_local]; #pragma omp atomic r_b_value += rhs_value; } } } else { TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector; for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; auto it_dof = BaseType::mDofSet.begin() + i_global; const bool is_master_fixed = mDoFMasterFixedSet.find(*it_dof) == mDoFMasterFixedSet.end() ? false : true; const bool is_slave = mDoFSlaveSet.find(*it_dof) == mDoFSlaveSet.end() ? 
false : true; if (is_master_fixed || is_slave) { // Fixed or MPC dof double& r_b_value = r_reactions_vector[mReactionEquationIdMap[i_global]]; const double rhs_value = rRHSContribution[i_local]; #pragma omp atomic r_b_value += rhs_value; } else if (it_dof->IsFree()) { // Free dof not in the MPC // ASSEMBLING THE SYSTEM VECTOR double& r_b_value = rb[i_global]; const double& rhs_value = rRHSContribution[i_local]; #pragma omp atomic r_b_value += rhs_value; } } } } /** * @brief This method set to zero the relation matrix */ void ResetConstraintSystem() { TSystemMatrixType& rTMatrix = *mpTMatrix; double *Tvalues = rTMatrix.value_data().begin(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(rTMatrix.nnz()); ++i) { Tvalues[i] = 0.0; } IndexMapType solvable_dof_reorder; // Filling with "ones" typedef std::pair<IndexType, IndexType> IndexIndexPairType; IndexType counter = 0; for (auto& dof : BaseType::mDofSet) { if (dof.EquationId() < BaseType::mEquationSystemSize) { const IndexType equation_id = dof.EquationId(); auto it = mDoFSlaveSet.find(dof); if (it == mDoFSlaveSet.end()) { solvable_dof_reorder.insert(IndexIndexPairType(equation_id, counter)); ++counter; } } } // Setting ones for (auto& solv_dof : solvable_dof_reorder) { rTMatrix(solv_dof.first, solv_dof.second) = 1.0; } if (mComputeConstantContribution) { TSystemVectorType& rConstantVector = *mpConstantVector; TSparseSpace::SetToZero(rConstantVector); } } /** * @brief This method applies the BC, only in the RHS * @param pScheme The pointer to the integration scheme * @param rModelPart The model part to compute * @param rb The RHS vector of the system of equations */ void ApplyDirichletConditionsRHS( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& rb ) { KRATOS_TRY; if (mDoFMasterFixedSet.size() > 0) { // NOTE: dofs are assumed to be numbered consecutively const auto it_dof_begin = BaseType::mDofSet.begin(); #pragma omp parallel for for(int k = 0; k < 
static_cast<int>(mDoFToSolveSystemSize); ++k) { auto it_dof = it_dof_begin + k; if (k < static_cast<int>(BaseType::mEquationSystemSize)) { auto it = mDoFSlaveSet.find(*it_dof); if (it == mDoFSlaveSet.end()) { if(mDoFMasterFixedSet.find(*it_dof) != mDoFMasterFixedSet.end()) { rb[k] = 0.0; } } } } } KRATOS_CATCH(""); } /** * @brief This method computes the absolute constant contribution of the MPC * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param ComputeTranslationMatrix If the translation matrix will be assembled * @param ComputeConstantVector If the constant vector will be assembled * @return If there are constant constraints */ bool ComputeConstraintContribution( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, const bool ComputeTranslationMatrix = false, const bool ComputeConstantVector = false ) { KRATOS_TRY; // We build the global T matrix and the g constant vector TSystemMatrixType& rTMatrix = *mpTMatrix; TSystemVectorType& rConstantVector = *mpConstantVector; // Filling constant vector if (ComputeConstantVector) { #pragma omp parallel for for (int i = 0; i < static_cast<int>(BaseType::mEquationSystemSize); ++i) { rConstantVector[i] = 0.0; } } // Auxiliar set to reorder master DoFs IndexMapType solvable_dof_reorder; // Filling with "ones" typedef std::pair<IndexType, IndexType> IndexIndexPairType; IndexType counter = 0; for (auto& dof : BaseType::mDofSet) { if (dof.EquationId() < BaseType::mEquationSystemSize) { const IndexType equation_id = dof.EquationId(); auto it = mDoFSlaveSet.find(dof); if (it == mDoFSlaveSet.end()) { solvable_dof_reorder.insert(IndexIndexPairType(equation_id, counter)); ++counter; } } } // The current process info ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // Initialize the constant vector double aux_constant_value = 0.0; // Contributions to the system LocalSystemMatrixType transformation_matrix = LocalSystemMatrixType(0, 0); 
LocalSystemVectorType constant_vector = LocalSystemVectorType(0); // Vector containing the localization in the system of the different terms EquationIdVectorType slave_equation_id, master_equation_id; const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size()); std::unordered_set<IndexType> auxiliar_constant_equations_ids; #pragma omp parallel firstprivate(transformation_matrix, constant_vector, slave_equation_id, master_equation_id) { std::unordered_set<IndexType> auxiliar_temp_constant_equations_ids; auxiliar_temp_constant_equations_ids.reserve(2000); #pragma omp for schedule(guided, 512) for (int i_const = 0; i_const < number_of_constraints; ++i_const) { auto it_const = rModelPart.MasterSlaveConstraints().begin() + i_const; // Detect if the constraint is active or not. If the user did not make any choice the constraint // It is active by default bool constraint_is_active = true; if (it_const->IsDefined(ACTIVE)) constraint_is_active = it_const->Is(ACTIVE); if (constraint_is_active) { it_const->CalculateLocalSystem(transformation_matrix, constant_vector, r_current_process_info); it_const->EquationIdVector(slave_equation_id, master_equation_id, r_current_process_info); // Reassign reordered dofs to the master side for (auto& id : master_equation_id) { id = solvable_dof_reorder[id]; } if (ComputeConstantVector) { for (IndexType i = 0; i < slave_equation_id.size(); ++i) { const IndexType i_global = slave_equation_id[i]; if (i_global < BaseType::mEquationSystemSize) { const double constant_value = constant_vector[i]; if (std::abs(constant_value) > 0.0) { auxiliar_temp_constant_equations_ids.insert(i_global); double& r_value = rConstantVector[i_global]; #pragma omp atomic r_value += constant_value; } } } } else { for (IndexType i = 0; i < slave_equation_id.size(); ++i) { const IndexType i_global = slave_equation_id[i]; if (i_global < BaseType::mEquationSystemSize) { const double constant_value = constant_vector[i]; #pragma omp atomic 
aux_constant_value += std::abs(constant_value); } } } if (ComputeTranslationMatrix) { // Assemble the constraint contribution AssembleRelationMatrix(rTMatrix, transformation_matrix, slave_equation_id, master_equation_id); } } } // We merge all the sets in one thread #pragma omp critical { auxiliar_constant_equations_ids.insert(auxiliar_temp_constant_equations_ids.begin(), auxiliar_temp_constant_equations_ids.end()); } } return aux_constant_value > std::numeric_limits<double>::epsilon() ? true : false; KRATOS_CATCH(""); } /** * @brief This method computes the efective constant * @param pScheme The pointer to the integration scheme * @param rModelPart The model part to compute * @param rDxSolved The vector of unkowns actually solved */ void ComputeEffectiveConstant( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& rDxSolved ) { if (mComputeConstantContribution) { // We get const TSystemMatrixType& rTMatrix = *mpTMatrix; TSystemVectorType& rConstantVector = *mpConstantVector; TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector; TSparseSpace::Copy(rConstantVector, rDeltaConstantVector); // We reconstruct the complete vector of Unknowns TSystemVectorType Dx(BaseType::mEquationSystemSize); TSparseSpace::Mult(rTMatrix, rDxSolved, Dx); // Compute the effective constant vector // Auxiliar initial dof iterator const auto it_dof_begin = BaseType::mDofSet.begin(); TSystemVectorType u(BaseType::mEquationSystemSize); #pragma omp parallel for for (int i = 0; i < static_cast<int>(BaseType::mDofSet.size()); ++i) { auto it_dof = it_dof_begin + i; const IndexType equation_id = it_dof->EquationId(); if (equation_id < BaseType::mEquationSystemSize ) { u[equation_id] = it_dof->GetSolutionStepValue() + Dx[equation_id]; } } TSystemVectorType u_bar(mDoFToSolveSystemSize); IndexType counter = 0; for (IndexType i = 0; i < BaseType::mDofSet.size(); ++i) { auto it_dof = it_dof_begin + i; const IndexType equation_id = it_dof->EquationId(); if 
(equation_id < BaseType::mEquationSystemSize ) { auto it = mDoFSlaveSet.find(*it_dof); if (it == mDoFSlaveSet.end()) { u_bar[counter] = it_dof->GetSolutionStepValue() + rDxSolved[counter]; counter += 1; } } } TSystemVectorType u_bar_complete(BaseType::mEquationSystemSize); TSparseSpace::Mult(rTMatrix, u_bar, u_bar_complete); TSparseSpace::UnaliasedAdd(rDeltaConstantVector, 1.0, u_bar_complete); TSparseSpace::UnaliasedAdd(rDeltaConstantVector, -1.0, u); } } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class ResidualBasedEliminationBuilderAndSolverWithConstraints */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER defined */
myprog.c
#define MSIZE 200
int n, m, mits;
double tol, relax = 1.0, alpha = 0.0543;
double u[MSIZE][MSIZE], f[MSIZE][MSIZE], uold[MSIZE][MSIZE];
double dx, dy;

/*
 * Zero the solution grid u and build the right-hand side f for the
 * model problem on the square [-1,1] x [-1,1].
 * Reads globals n, m, alpha; writes globals dx, dy, u, f.
 */
void initialize ()
{
  int row, col;

  dx = 2.0 / (n - 1);   /* grid spacing in x */
  dy = 2.0 / (m - 1);   /* grid spacing in y */

  /* Each (row,col) iteration is independent, so this nest is a natural
     candidate for "#pragma omp parallel for" with private coordinates. */
  for (row = 0; row < n; row++) {
    for (col = 0; col < m; col++) {
      /* NOTE(review): the int cast truncates the physical coordinate to
         {-1, 0}; kept verbatim to match the reference implementation. */
      int xi = (int) (-1.0 + dx * (row - 1));  /* -1 < x < 1 */
      int yi = (int) (-1.0 + dy * (col - 1));  /* -1 < y < 1 */
      u[row][col] = 0.0;
      f[row][col] = -1.0 * alpha * (1.0 - xi * xi) * (1.0 - yi * yi)
                    - 2.0 * (1.0 - xi * xi) - 2.0 * (1.0 - yi * yi);
    }
  }
}
target_data_array_extension.c
// -------------------------------------------------- // Check extends before // -------------------------------------------------- // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=BEFORE // RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu // -------------------------------------------------- // Check extends after // -------------------------------------------------- // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu \ // RUN: -fopenmp-version=51 -DEXTENDS=AFTER // RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu // END. 
#include <stdio.h> #define BEFORE 0 #define AFTER 1 #define SIZE 100 #if EXTENDS == BEFORE # define SMALL_BEG (SIZE-2) # define SMALL_END SIZE # define LARGE_BEG 0 # define LARGE_END SIZE #elif EXTENDS == AFTER # define SMALL_BEG 0 # define SMALL_END 2 # define LARGE_BEG 0 # define LARGE_END SIZE #else # error EXTENDS undefined #endif #define SMALL_SIZE (SMALL_END-SMALL_BEG) #define LARGE_SIZE (LARGE_END-LARGE_BEG) #define SMALL SMALL_BEG:SMALL_SIZE #define LARGE LARGE_BEG:LARGE_SIZE int main() { int arr[SIZE]; // CHECK: addr=0x[[#%x,SMALL_ADDR:]], size=[[#%u,SMALL_BYTES:]] fprintf(stderr, "addr=%p, size=%ld\n", &arr[SMALL_BEG], SMALL_SIZE * sizeof arr[0]); // CHECK: addr=0x[[#%x,LARGE_ADDR:]], size=[[#%u,LARGE_BYTES:]] fprintf(stderr, "addr=%p, size=%ld\n", &arr[LARGE_BEG], LARGE_SIZE * sizeof arr[0]); // CHECK-NOT: Libomptarget #pragma omp target data map(alloc: arr[LARGE]) { #pragma omp target data map(present, tofrom: arr[SMALL]) ; } // CHECK: arr is present fprintf(stderr, "arr is present\n"); // CHECK: Libomptarget message: explicit extension not allowed: host address specified is 0x{{0*}}[[#LARGE_ADDR]] ([[#LARGE_BYTES]] bytes), but device allocation maps to host at 0x{{0*}}[[#SMALL_ADDR]] ([[#SMALL_BYTES]] bytes) // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#LARGE_ADDR]] ([[#LARGE_BYTES]] bytes) // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory #pragma omp target data map(alloc: arr[SMALL]) { #pragma omp target data map(present, tofrom: arr[LARGE]) ; } // CHECK-NOT: arr is present fprintf(stderr, "arr is present\n"); return 0; }
omp_atomic.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <math.h> #include "omp_testsuite.h" #define DOUBLE_DIGITS 20 /* dt^DOUBLE_DIGITS */ #define MAX_FACTOR 10 #define KNOWN_PRODUCT 3628800 /* 10! */ int test_omp_atomic() { int sum; int diff; double dsum = 0; double dt = 0.5; /* base of geometric row for + and - test*/ double ddiff; int product; int x; int *logics; int bit_and = 1; int bit_or = 0; int exclusiv_bit_or = 0; int j; int known_sum; int known_diff; int known_product; int result = 0; int logic_and = 1; int logic_or = 0; double dknown_sum; double rounding_error = 1.E-9; double dpt, div; int logicsArray[LOOPCOUNT]; logics = logicsArray; sum = 0; diff = 0; product = 1; // sum of integers test #pragma omp parallel { int i; #pragma omp for for (i = 1; i <= LOOPCOUNT; i++) { #pragma omp atomic sum += i; } } known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2; if (known_sum != sum) { fprintf(stderr, "Error in sum with integers: Result was %d instead of %d.\n", sum, known_sum); result++; } // difference of integers test #pragma omp parallel { int i; #pragma omp for for (i = 0; i < LOOPCOUNT; i++) { #pragma omp atomic diff -= i; } } known_diff = ((LOOPCOUNT - 1) * LOOPCOUNT) / 2 * -1; if (diff != known_diff) { fprintf (stderr, "Error in difference with integers: Result was %d instead of 0.\n", diff); result++; } // sum of doubles test dsum = 0; dpt = 1; for (j = 0; j < DOUBLE_DIGITS; ++j) { dpt *= dt; } dknown_sum = (1 - dpt) / (1 -dt); #pragma omp parallel { int i; #pragma omp for for (i = 0; i < DOUBLE_DIGITS; ++i) { #pragma omp atomic dsum += pow (dt, i); } } if (dsum != dknown_sum && (fabs (dsum - dknown_sum) > rounding_error)) { fprintf (stderr, "Error in sum with doubles: Result was %f" " instead of: %f (Difference: %E)\n", dsum, dknown_sum, dsum - dknown_sum); result++; } // difference of doubles test dpt = 1; for (j = 0; j < DOUBLE_DIGITS; ++j) { dpt *= dt; } ddiff = (1 - dpt) / (1 - dt); #pragma omp parallel { int i; #pragma omp for for (i = 0; i < 
DOUBLE_DIGITS; ++i) { #pragma omp atomic ddiff -= pow (dt, i); } } if (fabs (ddiff) > rounding_error) { fprintf (stderr, "Error in difference with doubles: Result was %E instead of 0.0\n", ddiff); result++; } // product of integers test #pragma omp parallel { int i; #pragma omp for for (i = 1; i <= MAX_FACTOR; i++) { #pragma omp atomic product *= i; } } known_product = KNOWN_PRODUCT; if (known_product != product) { fprintf (stderr, "Error in product with integers: Result was %d instead of %d\n", product, known_product); result++; } // division of integers test product = KNOWN_PRODUCT; #pragma omp parallel { int i; #pragma omp for for (i = 1; i <= MAX_FACTOR; ++i) { #pragma omp atomic product /= i; } } if (product != 1) { fprintf (stderr, "Error in product division with integers: Result was %d" " instead of 1\n", product); result++; } // division of doubles test div = 5.0E+5; #pragma omp parallel { int i; #pragma omp for for (i = 1; i <= MAX_FACTOR; i++) { #pragma omp atomic div /= i; } } if (fabs(div-0.137787) >= 1.0E-4 ) { result++; fprintf (stderr, "Error in division with double: Result was %f" " instead of 0.137787\n", div); } // ++ test x = 0; #pragma omp parallel { int i; #pragma omp for for (i = 0; i < LOOPCOUNT; ++i) { #pragma omp atomic x++; } } if (x != LOOPCOUNT) { result++; fprintf (stderr, "Error in ++\n"); } // -- test #pragma omp parallel { int i; #pragma omp for for (i = 0; i < LOOPCOUNT; ++i) { #pragma omp atomic x--; } } if (x != 0) { result++; fprintf (stderr, "Error in --\n"); } // bit-and test part 1 for (j = 0; j < LOOPCOUNT; ++j) { logics[j] = 1; } bit_and = 1; #pragma omp parallel { int i; #pragma omp for for (i = 0; i < LOOPCOUNT; ++i) { #pragma omp atomic bit_and &= logics[i]; } } if (!bit_and) { result++; fprintf (stderr, "Error in BIT AND part 1\n"); } // bit-and test part 2 bit_and = 1; logics[LOOPCOUNT / 2] = 0; #pragma omp parallel { int i; #pragma omp for for (i = 0; i < LOOPCOUNT; ++i) { #pragma omp atomic bit_and &= logics[i]; } } 
if (bit_and) { result++; fprintf (stderr, "Error in BIT AND part 2\n"); } // bit-or test part 1 for (j = 0; j < LOOPCOUNT; j++) { logics[j] = 0; } bit_or = 0; #pragma omp parallel { int i; #pragma omp for for (i = 0; i < LOOPCOUNT; ++i) { #pragma omp atomic bit_or |= logics[i]; } } if (bit_or) { result++; fprintf (stderr, "Error in BIT OR part 1\n"); } // bit-or test part 2 bit_or = 0; logics[LOOPCOUNT / 2] = 1; #pragma omp parallel { int i; #pragma omp for for (i = 0; i < LOOPCOUNT; ++i) { #pragma omp atomic bit_or |= logics[i]; } } if (!bit_or) { result++; fprintf (stderr, "Error in BIT OR part 2\n"); } // bit-xor test part 1 for (j = 0; j < LOOPCOUNT; j++) { logics[j] = 0; } exclusiv_bit_or = 0; #pragma omp parallel { int i; #pragma omp for for (i = 0; i < LOOPCOUNT; ++i) { #pragma omp atomic exclusiv_bit_or ^= logics[i]; } } if (exclusiv_bit_or) { result++; fprintf (stderr, "Error in EXCLUSIV BIT OR part 1\n"); } // bit-xor test part 2 exclusiv_bit_or = 0; logics[LOOPCOUNT / 2] = 1; #pragma omp parallel { int i; #pragma omp for for (i = 0; i < LOOPCOUNT; ++i) { #pragma omp atomic exclusiv_bit_or ^= logics[i]; } } if (!exclusiv_bit_or) { result++; fprintf (stderr, "Error in EXCLUSIV BIT OR part 2\n"); } // left shift test x = 1; #pragma omp parallel { int i; #pragma omp for for (i = 0; i < 10; ++i) { #pragma omp atomic x <<= 1; } } if ( x != 1024) { result++; fprintf (stderr, "Error in <<\n"); x = 1024; } // right shift test #pragma omp parallel { int i; #pragma omp for for (i = 0; i < 10; ++i) { #pragma omp atomic x >>= 1; } } if (x != 1) { result++; fprintf (stderr, "Error in >>\n"); } return (result == 0); } // test_omp_atomic() int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_atomic()) { num_failed++; } } return num_failed; }
flux.c
#include <string.h>
#include <stdint.h>
#include <omp.h>
#include <math.h>
#include <ktime.h>
#include <geometry.h>
#include <phy.h>

#ifdef __USE_HW_COUNTER
#include <perf.h>
#include <kperf.h>
#endif

/* Surface-triangle normal scaling: +/- one third of half the cross product
   (magnitude = 1/3 the triangle area). */
#define MAG0 (0.5 / 3)
#define MAG1 (-MAG0)

/* Calculates the residual.
   Pipeline: (1) nodal gradients by weighted least squares, (2) edge-based
   flux computation and flux balance, (3) solid-surface pressure closure,
   (4) free-stream (characteristic) boundary fluxes.
   Edge loops are partitioned per OpenMP thread via ie[t]..ie[t+1]; each
   thread only accumulates into nodes it owns (part[node] == t), so no
   atomics are needed.
   NOTE(review): BETA is presumably the artificial-compressibility constant
   from phy.h — confirm against that header. */
void compute_residual(struct residual *restrict res)
{
#ifdef __USE_HW_COUNTER
  const struct fd fd = res->perf_counters->fd;
  struct counters start;
  perf_read(fd, &start);
  const uint64_t icycle = __rdtsc();
#endif
  struct ktime ktime;
  setktime(&ktime);

  /* Hoist all struct fields into restrict-qualified locals so the compiler
     can assume no aliasing inside the hot loops. */
  const size_t bsz = res->bsz;
  const size_t nfnodes = res->nfnodes;
  const size_t dofs = res->dofs;
  const uint32_t snfc = res->snfc;
  const double pressure = res->pressure;
  const double velocity_u = res->velocity_u;
  const double velocity_v = res->velocity_v;
  const double velocity_w = res->velocity_w;
  const double *restrict f_xyz0 = res->f_xyz0;
  const double *restrict f_xyz1 = res->f_xyz1;
  const double *restrict f_xyz2 = res->f_xyz2;
  const double *restrict xyz0 = res->xyz0;
  const double *restrict xyz1 = res->xyz1;
  const double *restrict xyz2 = res->xyz2;
  const uint32_t *restrict ie = res->ie;
  const uint32_t *restrict part = res->part;
  const uint32_t *restrict snfic = res->snfic;
  const uint32_t *restrict n0 = res->n0;
  const uint32_t *restrict n1 = res->n1;
  const uint32_t *restrict nfptr = res->nfptr;
  const uint32_t *restrict sn0 = res->sn0;
  const uint32_t *restrict sn1 = res->sn1;
  const uint32_t *restrict sn2 = res->sn2;
  const double *restrict x0 = res->x0;
  const double *restrict x1 = res->x1;
  const double *restrict x2 = res->x2;
  const double *restrict x3 = res->x3;
  const double *restrict q = res->q;
  const double *restrict w0termsx = res->w0termsx;
  const double *restrict w0termsy = res->w0termsy;
  const double *restrict w0termsz = res->w0termsz;
  const double *restrict w1termsx = res->w1termsx;
  const double *restrict w1termsy = res->w1termsy;
  const double *restrict w1termsz = res->w1termsz;

  double *restrict gradx0 = res->gradx0;
  double *restrict gradx1 = res->gradx1;
  double *restrict gradx2 = res->gradx2;
  memset(gradx0, 0, dofs * sizeof(double));
  memset(gradx1, 0, dofs * sizeof(double));
  memset(gradx2, 0, dofs * sizeof(double));

  double *restrict r = res->r;
  memset(r, 0, dofs * sizeof(double));
  // __assume_aligned(r, 64);

  /* Calculates the gradients at the nodes using weighted least squares
     This solves using Gram-Schmidt */
  #pragma omp parallel
  {
    const uint32_t t = omp_get_thread_num();
    const uint32_t ie0 = ie[t];     /* this thread's edge range */
    const uint32_t ie1 = ie[t+1];
    uint32_t i;
    for(i = ie0; i < ie1; i++)
    {
      const uint32_t node0 = n0[i];
      const uint32_t node1 = n1[i];
      const uint32_t idx0 = bsz * node0;  /* q/grad offsets (bsz vars/node) */
      const uint32_t idx1 = bsz * node1;
      double dq;
      double termx;
      double termy;
      double termz;
      /* Accumulate only into nodes owned by this thread to avoid races. */
      if(part[node0] == t)
      {
        termx = w0termsx[i];
        termy = w0termsy[i];
        termz = w0termsz[i];
        dq = q[idx1 + 0] - q[idx0 + 0];
        gradx0[idx0 + 0] += termx * dq;
        gradx1[idx0 + 0] += termy * dq;
        gradx2[idx0 + 0] += termz * dq;
        dq = q[idx1 + 1] - q[idx0 + 1];
        gradx0[idx0 + 1] += termx * dq;
        gradx1[idx0 + 1] += termy * dq;
        gradx2[idx0 + 1] += termz * dq;
        dq = q[idx1 + 2] - q[idx0 + 2];
        gradx0[idx0 + 2] += termx * dq;
        gradx1[idx0 + 2] += termy * dq;
        gradx2[idx0 + 2] += termz * dq;
        dq = q[idx1 + 3] - q[idx0 + 3];
        gradx0[idx0 + 3] += termx * dq;
        gradx1[idx0 + 3] += termy * dq;
        gradx2[idx0 + 3] += termz * dq;
      }
      if(part[node1] == t)
      {
        termx = w1termsx[i];
        termy = w1termsy[i];
        termz = w1termsz[i];
        dq = q[idx0 + 0] - q[idx1 + 0];
        gradx0[idx1 + 0] += termx * dq;
        gradx1[idx1 + 0] += termy * dq;
        gradx2[idx1 + 0] += termz * dq;
        dq = q[idx0 + 1] - q[idx1 + 1];
        gradx0[idx1 + 1] += termx * dq;
        gradx1[idx1 + 1] += termy * dq;
        gradx2[idx1 + 1] += termz * dq;
        dq = q[idx0 + 2] - q[idx1 + 2];
        gradx0[idx1 + 2] += termx * dq;
        gradx1[idx1 + 2] += termy * dq;
        gradx2[idx1 + 2] += termz * dq;
        dq = q[idx0 + 3] - q[idx1 + 3];
        gradx0[idx1 + 3] += termx * dq;
        gradx1[idx1 + 3] += termy * dq;
        gradx2[idx1 + 3] += termz * dq;
      }
    }
  }

  /* Calculates the fluxes on the face and performs the flux balance */
  #pragma omp parallel
  {
    uint32_t t = omp_get_thread_num();
    uint32_t ie0 = ie[t];
    uint32_t ie1 = ie[t+1];
    uint32_t i;
    for(i = ie0; i < ie1; i++)
    {
      uint32_t node0 = n0[i];
      uint32_t node1 = n1[i];
      double xn = x0[i];   /* unit face normal */
      double yn = x1[i];
      double zn = x2[i];
      double ln = x3[i];   /* face magnitude */
      double xmean = 0.5f * (xyz0[node0] + xyz0[node1]);  /* edge midpoint */
      double ymean = 0.5f * (xyz1[node0] + xyz1[node1]);
      double zmean = 0.5f * (xyz2[node0] + xyz2[node1]);
      /* Now lets get our other 2 vectors
         For first vector, use {1,0,0} and subtract off the component in the
         direction of the face normal. If the inner product of {1,0,0} is
         close to unity, use {0,1,0} */
      double X1, Y1, Z1;
      double dot = xn;
      if(fabs(dot) < 0.95f)
      {
        X1 = 1.f - dot * xn;
        Y1 = -dot * yn;
        Z1 = -dot * zn;
      }
      else
      {
        dot = yn;
        X1 = -dot * xn;
        Y1 = 1.f - dot * yn;
        Z1 = -dot * zn;
      }
      /* Normalize the first vector */
      double size = X1 * X1;
      size += Y1 * Y1;
      size += Z1 * Z1;
      size = sqrt(size);
      X1 /= size;
      Y1 /= size;
      Z1 /= size;
      /* Take cross-product of normal and V1 to get V2 */
      double X2 = yn * Z1;
      X2 -= zn * Y1;
      double Y2 = zn * X1;
      Y2 -= xn * Z1;
      double Z2 = xn * Y1;
      Z2 -= yn * X1;
      /* Get variables on "left" and "right" side of face:
         extrapolate q linearly from each node to the edge midpoint. */
      double rx = xmean - xyz0[node0];
      double ry = ymean - xyz1[node0];
      double rz = zmean - xyz2[node0];
      uint32_t idx0 = bsz * node0;
      uint32_t idx1 = bsz * node1;
      // Pressure
      double pL = q[idx0 + 0] + gradx0[idx0 + 0] * rx;
      pL += gradx1[idx0 + 0] * ry;
      pL += gradx2[idx0 + 0] * rz;
      // Velocity u
      double uL = q[idx0 + 1] + gradx0[idx0 + 1] * rx;
      uL += gradx1[idx0 + 1] * ry;
      uL += gradx2[idx0 + 1] * rz;
      // Velocity v
      double vL = q[idx0 + 2] + gradx0[idx0 + 2] * rx;
      vL += gradx1[idx0 + 2] * ry;
      vL += gradx2[idx0 + 2] * rz;
      // Velocity w
      double wL = q[idx0 + 3] + gradx0[idx0 + 3] * rx;
      wL += gradx1[idx0 + 3] * ry;
      wL += gradx2[idx0 + 3] * rz;
      double ubarL = xn * uL;  /* left face-normal velocity */
      ubarL += yn * vL;
      ubarL += zn * wL;
      rx = xmean - xyz0[node1];
      ry = ymean - xyz1[node1];
      rz = zmean - xyz2[node1];
      // Pressure
      double pR = q[idx1 + 0] + gradx0[idx1 + 0] * rx;
      pR += gradx1[idx1 + 0] * ry;
      pR += gradx2[idx1 + 0] * rz;
      // Velocity u
      double uR = q[idx1 + 1] + gradx0[idx1 + 1] * rx;
      uR += gradx1[idx1 + 1] * ry;
      uR += gradx2[idx1 + 1] * rz;
      // Velocity v
      double vR = q[idx1 + 2] + gradx0[idx1 + 2] * rx;
      vR += gradx1[idx1 + 2] * ry;
      vR += gradx2[idx1 + 2] * rz;
      // Velocity w
      double wR = q[idx1 + 3] + gradx0[idx1 + 3] * rx;
      wR += gradx1[idx1 + 3] * ry;
      wR += gradx2[idx1 + 3] * rz;
      double ubarR = xn * uR;  /* right face-normal velocity */
      ubarR += yn * vR;
      ubarR += zn * wR;
      /* Compute averages */
      //double p = 0.5f * (pL + pR);
      double u = 0.5f * (uL + uR);
      double v = 0.5f * (vL + vR);
      double w = 0.5f * (wL + wR);
      double ubar = xn * u;
      ubar += yn * v;
      ubar += zn * w;
      double phi1 = xn * BETA;
      phi1 += u * ubar;
      double phi2 = yn * BETA;
      phi2 += v * ubar;
      double phi3 = zn * BETA;
      phi3 += w * ubar;
      double phi4 = Y2 * phi3;
      phi4 -= Z2 * phi2;
      double phi5 = Z2 * phi1;
      phi5 -= X2 * phi3;
      double phi6 = X2 * phi2;
      phi6 -= Y2 * phi1;
      double phi7 = Z1 * phi2;
      phi7 -= Y1 * phi3;
      double phi8 = X1 * phi3;
      phi8 -= Z1 * phi1;
      double phi9 = Y1 * phi1;
      phi9 -= X1 * phi2;
      double c2 = ubar * ubar + BETA;
      double c = sqrt(c2);   /* pseudo speed of sound */
      /* Now compute eigenvalues, eigenvectors, and strengths */
      double eig1 = fabs(ubar);
      double eig2 = fabs(ubar);
      double eig3 = fabs(ubar + c);
      double eig4 = fabs(ubar - c);
      double dp = pR - pL;
      double du = uR - uL;
      double dv = vR - vL;
      double dw = wR - wL;
      /* Components of T(inverse) */
      double ti11 = u * phi4;
      ti11 += v * phi5;
      ti11 += w * phi6;
      ti11 = -ti11 / BETA;
      double ti21 = u * phi7;
      ti21 += v * phi8;
      ti21 += w * phi9;
      ti21 = -ti21 / BETA;
      double ti31 = 0.5f * (c - ubar);
      ti31 /= BETA;
      double ti41 = -0.5f * (c + ubar);
      ti41 /= BETA;
      /* jumps (T(inverse) * dq) */
      double dv1 = ti11 * dp;
      dv1 += phi4 * du;
      dv1 += phi5 * dv;
      dv1 += phi6 * dw;
      dv1 /= c2;
      double dv2 = ti21 * dp;
      dv2 += phi7 * du;
      dv2 += phi8 * dv;
      dv2 += phi9 * dw;
      dv2 /= c2;
      double dv3 = 2.f * ti31 * dp;
      dv3 += xn * du;
      dv3 += yn * dv;
      dv3 += zn * dw;
      dv3 *= 0.5f / c2;
      double dv4 = 2.f * ti41 * dp;
      dv4 += xn * du;
      dv4 += yn * dv;
      dv4 += zn * dw;
      dv4 *= 0.5f / c2;
      /* Now get elements of T */
      double r13 = c * BETA;
      double r23 = u * (ubar + c);
      r23 += xn * BETA;
      double r33 = v * (ubar + c);
      r33 += yn * BETA;
      double r43 = w * (ubar + c);
      r43 += zn * BETA;
      double r14 = -c * BETA;
      double r24 = u * (ubar - c);
      r24 += xn * BETA;
      double r34 = v * (ubar - c);
      r34 += yn * BETA;
      double r44 = w * (ubar - c);
      r44 += zn * BETA;
      /* Calculate T* |lambda| * T(inverse) */
      double t1 = eig3 * r13 * dv3 + eig4 * r14 * dv4;
      double t2 = eig1 * X1 * dv1 + eig2 * X2 * dv2;
      t2 += eig3 * r23 * dv3 + eig4 * r24 * dv4;
      double t3 = eig1 * Y1 * dv1 + eig2 * Y2 * dv2;
      t3 += eig3 * r33 * dv3 + eig4 * r34 * dv4;
      double t4 = eig1 * Z1 * dv1 + eig2 * Z2 * dv2;
      t4 += eig3 * r43 * dv3 + eig4 * r44 * dv4;
      /* Modify to calculate .5(fl +fr) from nodes instead of extrapolated ones */
      double fluxp1 = ln * BETA * ubarL;
      double fluxp2 = ln * (uL * ubarL + xn * pL);
      double fluxp3 = ln * (vL * ubarL + yn * pL);
      double fluxp4 = ln * (wL * ubarL + zn * pL);
      /* Now the right side */
      double fluxm1 = ln * BETA * ubarR;
      double fluxm2 = ln * (uR * ubarR + xn * pR);
      double fluxm3 = ln * (vR * ubarR + yn * pR);
      double fluxm4 = ln * (wR * ubarR + zn * pR);
      /* Central flux plus upwind dissipation term. */
      double res1 = 0.5f * (fluxp1 + fluxm1 - ln * t1);
      double res2 = 0.5f * (fluxp2 + fluxm2 - ln * t2);
      double res3 = 0.5f * (fluxp3 + fluxm3 - ln * t3);
      double res4 = 0.5f * (fluxp4 + fluxm4 - ln * t4);
      /* Branch-free ownership guard: add into a node only if this thread
         owns it (same race-avoidance scheme as the gradient loop). */
      r[idx0 + 0] = (part[node0] == t) ? (r[idx0 + 0] + res1) : r[idx0 + 0];
      r[idx0 + 1] = (part[node0] == t) ? (r[idx0 + 1] + res2) : r[idx0 + 1];
      r[idx0 + 2] = (part[node0] == t) ? (r[idx0 + 2] + res3) : r[idx0 + 2];
      r[idx0 + 3] = (part[node0] == t) ? (r[idx0 + 3] + res4) : r[idx0 + 3];
      r[idx1 + 0] = (part[node1] == t) ? (r[idx1 + 0] - res1) : r[idx1 + 0];
      r[idx1 + 1] = (part[node1] == t) ? (r[idx1 + 1] - res2) : r[idx1 + 1];
      r[idx1 + 2] = (part[node1] == t) ? (r[idx1 + 2] - res3) : r[idx1 + 2];
      r[idx1 + 3] = (part[node1] == t) ? (r[idx1 + 3] - res4) : r[idx1 + 3];
    }
  }

  /* Solid-surface pressure contribution; snfic[] groups faces into batches
     whose node updates do not collide, so each batch parallelizes safely. */
  uint32_t i;
  for(i = 0; i < snfc; i++)
  {
    uint32_t if0 = snfic[i];
    uint32_t if1 = snfic[i+1];
    uint32_t j;
    #pragma omp parallel for
    for(j = if0; j < if1; j++)
    {
      uint32_t node0 = sn0[j];
      uint32_t node1 = sn1[j];
      uint32_t node2 = sn2[j];
      double p1 = q[bsz * node0];
      double p2 = q[bsz * node1];
      double p3 = q[bsz * node2];
      double ax = xyz0[node1] - xyz0[node0];
      double ay = xyz1[node1] - xyz1[node0];
      double az = xyz2[node1] - xyz2[node0];
      double bx = xyz0[node2] - xyz0[node0];
      double by = xyz1[node2] - xyz1[node0];
      double bz = xyz2[node2] - xyz2[node0];
      /* Normal points away from grid interior.
         Magnitude is 1/3 area of surface triangle. */
      double xn = ay * bz;
      xn -= az * by;
      xn *= MAG1;
      double yn = ax * bz;
      yn -= az * bx;
      yn *= MAG0;
      double zn = ax * by;
      zn -= ay * bx;
      zn *= MAG1;
      /* Weighted pressures: 3/4 own node + 1/8 each neighbor. */
      double pa = 0.125f * (p2 + p3);
      pa += 0.75f * p1;
      double pb = 0.125f * (p3 + p1);
      pb += 0.75f * p2;
      double pc = 0.125f * (p1 + p2);
      pc += 0.75f * p3;
      uint32_t idx;
      idx = bsz * node0;
      r[idx + 1] += xn * pa;
      r[idx + 2] += yn * pa;
      r[idx + 3] += zn * pa;
      idx = bsz * node1;
      r[idx + 1] += xn * pb;
      r[idx + 2] += yn * pb;
      r[idx + 3] += zn * pb;
      idx = bsz * node2;
      r[idx + 1] += xn * pc;
      r[idx + 2] += yn * pc;
      r[idx + 3] += zn * pc;
    }
  }

  /* Do the free boundaries */
  #pragma omp parallel for
  for(i = 0; i < nfnodes; i++)
  {
    uint32_t n = nfptr[i];
    /* Get normal and "other" 2 vectors. Remember that fxn,fyn and fzn has
       the magnitude of the face contained in it. */
    double xn = f_xyz0[i];
    double yn = f_xyz1[i];
    double zn = f_xyz2[i];
    double area = xn * xn;
    area += yn * yn;
    area += zn * zn;
    area = sqrt(area);
    xn /= area;
    yn /= area;
    zn /= area;
    /* Now lets get our other 2 vectors
       For first vector, use {1,0,0} and subtract off the component in the
       direction of the face normal. If the inner product of {1,0,0} is
       close to unity, use {0,1,0} */
    double X1, Y1, Z1;
    double dot = xn;
    if(fabs(dot) < 0.95f)
    {
      X1 = 1.f - dot * xn;
      Y1 = -dot * yn;
      Z1 = -dot * zn;
    }
    else
    {
      dot = yn;
      X1 = -dot * xn;
      Y1 = 1.f - dot * yn;
      Z1 = -dot * zn;
    }
    /* Normalize the first vector (V1) */
    double size = X1 * X1;
    size += Y1 * Y1;
    size += Z1 * Z1;
    size = sqrt(size);
    X1 /= size;
    Y1 /= size;
    Z1 /= size;
    /* Take cross-product of normal with V1 to get V2 */
    double X2 = yn * Z1;
    X2 -= zn * Y1;
    double Y2 = zn * X1;
    Y2 -= xn * Z1;
    double Z2 = xn * Y1;
    Z2 -= yn * X1;
    /* Calculate elements of T and T(inverse) evaluated at free-stream */
    double ubar0 = xn * velocity_u;
    ubar0 += yn * velocity_v;
    ubar0 += zn * velocity_w;
    double c20 = ubar0 * ubar0 + BETA;
    double c0 = sqrt(c20);
    double phi1 = xn * BETA;
    phi1 += velocity_u * ubar0;
    double phi2 = yn * BETA;
    phi2 += velocity_v * ubar0;
    double phi3 = zn * BETA;
    phi3 += velocity_w * ubar0;
    double phi4 = Y2 * phi3;
    phi4 -= Z2 * phi2;
    double phi5 = Z2 * phi1;
    phi5 -= X2 * phi3;
    double phi6 = X2 * phi2;
    phi6 -= Y2 * phi1;
    double phi7 = Z1 * phi2;
    phi7 -= Y1 * phi3;
    double phi8 = X1 * phi3;
    phi8 -= Z1 * phi1;
    double phi9 = Y1 * phi1;
    phi9 -= X1 * phi2;
    double t13 = c0 * BETA;
    double t23 = velocity_u * (ubar0 + c0);
    t23 += xn * BETA;
    double t33 = velocity_v * (ubar0 + c0);
    t33 += yn * BETA;
    double t43 = velocity_w * (ubar0 + c0);
    t43 += zn * BETA;
    double t14 = -c0 * BETA;
    double t24 = velocity_u * (ubar0 - c0);
    t24 += xn * BETA;
    double t34 = velocity_v * (ubar0 - c0);
    t34 += yn * BETA;
    double t44 = velocity_w * (ubar0 - c0);
    t44 += zn * BETA;
    double ti11 = velocity_u * phi4;
    ti11 += velocity_v * phi5;
    ti11 += velocity_w * phi6;
    ti11 = -ti11/BETA;
    double ti21 = velocity_u * phi7;
    ti21 += velocity_v * phi8;
    ti21 += velocity_w * phi9;
    ti21 = -ti21/BETA;
    double ti31 = 0.5f * (c0 - ubar0);
    ti31 /= BETA;
    double ti41 = -0.5f * (c0 + ubar0);
    ti41 /= BETA;
    /* Now, get the variables on the "inside" */
    double pi = q[bsz * n + 0];
    double ui = q[bsz * n + 1];
    double vi = q[bsz * n + 2];
    double wi = q[bsz * n + 3];
    double un = xn * ui;
    un += yn * vi;
    un += zn * wi;
    /* If ubar is negative, take the reference condition from outside */
    double pr, ur, vr, wr;
    if(un > 0.f)
    {
      pr = pi;
      ur = ui;
      vr = vi;
      wr = wi;
    }
    else
    {
      pr = pressure;
      ur = velocity_u;
      vr = velocity_v;
      wr = velocity_w;
    }
    /* Set rhs */
    double rhs1 = ti11 * pr;
    rhs1 += phi4 * ur;
    rhs1 += phi5 * vr;
    rhs1 += phi6 * wr;
    rhs1 /= c20;
    double rhs2 = ti21 * pr;
    rhs2 += phi7 * ur;
    rhs2 += phi8 * vr;
    rhs2 += phi9 * wr;
    rhs2 /= c20;
    double rhs3 = 2.f * ti31 * pi;
    rhs3 += xn * ui;
    rhs3 += yn * vi;
    rhs3 += zn * wi;
    rhs3 = 0.5f * rhs3 / c20;
    double rhs4 = 2.f * ti41 * pressure;
    rhs4 += xn * velocity_u;
    rhs4 += yn * velocity_v;
    rhs4 += zn * velocity_w;
    rhs4 = 0.5f * rhs4 / c20;
    /* Now do matrix multiplication to get values on boundary */
    double pb = t13 * rhs3;
    pb += t14 * rhs4;
    double ub = X1 * rhs1;
    ub += X2 * rhs2;
    ub += t23 * rhs3;
    ub += t24 * rhs4;
    double vb = Y1 * rhs1;
    vb += Y2 * rhs2;
    vb += t33 * rhs3;
    vb += t34 * rhs4;
    double wb = Z1 * rhs1;
    wb += Z2 * rhs2;
    wb += t43 * rhs3;
    wb += t44 * rhs4;
    double ubar = xn * ub;
    ubar += yn * vb;
    ubar += zn * wb;
    /* Accumulate the boundary flux into the residual of node n. */
    uint32_t idx = bsz * n;
    r[idx + 0] += area * BETA * ubar;
    r[idx + 1] += area * (ub * ubar + xn * pb);
    r[idx + 2] += area * (vb * ubar + yn * pb);
    r[idx + 3] += area * (wb * ubar + zn * pb);
  }

  compute_time(&ktime, res->t);

#ifdef __USE_HW_COUNTER
  const uint64_t cycle = __rdtsc() - icycle;
  struct counters end;
  perf_read(fd, &end);
  struct tot tot;
  perf_calc(start, end, &tot);
  res->perf_counters->ctrs->flux.cycles += cycle;
  res->perf_counters->ctrs->flux.tot.imcR += tot.imcR;
  res->perf_counters->ctrs->flux.tot.imcW += tot.imcW;
  res->perf_counters->ctrs->flux.tot.edcR += tot.edcR;
  res->perf_counters->ctrs->flux.tot.edcW += tot.edcW;
#endif
}
resize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE % % R R E SS I ZZ E % % RRRR EEE SSS I ZZZ EEE % % R R E SS I ZZ E % % R R EEEEE SSSSS IIIII ZZZZZ EEEEE % % % % % % MagickCore Image Resize Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/draw.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/magick.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resize-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" #if defined(MAGICKCORE_LQR_DELEGATE) #include <lqr.h> #endif /* Typedef declarations. 
*/ struct _ResizeFilter { double (*filter)(const double,const ResizeFilter *), (*window)(const double,const ResizeFilter *), support, /* filter region of support - the filter support limit */ window_support, /* window support, usally equal to support (expert only) */ scale, /* dimension scaling to fit window support (usally 1.0) */ blur, /* x-scale (blur-sharpen) */ coefficient[7]; /* cubic coefficents for BC-cubic filters */ ResizeWeightingFunctionType filterWeightingType, windowWeightingType; size_t signature; }; /* Forward declaractions. */ static double I0(double x), BesselOrderOne(double), Sinc(const double, const ResizeFilter *), SincFast(const double, const ResizeFilter *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + F i l t e r F u n c t i o n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % These are the various filter and windowing functions that are provided. % % They are internal to this module only. See AcquireResizeFilterInfo() for % details of the access to these functions, via the GetResizeFilterSupport() % and GetResizeFilterWeight() API interface. % % The individual filter functions have this format... % % static MagickRealtype *FilterName(const double x,const double support) % % A description of each parameter follows: % % o x: the distance from the sampling point generally in the range of 0 to % support. The GetResizeFilterWeight() ensures this a positive value. % % o resize_filter: current filter information. This allows function to % access support, and possibly other pre-calculated information defining % the functions. % */ static double Blackman(const double x, const ResizeFilter *magick_unused(resize_filter)) { /* Blackman: 2nd order cosine windowing function: 0.42 + 0.5 cos(pi x) + 0.08 cos(2pi x) Refactored by Chantal Racette and Nicolas Robidoux to one trig call and five flops. 
  */
  const double cosine=cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.34+cosine*(0.5+cosine*0.16));
}

static double Bohman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman: 2nd order cosine windowing function:
      (1-x) cos(pi x) + sin(pi x) / pi.

    Refactored by Nicolas Robidoux to one trig call, one sqrt call, and 7
    flops, taking advantage of the fact that the support of Bohman is 1.0 (so
    that we know that sin(pi x) >= 0, hence sine can be recovered from
    sqrt(1-cos^2)).
  */
  const double cosine=cos((double) (MagickPI*x));
  const double sine=sqrt(1.0-cosine*cosine);
  magick_unreferenced(resize_filter);
  return((1.0-x)*cosine+(1.0/MagickPI)*sine);
}

static double Box(const double magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(x);
  magick_unreferenced(resize_filter);
  /*
    A Box filter is an equal weighting function (all weights equal).
    DO NOT LIMIT results by support or resize point sampling will work
    as it requests points beyond its normal 0.0 support size.
  */
  return(1.0);
}

static double Cosine(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Cosine window function:
      cos((pi/2)*x).
  */
  return((double)cos((double) (MagickPI2*x)));
}

static double CubicBC(const double x,const ResizeFilter *resize_filter)
{
  /*
    Cubic Filters using B,C determined values:
       Mitchell-Netravali  B = 1/3 C = 1/3  "Balanced" cubic spline filter
       Catmull-Rom         B = 0   C = 1/2  Interpolatory and exact on linears
       Spline              B = 1   C = 0    B-Spline Gaussian approximation
       Hermite             B = 0   C = 0    B-Spline interpolator

    See paper by Mitchell and Netravali, Reconstruction Filters in Computer
    Graphics Computer Graphics, Volume 22, Number 4, August 1988
    http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
    Mitchell.pdf.

    Coefficients are determined from B,C values:
       P0 = (  6 - 2*B       )/6 = coeff[0]
       P1 =  0
       P2 = (-18 +12*B + 6*C )/6 = coeff[1]
       P3 = ( 12 - 9*B - 6*C )/6 = coeff[2]
       Q0 = (      8*B +24*C )/6 = coeff[3]
       Q1 = (    -12*B -48*C )/6 = coeff[4]
       Q2 = (      6*B +30*C )/6 = coeff[5]
       Q3 = (    - 1*B - 6*C )/6 = coeff[6]

    which are used to define the filter:
       P0 + P1*x + P2*x^2 + P3*x^3      0 <= x < 1
       Q0 + Q1*x + Q2*x^2 + Q3*x^3      1 <= x < 2

    which ensures function is continuous in value and derivative (slope).

    The coefficient[] values are precomputed in AcquireResizeFilter() from
    the effective B,C pair.
  */
  if (x < 1.0)
    return(resize_filter->coefficient[0]+x*(x*
      (resize_filter->coefficient[1]+x*resize_filter->coefficient[2])));
  if (x < 2.0)
    return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x*
      (resize_filter->coefficient[5]+x*resize_filter->coefficient[6])));
  return(0.0);
}

static double CubicSpline(const double x,const ResizeFilter *resize_filter)
{
  /*
    Piecewise polynomial spline approximations of sinc, selected by the
    configured support size (2, 3 or 4 lobes).
  */
  if (resize_filter->support <= 2.0)
    {
      /*
        2-lobe Spline filter.
      */
      if (x < 1.0)
        return(((x-9.0/5.0)*x-1.0/5.0)*x+1.0);
      if (x < 2.0)
        return(((-1.0/3.0*(x-1.0)+4.0/5.0)*(x-1.0)-7.0/15.0)*(x-1.0));
      return(0.0);
    }
  if (resize_filter->support <= 3.0)
    {
      /*
        3-lobe Spline filter.
      */
      if (x < 1.0)
        return(((13.0/11.0*x-453.0/209.0)*x-3.0/209.0)*x+1.0);
      if (x < 2.0)
        return(((-6.0/11.0*(x-1.0)+270.0/209.0)*(x-1.0)-156.0/209.0)*(x-1.0));
      if (x < 3.0)
        return(((1.0/11.0*(x-2.0)-45.0/209.0)*(x-2.0)+26.0/209.0)*(x-2.0));
      return(0.0);
    }
  /*
    4-lobe Spline filter.
  */
  if (x < 1.0)
    return(((49.0/41.0*x-6387.0/2911.0)*x-3.0/2911.0)*x+1.0);
  if (x < 2.0)
    return(((-24.0/41.0*(x-1.0)+4032.0/2911.0)*(x-1.0)-2328.0/2911.0)*(x-1.0));
  if (x < 3.0)
    return(((6.0/41.0*(x-2.0)-1008.0/2911.0)*(x-2.0)+582.0/2911.0)*(x-2.0));
  if (x < 4.0)
    return(((-1.0/41.0*(x-3.0)+168.0/2911.0)*(x-3.0)-97.0/2911.0)*(x-3.0));
  return(0.0);
}

static double Gaussian(const double x,const ResizeFilter *resize_filter)
{
  /*
    Gaussian with a sigma = 1/2 (or as user specified)

    Gaussian Formula (1D) ...
        exp( -(x^2)/((2.0*sigma^2) ) / (sqrt(2*PI)*sigma^2))

    Gaussian Formula (2D) ...
        exp( -(x^2+y^2)/(2.0*sigma^2) ) / (PI*sigma^2) )
    or for radius
        exp( -(r^2)/(2.0*sigma^2) ) / (PI*sigma^2) )

    Note that it is only a change from 1-d to radial form is in the
    normalization multiplier which is not needed or used when Gaussian is
    used as a filter.

    The constants are pre-calculated...

        coeff[0]=sigma;
        coeff[1]=1.0/(2.0*sigma^2);
        coeff[2]=1.0/(sqrt(2*PI)*sigma^2);

        exp( -coeff[1]*(x^2)) ) * coeff[2];

    However the multiplier coeff[1] is needed, the others are informative
    only.

    This separates the gaussian 'sigma' value from the 'blur/support'
    settings allowing for its use in special 'small sigma' gaussians,
    without the filter 'missing' pixels because the support becomes too
    small.
  */
  return(exp((double)(-resize_filter->coefficient[1]*x*x)));
}

static double Hann(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Cosine window function:
      0.5+0.5*cos(pi*x).
  */
  const double cosine=cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.5+0.5*cosine);
}

static double Hamming(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Offset cosine window function:
      .54 + .46 cos(pi x).
  */
  const double cosine=cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.54+0.46*cosine);
}

static double Jinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    See Pratt "Digital Image Processing" p.97 for Jinc/Bessel functions.
    http://mathworld.wolfram.com/JincFunction.html and page 11 of
    http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf

    The original "zoom" program by Paul Heckbert called this "Bessel".  But
    really it is more accurately named "Jinc".

    x == 0 would divide by zero below, so the limit value pi/2 is returned
    directly.
  */
  if (x == 0.0)
    return(0.5*MagickPI);
  return(BesselOrderOne(MagickPI*x)/x);
}

static double Kaiser(const double x,const ResizeFilter *resize_filter)
{
  /*
    Kaiser Windowing Function (bessel windowing):

       I0( beta * sqrt( 1-x^2) ) / IO(0)

    Beta (coeff[0]) is a free value from 5 to 8 (defaults to 6.5).
    However it is typically defined in terms of Alpha*PI.

    The normalization factor (coeff[1]) is not actually needed, but
    without it the filter has a large value at x=0 making it difficult to
    compare the function with other windowing functions.
  */
  return(resize_filter->coefficient[1]*I0(resize_filter->coefficient[0]*
    sqrt((double) (1.0-x*x))));
}

static double Lagrange(const double x,const ResizeFilter *resize_filter)
{
  double
    value;

  register ssize_t
    i;

  ssize_t
    n,
    order;

  /*
    Lagrange piecewise polynomial fit of sinc:
      N is the 'order' of the lagrange function and depends on
      the overall support window size of the filter. That is: for
      a support of 2, it gives a lagrange-4 (piecewise cubic function).

      "n" identifies the piece of the piecewise polynomial.

      See Survey: Interpolation Methods, IEEE Transactions on Medical
      Imaging, Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on
      p1064.
  */
  if (x > resize_filter->support)
    return(0.0);
  order=(ssize_t) (2.0*resize_filter->window_support);  /* number of pieces */
  n=(ssize_t) (resize_filter->window_support+x);
  value=1.0f;
  for (i=0; i < order; i++)
    if (i != n)
      value*=(n-i-x)/(n-i);
  return(value);
}

static double Quadratic(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    2nd order (quadratic) B-Spline approximation of Gaussian.
  */
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    return(0.5*(x-1.5)*(x-1.5));
  return(0.0);
}

static double Sinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Scaled sinc(x) function using a trig call:
      sinc(x) == sin(pi x)/(pi x).
  */
  if (x != 0.0)
    {
      const double alpha=(double) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  return((double) 1.0);
}

static double SincFast(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Approximations of the sinc function sin(pi x)/(pi x) over the interval
    [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
    from the Natural Sciences and Engineering Research Council of Canada.

    Although the approximations are polynomials (for low order of
    approximation) and quotients of polynomials (for higher order of
    approximation) and consequently are similar in form to Taylor
    polynomials / Pade approximants, the approximations are computed with
    a completely different technique.

    Summary: These approximations are "the best" in terms of bang
    (accuracy) for the buck (flops).  More specifically: Among the
    polynomial quotients that can be computed using a fixed number of
    flops (with a given "+ - * / budget"), the chosen polynomial quotient
    is the one closest to the approximated function with respect to
    maximum absolute relative error over the given interval.

    The Remez algorithm, as implemented in the boost library's minimax
    package, is the key to the construction:
    http://www.boost.org/doc/libs/1_36_0/libs/
    math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html

    If outside of the interval of approximation, use the standard trig
    formula.
  */
  if (x > 4.0)
    {
      const double alpha=(double) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  {
    /*
      The approximations only depend on x^2 (sinc is an even function).
    */
    const double xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
    /*
      Maximum absolute relative error 6.3e-6 < 1/2^17.
    */
    const double c0 = 0.173610016489197553621906385078711564924e-2L;
    const double c1 = -0.384186115075660162081071290162149315834e-3L;
    const double c2 = 0.393684603287860108352720146121813443561e-4L;
    const double c3 = -0.248947210682259168029030370205389323899e-5L;
    const double c4 = 0.107791837839662283066379987646635416692e-6L;
    const double c5 = -0.324874073895735800961260474028013982211e-8L;
    const double c6 = 0.628155216606695311524920882748052490116e-10L;
    const double c7 = -0.586110644039348333520104379959307242711e-12L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
    /*
      Max. abs. rel. error 2.2e-8 < 1/2^25.
    */
    const double c0 = 0.173611107357320220183368594093166520811e-2L;
    const double c1 = -0.384240921114946632192116762889211361285e-3L;
    const double c2 = 0.394201182359318128221229891724947048771e-4L;
    const double c3 = -0.250963301609117217660068889165550534856e-5L;
    const double c4 = 0.111902032818095784414237782071368805120e-6L;
    const double c5 = -0.372895101408779549368465614321137048875e-8L;
    const double c6 = 0.957694196677572570319816780188718518330e-10L;
    const double c7 = -0.187208577776590710853865174371617338991e-11L;
    const double c8 = 0.253524321426864752676094495396308636823e-13L;
    const double c9 = -0.177084805010701112639035485248501049364e-15L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
    /*
      Max. abs. rel. error 1.2e-12 < 1/2^39.
    */
    const double c0 = 0.173611111110910715186413700076827593074e-2L;
    const double c1 = -0.289105544717893415815859968653611245425e-3L;
    const double c2 = 0.206952161241815727624413291940849294025e-4L;
    const double c3 = -0.834446180169727178193268528095341741698e-6L;
    const double c4 = 0.207010104171026718629622453275917944941e-7L;
    const double c5 = -0.319724784938507108101517564300855542655e-9L;
    const double c6 = 0.288101675249103266147006509214934493930e-11L;
    const double c7 = -0.118218971804934245819960233886876537953e-13L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const double d0 = 1.0L;
    const double d1 = 0.547981619622284827495856984100563583948e-1L;
    const double d2 = 0.134226268835357312626304688047086921806e-2L;
    const double d3 = 0.178994697503371051002463656833597608689e-4L;
    const double d4 = 0.114633394140438168641246022557689759090e-6L;
    const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p);
#endif
  }
}

static double Triangle(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    1st order (linear) B-Spline, bilinear interpolation, Tent 1D filter, or
    a Bartlett 2D Cone filter.  Also used as a Bartlett Windowing function
    for Sinc().
  */
  if (x < 1.0)
    return(1.0-x);
  return(0.0);
}

static double Welch(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Welch parabolic windowing filter.
  */
  if (x < 1.0)
    return(1.0-x*x);
  return(0.0);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e   R e s i z e   F i l t e r                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireResizeFilter() allocates the ResizeFilter structure.  Choose from
%  these filters:
%
%  FIR (Finite impulse Response) Filters
%      Box         Triangle   Quadratic
%      Spline      Hermite    Catrom
%      Mitchell
%
%  IIR (Infinite impulse Response) Filters
%      Gaussian     Sinc        Jinc (Bessel)
%
%  Windowed Sinc/Jinc Filters
%      Blackman    Bohman     Lanczos
%      Hann        Hamming    Cosine
%      Kaiser      Welch      Parzen
%      Bartlett
%
%  Special Purpose Filters
%      Cubic  SincFast  LanczosSharp  Lanczos2  Lanczos2Sharp
%      Robidoux RobidouxSharp
%
%  The users "-filter" selection is used to lookup the default 'expert'
%  settings for that filter from an internal table.  However any provided
%  'expert' settings (see below) may override this selection.
%
%  FIR filters are used as is, and are limited to that filters support
%  window (unless over-ridden).  'Gaussian' while classed as an IIR filter,
%  is also simply clipped by its support size (currently 1.5 or
%  approximately 3*sigma as recommended by many references)
%
%  The special 'cylindrical' filter flag will promote the default 4-lobed
%  Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is
%  better suited to this style of image resampling.  This typically happens
%  when using such a filter for image distortions.
%
%  SPECIFIC FILTERS:
%
%  Directly requesting 'Sinc', 'Jinc' function as a filter will force the
%  use of function without any windowing, or promotion for cylindrical
%  usage.  This is not recommended, except by image processing experts,
%  especially as part of expert option filter function selection.
%
%  Two forms of the 'Sinc' function are available: Sinc and SincFast.  Sinc
%  is computed using the traditional sin(pi*x)/(pi*x); it is selected if
%  the user specifically specifies the use of a Sinc filter.  SincFast uses
%  highly accurate (and fast) polynomial (low Q) and rational (high Q)
%  approximations, and will be used by default in most cases.
%
%  The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter
%  (promoted to Jinc-windowed Jinc for cylindrical (Elliptical Weighted
%  Average) use).  The Sinc version is the most popular windowed filter.
%
%  LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form
%  of the Lanczos filter, specifically designed for EWA distortion (as a
%  Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos
%  (Sinc-Sinc) filter.  The chosen blur value comes as close as possible to
%  satisfying the following condition without changing the character of the
%  corresponding EWA filter:
%
%    'No-Op' Vertical and Horizontal Line Preservation Condition: Images
%    with only vertical or horizontal features are preserved when
%    performing 'no-op' with EWA distortion.
%
%  The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the
%  Lanczos filters.  The 'sharp' version uses a blur factor of
%  0.9549963639785485, again chosen because the resulting EWA filter comes
%  as close as possible to satisfying the above condition.
%
%  Robidoux is another filter tuned for EWA.  It is the Keys cubic filter
%  defined by B=(228 - 108 sqrt(2))/199.  Robidoux satisfies the "'No-Op'
%  Vertical and Horizontal Line Preservation Condition" exactly, and it
%  moderately blurs high frequency 'pixel-hash' patterns under no-op.  It
%  turns out to be close to both Mitchell and Lanczos2Sharp.  For example,
%  its first crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost
%  the same as the first crossing of Mitchell and Lanczos2Sharp.
%
%  RobidouxSharp is a slightly sharper version of Robidoux, some believe it
%  is too sharp.  It is designed to minimize the maximum possible change in
%  a pixel value which is at one of the extremes (e.g., 0 or 255) under
%  no-op conditions.  Amazingly Mitchell falls roughly between Robidoux and
%  RobidouxSharp, though this seems to have been pure coincidence.
%
%  'EXPERT' OPTIONS:
%
%  These artifact "defines" are not recommended for production use without
%  expert knowledge of resampling, filtering, and the effects they have on
%  the resulting resampled (resized or distorted) image.
%
%  They can be used to override any and all filter default, and it is
%  recommended you make good use of "filter:verbose" to make sure that the
%  overall effect of your selection (before and after) is as expected.
%
%    "filter:verbose"  controls whether to output the exact results of the
%       filter selections made, as well as plotting data for graphing the
%       resulting filter over the filters support range.
%
%    "filter:filter"  select the main function associated with this filter
%       name, as the weighting function of the filter.  This can be used to
%       set a windowing function as a weighting function, for special
%       purposes, such as graphing.
%
%       If a "filter:window" operation has not been provided, a 'Box'
%       windowing function will be set to denote that no windowing function
%       is being used.
%
%    "filter:window"  Select this windowing function for the filter.  While
%       any filter could be used as a windowing function, using the 'first
%       lobe' of that filter over the whole support window, using a
%       non-windowing function is not advisable.  If no weighting filter
%       function is specified a 'SincFast' filter is used.
%
%    "filter:lobes"  Number of lobes to use for the Sinc/Jinc filter.  This
%       a simpler method of setting filter support size that will correctly
%       handle the Sinc/Jinc switch for an operators filtering
%       requirements.  Only integers should be given.
%
%    "filter:support"  Set the support size for filtering to the size
%       given.  This not recommended for Sinc/Jinc windowed filters (lobes
%       should be used instead).  This will override any 'filter:lobes'
%       option.
%
%    "filter:win-support"  Scale windowing function to this size instead.
%       This causes the windowing (or self-windowing Lagrange filter) to
%       act as if the support window is much larger than what is actually
%       supplied to the calling operator.  The filter however is still
%       clipped to the real support size given, by the support range
%       supplied to the caller.  If unset this will equal the normal filter
%       support size.
%
%    "filter:blur"  Scale the filter and support window by this amount.  A
%       value of > 1 will generally result in a more blurred image with
%       more ringing effects, while a value <1 will sharpen the resulting
%       image with more aliasing effects.
%
%    "filter:sigma"  The sigma value to use for the Gaussian filter only.
%       Defaults to '1/2'.  Using a different sigma effectively provides a
%       method of using the filter as a 'blur' convolution.  Particularly
%       when using it for Distort.
%
%    "filter:b"
%    "filter:c"  Override the preset B,C values for a Cubic filter.
%       If only one of these are given it is assumed to be a 'Keys' type of
%       filter such that B+2C=1, where Keys 'alpha' value = C.
%
%  Examples:
%
%  Set a true un-windowed Sinc filter with 8 lobes (very slow):
%
%     -define filter:filter=Sinc
%     -define filter:lobes=8
%
%  Set an 8 lobe Lanczos (Sinc or Jinc) filter:
%
%     -filter Lanczos
%     -define filter:lobes=8
%
%  The format of the AcquireResizeFilter method is:
%
%      ResizeFilter *AcquireResizeFilter(const Image *image,
%        const FilterType filter_type,const MagickBooleanType cylindrical,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o filter: the filter type, defining a preset filter, window and
%      support.  The artifact settings listed above will override those
%      selections.
%
%    o blur: blur the filter by this amount, use 1.0 if unknown.  Image
%      artifact "filter:blur" will override this API call usage, including
%      any internal change (such as for cylindrical usage).
%
%    o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial)
%      filter (Jinc).
%
%    o exception: return any errors or warnings in this structure.
% */ MagickPrivate ResizeFilter *AcquireResizeFilter(const Image *image, const FilterType filter,const MagickBooleanType cylindrical, ExceptionInfo *exception) { const char *artifact; FilterType filter_type, window_type; double B, C, value; register ResizeFilter *resize_filter; /* Table Mapping given Filter, into Weighting and Windowing functions. A 'Box' windowing function means its a simble non-windowed filter. An 'SincFast' filter function could be upgraded to a 'Jinc' filter if a "cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was specifically requested by the user. WARNING: The order of this table must match the order of the FilterType enumeration specified in "resample.h", or the filter names will not match the filter being setup. You can check filter setups with the "filter:verbose" expert setting. */ static struct { FilterType filter, window; } const mapping[SentinelFilter] = { { UndefinedFilter, BoxFilter }, /* Undefined (default to Box) */ { PointFilter, BoxFilter }, /* SPECIAL: Nearest neighbour */ { BoxFilter, BoxFilter }, /* Box averaging filter */ { TriangleFilter, BoxFilter }, /* Linear interpolation filter */ { HermiteFilter, BoxFilter }, /* Hermite interpolation filter */ { SincFastFilter, HannFilter }, /* Hann -- cosine-sinc */ { SincFastFilter, HammingFilter }, /* Hamming -- '' variation */ { SincFastFilter, BlackmanFilter }, /* Blackman -- 2*cosine-sinc */ { GaussianFilter, BoxFilter }, /* Gaussian blur filter */ { QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approx */ { CubicFilter, BoxFilter }, /* General Cubic Filter, Spline */ { CatromFilter, BoxFilter }, /* Cubic-Keys interpolator */ { MitchellFilter, BoxFilter }, /* 'Ideal' Cubic-Keys filter */ { JincFilter, BoxFilter }, /* Raw 3-lobed Jinc function */ { SincFilter, BoxFilter }, /* Raw 4-lobed Sinc function */ { SincFastFilter, BoxFilter }, /* Raw fast sinc ("Pade"-type) */ { SincFastFilter, KaiserFilter }, /* Kaiser -- square root-sinc */ { LanczosFilter, 
WelchFilter }, /* Welch -- parabolic (3 lobe) */ { SincFastFilter, CubicFilter }, /* Parzen -- cubic-sinc */ { SincFastFilter, BohmanFilter }, /* Bohman -- 2*cosine-sinc */ { SincFastFilter, TriangleFilter }, /* Bartlett -- triangle-sinc */ { LagrangeFilter, BoxFilter }, /* Lagrange self-windowing */ { LanczosFilter, LanczosFilter }, /* Lanczos Sinc-Sinc filters */ { LanczosSharpFilter, LanczosSharpFilter }, /* | these require */ { Lanczos2Filter, Lanczos2Filter }, /* | special handling */ { Lanczos2SharpFilter, Lanczos2SharpFilter }, { RobidouxFilter, BoxFilter }, /* Cubic Keys tuned for EWA */ { RobidouxSharpFilter, BoxFilter }, /* Sharper Cubic Keys for EWA */ { LanczosFilter, CosineFilter }, /* Cosine window (3 lobes) */ { SplineFilter, BoxFilter }, /* Spline Cubic Filter */ { LanczosRadiusFilter, LanczosFilter }, /* Lanczos with integer radius */ { CubicSplineFilter, BoxFilter }, /* CubicSpline (2/3/4 lobes) */ }; /* Table mapping the filter/window from the above table to an actual function. The default support size for that filter as a weighting function, the range to scale with to use that function as a sinc windowing function, (typ 1.0). Note that the filter_type -> function is 1 to 1 except for Sinc(), SincFast(), and CubicBC() functions, which may have multiple filter to function associations. See "filter:verbose" handling below for the function -> filter mapping. */ static struct { double (*function)(const double,const ResizeFilter*), support, /* Default lobes/support size of the weighting filter. */ scale, /* Support when function used as a windowing function Typically equal to the location of the first zero crossing. */ B,C; /* BC-spline coefficients, ignored if not a CubicBC filter. 
*/ ResizeWeightingFunctionType weightingFunctionType; } const filters[SentinelFilter] = { /* .--- support window (if used as a Weighting Function) | .--- first crossing (if used as a Windowing Function) | | .--- B value for Cubic Function | | | .---- C value for Cubic Function | | | | */ { Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Undefined (default to Box) */ { Box, 0.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Point (special handling) */ { Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Box */ { Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Triangle */ { CubicBC, 1.0, 1.0, 0.0, 0.0, CubicBCWeightingFunction }, /* Hermite (cubic B=C=0) */ { Hann, 1.0, 1.0, 0.0, 0.0, HannWeightingFunction }, /* Hann, cosine window */ { Hamming, 1.0, 1.0, 0.0, 0.0, HammingWeightingFunction }, /* Hamming, '' variation */ { Blackman, 1.0, 1.0, 0.0, 0.0, BlackmanWeightingFunction }, /* Blackman, 2*cosine window */ { Gaussian, 2.0, 1.5, 0.0, 0.0, GaussianWeightingFunction }, /* Gaussian */ { Quadratic, 1.5, 1.5, 0.0, 0.0, QuadraticWeightingFunction },/* Quadratic gaussian */ { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* General Cubic Filter */ { CubicBC, 2.0, 1.0, 0.0, 0.5, CubicBCWeightingFunction }, /* Catmull-Rom (B=0,C=1/2) */ { CubicBC, 2.0, 8.0/7.0, 1./3., 1./3., CubicBCWeightingFunction }, /* Mitchell (B=C=1/3) */ { Jinc, 3.0, 1.2196698912665045, 0.0, 0.0, JincWeightingFunction }, /* Raw 3-lobed Jinc */ { Sinc, 4.0, 1.0, 0.0, 0.0, SincWeightingFunction }, /* Raw 4-lobed Sinc */ { SincFast, 4.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Raw fast sinc ("Pade"-type) */ { Kaiser, 1.0, 1.0, 0.0, 0.0, KaiserWeightingFunction }, /* Kaiser (square root window) */ { Welch, 1.0, 1.0, 0.0, 0.0, WelchWeightingFunction }, /* Welch (parabolic window) */ { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Parzen (B-Spline window) */ { Bohman, 1.0, 1.0, 0.0, 0.0, BohmanWeightingFunction }, /* Bohman, 2*Cosine window */ { Triangle, 
1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Bartlett (triangle window) */ { Lagrange, 2.0, 1.0, 0.0, 0.0, LagrangeWeightingFunction }, /* Lagrange sinc approximation */ { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 3-lobed Sinc-Sinc */ { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Sharpened */ { SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 2-lobed */ { SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos2, sharpened */ /* Robidoux: Keys cubic close to Lanczos2D sharpened */ { CubicBC, 2.0, 1.1685777620836932, 0.37821575509399867, 0.31089212245300067, CubicBCWeightingFunction }, /* RobidouxSharp: Sharper version of Robidoux */ { CubicBC, 2.0, 1.105822933719019, 0.2620145123990142, 0.3689927438004929, CubicBCWeightingFunction }, { Cosine, 1.0, 1.0, 0.0, 0.0, CosineWeightingFunction }, /* Low level cosine window */ { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Cubic B-Spline (B=1,C=0) */ { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Interger Radius */ { CubicSpline,2.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Spline Lobes 2-lobed */ }; /* The known zero crossings of the Jinc() or more accurately the Jinc(x*PI) function being used as a filter. It is used by the "filter:lobes" expert setting and for 'lobes' for Jinc functions in the previous table. This way users do not have to deal with the highly irrational lobe sizes of the Jinc filter. Values taken from http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp using Jv-function with v=1, then dividing by PI. 
*/ static double jinc_zeros[16] = { 1.2196698912665045, 2.2331305943815286, 3.2383154841662362, 4.2410628637960699, 5.2427643768701817, 6.2439216898644877, 7.2447598687199570, 8.2453949139520427, 9.2458926849494673, 10.246293348754916, 11.246622794877883, 12.246898461138105, 13.247132522181061, 14.247333735806849, 15.247508563037300, 16.247661874700962 }; /* Allocate resize filter. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(UndefinedFilter < filter && filter < SentinelFilter); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); resize_filter=(ResizeFilter *) AcquireMagickMemory(sizeof(*resize_filter)); if (resize_filter == (ResizeFilter *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(resize_filter,0,sizeof(*resize_filter)); /* Defaults for the requested filter. */ filter_type=mapping[filter].filter; window_type=mapping[filter].window; resize_filter->blur=1.0; /* Promote 1D Windowed Sinc Filters to a 2D Windowed Jinc filters */ if ( cylindrical != MagickFalse && (filter_type == SincFastFilter) && (filter != SincFastFilter)) filter_type=JincFilter; /* 1D Windowed Sinc => 2D Windowed Jinc filters */ /* Expert filter setting override */ artifact=GetImageArtifact(image,"filter:filter"); if (IsStringTrue(artifact) != MagickFalse) { ssize_t option; option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) { /* Raw filter request - no window function. */ filter_type=(FilterType) option; window_type=BoxFilter; } /* Filter override with a specific window function. 
*/ artifact=GetImageArtifact(image,"filter:window"); if (artifact != (const char *) NULL) { option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) window_type=(FilterType) option; } } else { /* Window specified, but no filter function? Assume Sinc/Jinc. */ artifact=GetImageArtifact(image,"filter:window"); if (artifact != (const char *) NULL) { ssize_t option; option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) { filter_type= cylindrical != MagickFalse ? JincFilter : SincFastFilter; window_type=(FilterType) option; } } } /* Assign the real functions to use for the filters selected. */ resize_filter->filter=filters[filter_type].function; resize_filter->support=filters[filter_type].support; resize_filter->filterWeightingType=filters[filter_type].weightingFunctionType; resize_filter->window=filters[window_type].function; resize_filter->windowWeightingType=filters[window_type].weightingFunctionType; resize_filter->scale=filters[window_type].scale; resize_filter->signature=MagickCoreSignature; /* Filter Modifications for orthogonal/cylindrical usage */ if (cylindrical != MagickFalse) switch (filter_type) { case BoxFilter: /* Support for Cylindrical Box should be sqrt(2)/2 */ resize_filter->support=(double) MagickSQ1_2; break; case LanczosFilter: case LanczosSharpFilter: case Lanczos2Filter: case Lanczos2SharpFilter: case LanczosRadiusFilter: resize_filter->filter=filters[JincFilter].function; resize_filter->window=filters[JincFilter].function; resize_filter->scale=filters[JincFilter].scale; /* number of lobes (support window size) remain unchanged */ break; default: break; } /* Global Sharpening (regardless of orthoginal/cylindrical) */ switch (filter_type) { case LanczosSharpFilter: resize_filter->blur *= 0.9812505644269356; break; case Lanczos2SharpFilter: resize_filter->blur *= 0.9549963639785485; break; /* case 
LanczosRadius: blur adjust is done after lobes */ default: break; } /* Expert Option Modifications. */ /* User Gaussian Sigma Override - no support change */ if ((resize_filter->filter == Gaussian) || (resize_filter->window == Gaussian) ) { value=0.5; /* guassian sigma default, half pixel */ artifact=GetImageArtifact(image,"filter:sigma"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); /* Define coefficents for Gaussian */ resize_filter->coefficient[0]=value; /* note sigma too */ resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */ resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value); /* normalization - not actually needed or used! */ if ( value > 0.5 ) resize_filter->support *= 2*value; /* increase support linearly */ } /* User Kaiser Alpha Override - no support change */ if ((resize_filter->filter == Kaiser) || (resize_filter->window == Kaiser) ) { value=6.5; /* default beta value for Kaiser bessel windowing function */ artifact=GetImageArtifact(image,"filter:alpha"); /* FUTURE: depreciate */ if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"filter:kaiser-beta"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"filter:kaiser-alpha"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL)*MagickPI; /* Define coefficents for Kaiser Windowing Function */ resize_filter->coefficient[0]=value; /* alpha */ resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value)); /* normalization */ } /* Support Overrides */ artifact=GetImageArtifact(image,"filter:lobes"); if (artifact != (const char *) NULL) { ssize_t lobes; lobes=(ssize_t) StringToLong(artifact); if (lobes < 1) lobes=1; resize_filter->support=(double) lobes; } if (resize_filter->filter == Jinc) { /* Convert a Jinc function lobes value to a real support 
value. */ if (resize_filter->support > 16) resize_filter->support=jinc_zeros[15]; /* largest entry in table */ else resize_filter->support=jinc_zeros[((long) resize_filter->support)-1]; /* Blur this filter so support is a integer value (lobes dependant). */ if (filter_type == LanczosRadiusFilter) resize_filter->blur*=floor(resize_filter->support)/ resize_filter->support; } /* Expert blur override. */ artifact=GetImageArtifact(image,"filter:blur"); if (artifact != (const char *) NULL) resize_filter->blur*=StringToDouble(artifact,(char **) NULL); if (resize_filter->blur < MagickEpsilon) resize_filter->blur=(double) MagickEpsilon; /* Expert override of the support setting. */ artifact=GetImageArtifact(image,"filter:support"); if (artifact != (const char *) NULL) resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL)); /* Scale windowing function separately to the support 'clipping' window that calling operator is planning to actually use. (Expert override) */ resize_filter->window_support=resize_filter->support; /* default */ artifact=GetImageArtifact(image,"filter:win-support"); if (artifact != (const char *) NULL) resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL)); /* Adjust window function scaling to match windowing support for weighting function. This avoids a division on every filter call. */ resize_filter->scale/=resize_filter->window_support; /* * Set Cubic Spline B,C values, calculate Cubic coefficients. */ B=0.0; C=0.0; if ((resize_filter->filter == CubicBC) || (resize_filter->window == CubicBC) ) { B=filters[filter_type].B; C=filters[filter_type].C; if (filters[window_type].function == CubicBC) { B=filters[window_type].B; C=filters[window_type].C; } artifact=GetImageArtifact(image,"filter:b"); if (artifact != (const char *) NULL) { B=StringToDouble(artifact,(char **) NULL); C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. 
*/ artifact=GetImageArtifact(image,"filter:c"); /* user C override */ if (artifact != (const char *) NULL) C=StringToDouble(artifact,(char **) NULL); } else { artifact=GetImageArtifact(image,"filter:c"); if (artifact != (const char *) NULL) { C=StringToDouble(artifact,(char **) NULL); B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */ } } { const double twoB = B+B; /* Convert B,C values into Cubic Coefficents. See CubicBC(). */ resize_filter->coefficient[0]=1.0-(1.0/3.0)*B; resize_filter->coefficient[1]=-3.0+twoB+C; resize_filter->coefficient[2]=2.0-1.5*B-C; resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C; resize_filter->coefficient[4]=-8.0*C-twoB; resize_filter->coefficient[5]=B+5.0*C; resize_filter->coefficient[6]=(-1.0/6.0)*B-C; } } /* Expert Option Request for verbose details of the resulting filter. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp master { #endif if (IsStringTrue(GetImageArtifact(image,"filter:verbose")) != MagickFalse) { double support, x; /* Set the weighting function properly when the weighting function may not exactly match the filter of the same name. EG: a Point filter is really uses a Box weighting function with a different support than is typically used. */ if (resize_filter->filter == Box) filter_type=BoxFilter; if (resize_filter->filter == Sinc) filter_type=SincFilter; if (resize_filter->filter == SincFast) filter_type=SincFastFilter; if (resize_filter->filter == Jinc) filter_type=JincFilter; if (resize_filter->filter == CubicBC) filter_type=CubicFilter; if (resize_filter->window == Box) window_type=BoxFilter; if (resize_filter->window == Sinc) window_type=SincFilter; if (resize_filter->window == SincFast) window_type=SincFastFilter; if (resize_filter->window == Jinc) window_type=JincFilter; if (resize_filter->window == CubicBC) window_type=CubicFilter; /* Report Filter Details. 
*/ support=GetResizeFilterSupport(resize_filter); /* practical_support */ (void) FormatLocaleFile(stdout, "# Resampling Filter (for graphing)\n#\n"); (void) FormatLocaleFile(stdout,"# filter = %s\n", CommandOptionToMnemonic(MagickFilterOptions,filter_type)); (void) FormatLocaleFile(stdout,"# window = %s\n", CommandOptionToMnemonic(MagickFilterOptions,window_type)); (void) FormatLocaleFile(stdout,"# support = %.*g\n", GetMagickPrecision(),(double) resize_filter->support); (void) FormatLocaleFile(stdout,"# window-support = %.*g\n", GetMagickPrecision(),(double) resize_filter->window_support); (void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n", GetMagickPrecision(),(double)resize_filter->blur); if ((filter_type == GaussianFilter) || (window_type == GaussianFilter)) (void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n", GetMagickPrecision(),(double)resize_filter->coefficient[0]); if ( filter_type == KaiserFilter || window_type == KaiserFilter ) (void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n", GetMagickPrecision(),(double)resize_filter->coefficient[0]); (void) FormatLocaleFile(stdout,"# practical-support = %.*g\n", GetMagickPrecision(), (double)support); if ( filter_type == CubicFilter || window_type == CubicFilter ) (void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n", GetMagickPrecision(),(double)B, GetMagickPrecision(),(double)C); (void) FormatLocaleFile(stdout,"\n"); /* Output values of resulting filter graph -- for graphing filter result. */ for (x=0.0; x <= support; x+=0.01f) (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x, GetMagickPrecision(),(double) GetResizeFilterWeight(resize_filter,x)); /* A final value so gnuplot can graph the 'stop' properly. 
  */
      /* Emit a terminal sample at x == support so gnuplot draws the cutoff. */
      (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
        GetMagickPrecision(),0.0);
    }
  /*
    Output the above once only for each image - remove setting
  */
  (void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  }
#endif
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A d a p t i v e R e s i z e I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
%  This is shortcut function for a fast interpolative resize using mesh
%  interpolation.  It works well for small resizes of less than +/- 50%
%  of the original image size.  For larger resizing on images a full
%  filtered and slower resize function should be used instead.
%
%  The format of the AdaptiveResizeImage method is:
%
%      Image *AdaptiveResizeImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  Image
    *resize_image;

  /* Thin convenience wrapper: delegate to the interpolative resizer with
     mesh interpolation; returns NULL on failure (see callee). */
  resize_image=InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel,
    exception);
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   B e s s e l O r d e r O n e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BesselOrderOne() computes the Bessel function of x of the first kind of
%  order 0.  This is used to create the Jinc() filter function below.
%
%  Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
%     j1(x) = x*j1(x);
%
%  For x in (8,inf)
%
%     j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
%  where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
%     cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
%             = 1/sqrt(2) * (sin(x) - cos(x))
%     sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
%             = -1/sqrt(2) * (sin(x) + cos(x))
%
%  The format of the BesselOrderOne method is:
%
%      double BesselOrderOne(double x)
%
%  A description of each parameter follows:
%
%    o x: double value.
%
*/

#undef I0
/*
  I0(): modified Bessel function of the first kind, order zero, computed
  from its power series sum_{k>=0} (x^2/4)^k/(k!)^2; the series is
  truncated once a term drops below MagickEpsilon.  Used by the Kaiser
  window elsewhere in this file.
*/
static double I0(double x)
{
  double
    sum,
    t,
    y;

  register ssize_t
    i;

  /*
    Zeroth order Bessel function of the first kind.
  */
  sum=1.0;
  y=x*x/4.0;  /* series variable; successive terms multiply by y/(i*i) */
  t=y;
  for (i=2; t > MagickEpsilon; i++)
  {
    sum+=t;
    t*=y/((double) i*i);
  }
  return(sum);
}

#undef J1
/*
  J1(): rational (minimax) approximation to J1(x)/x for |x| < 8.  Both
  polynomials are evaluated with Horner's rule in powers of x^2; the
  coefficient tables are exact and must not be altered.
*/
static double J1(double x)
{
  double
    p,
    q;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
      0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
      0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
      0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
      0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
      0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  p=Pone[8];
  q=Qone[8];
  for (i=7; i >= 0; i--)
  {
    p=p*x*x+Pone[i];
    q=q*x*x+Qone[i];
  }
  return(p/q);
}

#undef P1
/*
  P1(): modulus polynomial of the asymptotic expansion of J1 for x >= 8;
  a rational approximation evaluated in powers of (8/x)^2.
*/
static double P1(double x)
{
  double
    p,
    q;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

#undef Q1
/*
  Q1(): phase polynomial of the asymptotic expansion of J1 for x >= 8;
  a rational approximation evaluated in powers of (8/x)^2.
*/
static double Q1(double x)
{
  double
    p,
    q;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

static double BesselOrderOne(double x)
{
  double
    p,
    q;

  if (x == 0.0)
    return(0.0);
  p=x;  /* remember the sign: J1 is odd, j1(-x) == -j1(x) */
  if (x < 0.0)
    x=(-x);
  if (x < 8.0)
    return(p*J1(x));  /* small argument: direct rational approximation */
  /*
    Large argument: asymptotic form (see header comment), with sin(x1)
    and cos(x1) expanded in terms of sin(x) and cos(x).
  */
  q=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin((double) x)-
    cos((double) x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin((double) x)+
    cos((double) x))));
  if (p < 0.0)
    q=(-q);  /* restore odd symmetry */
  return(q);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y R e s i z e F i l t e r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyResizeFilter() destroy the resize filter.
%
%  The format of the DestroyResizeFilter method is:
%
%      ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o resize_filter: the resize filter.
%
*/
MagickPrivate ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  /* Invalidate the signature before freeing so stale pointers are caught
     by the asserts above on any later (erroneous) use. */
  resize_filter->signature=(~MagickCoreSignature);
  resize_filter=(ResizeFilter *) RelinquishMagickMemory(resize_filter);
  return(resize_filter);  /* always NULL; callers assign it back */
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t R e s i z e F i l t e r S u p p o r t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterSupport() return the current support window size for this
%  filter.  Note that this may have been enlarged by filter:blur factor.
%
%  The format of the GetResizeFilterSupport method is:
%
%      double GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o filter: Image filter to use.
%
*/

/* NOTE(review): the banner above documents GetResizeFilterSupport(), but a
   group of small private accessors follows first; GetResizeFilterSupport()
   itself is the last function in the group. */

/* Return the raw coefficient array (7 entries; meaning depends on filter). */
MagickPrivate double *GetResizeFilterCoefficient(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return((double *) resize_filter->coefficient);
}

/* Return the blur (support scaling) factor; 1.0 means no extra blur. */
MagickPrivate double GetResizeFilterBlur(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->blur);
}

/* Return the window scaling factor (already divided by window_support). */
MagickPrivate double GetResizeFilterScale(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->scale);
}

/* Return the support of the windowing function (may differ from the
   weighting function's support; see "filter:win-support"). */
MagickPrivate double GetResizeFilterWindowSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->window_support);
}

/* Return the enum identifying the weighting function in use. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->filterWeightingType);
}

/* Return the enum identifying the windowing function in use. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->windowWeightingType);
}

MagickPrivate double GetResizeFilterSupport(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  /* Practical support: declared support widened by the blur factor. */
  return(resize_filter->support*resize_filter->blur);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t R e s i z e F i l t e r W e i g h t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterWeight evaluates the specified resize filter at the point x
%  which usually lies between zero and the filters current 'support' and
%  returns the weight of the filter function at that point.
%
%  The format of the GetResizeFilterWeight method is:
%
%      double GetResizeFilterWeight(const ResizeFilter *resize_filter,
%        const double x)
%
%  A description of each parameter follows:
%
%    o filter: the filter type.
%
%    o x: the point.
%
*/
MagickPrivate double GetResizeFilterWeight(const ResizeFilter *resize_filter,
  const double x)
{
  double
    scale,
    weight,
    x_blur;

  /*
    Windowing function - scale the weighting filter by this amount.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  x_blur=fabs((double) x)/resize_filter->blur;  /* X offset with blur scaling */
  if ((resize_filter->window_support < MagickEpsilon) ||
      (resize_filter->window == Box))
    scale=1.0;  /* Point or Box Filter -- avoid division by zero */
  else
    {
      /* Evaluate the windowing function at the window-scaled offset. */
      scale=resize_filter->scale;
      scale=resize_filter->window(x_blur*scale,resize_filter);
    }
  /* Final weight = window(x) * filter(x). */
  weight=scale*resize_filter->filter(x_blur,resize_filter);
  return(weight);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e r p o l a t i v e R e s i z e I m a g e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpolativeResizeImage() resizes an image using the specified
%  interpolation method.
%
%  The format of the InterpolativeResizeImage method is:
%
%      Image *InterpolativeResizeImage(const Image *image,const size_t columns,
%        const size_t rows,const PixelInterpolateMethod method,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *InterpolativeResizeImage(const Image *image,
  const size_t columns,const size_t rows,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define InterpolativeResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  Image
    *resize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    scale;

  ssize_t
    y;

  /*
    Interpolatively resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  /* Identity resize: just clone. */
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass,exception) == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
  /* Destination->source coordinate scale factors. */
  scale.x=(double) image->columns/resize_image->columns;
  scale.y=(double) image->rows/resize_image->rows;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    PointInfo
      offset;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* cannot 'break' out of an OpenMP loop; skip instead */
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      continue;
    /* Pixel-center mapping: destination center y+0.5 to source space. */
    offset.y=((double) y+0.5)*scale.y-0.5;
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      register ssize_t
        i;

      /* Masked pixels are left as cloned; skip interpolation. */
      if (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(resize_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        offset.x=((double) x+0.5)*scale.x-0.5;
        /* NOTE(review): InterpolatePixelChannels() is invoked once per
           defined channel with identical offsets — it appears to do
           redundant work per pixel; confirm before restructuring. */
        status=InterpolatePixelChannels(image,image_view,resize_image,method,
          offset.x,offset.y,q,exception);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_InterpolativeResizeImage)
#endif
        proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    resize_image=DestroyImage(resize_image);
  return(resize_image);
}

#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   L i q u i d R e s c a l e I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LiquidRescaleImage() rescales image with seam carving.
%
%  The format of the LiquidRescaleImage method is:
%
%      Image *LiquidRescaleImage(const Image *image,const size_t columns,
%        const size_t rows,const double delta_x,const double rigidity,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the rescaled image.
%
%    o rows: the number of rows in the rescaled image.
%
%    o delta_x: maximum seam transversal step (0 means straight seams).
%
%    o rigidity: introduce a bias for non-straight seams (typically 0).
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
  const size_t rows,const double delta_x,const double rigidity,
  ExceptionInfo *exception)
{
#define LiquidRescaleImageTag  "Rescale/Image"

  CacheView
    *image_view,
    *rescale_view;

  gfloat
    *packet,
    *pixels;

  Image
    *rescale_image;

  int
    x_offset,
    y_offset;

  LqrCarver
    *carver;

  LqrRetVal
    lqr_status;

  MagickBooleanType
    status;

  MemoryInfo
    *pixel_info;

  register gfloat
    *q;

  ssize_t
    y;

  /*
    Liquid rescale image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /* Seam carving needs a minimum working size; fall back to a filtered
     resize for degenerate targets. */
  if ((columns <= 2) || (rows <= 2))
    return(ResizeImage(image,columns,rows,image->filter,exception));
  /* Stage the whole image as 32-bit float samples for liblqr. */
  pixel_info=AcquireVirtualMemory(image->columns,image->rows*MaxPixelChannels*
    sizeof(*pixels));
  if (pixel_info == (MemoryInfo *) NULL)
    return((Image *) NULL);
  pixels=(gfloat *) GetVirtualMemoryBlob(pixel_info);
  status=MagickTrue;
  q=pixels;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* Normalize each quantum to [0,1] float for the carver. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        *q++=QuantumScale*p[i];
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  carver=lqr_carver_new_ext(pixels,(int) image->columns,(int) image->rows,
    (int) GetPixelChannels(image),LQR_COLDEPTH_32F);
  if (carver == (LqrCarver *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  lqr_carver_set_preserve_input_image(carver);
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows);
  (void) lqr_status;  /* NOTE(review): carver errors are ignored here */
  /* The carver may deliver slightly different dimensions than requested;
     size the result from the carver itself. */
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass,exception) == MagickFalse)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      rescale_image=DestroyImage(rescale_image);
      return((Image *) NULL);
    }
  /* Scan carved pixels back out one at a time and store them. */
  rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
  (void) lqr_carver_scan_reset(carver);
  while (lqr_carver_scan_ext(carver,&x_offset,&y_offset,(void **) &packet) != 0)
  {
    register Quantum
      *magick_restrict p;

    register ssize_t
      i;

    p=QueueCacheViewAuthenticPixels(rescale_view,x_offset,y_offset,1,1,
      exception);
    if (p == (Quantum *) NULL)
      break;
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel
        channel;

      PixelTrait
        rescale_traits,
        traits;

      channel=GetPixelChannelChannel(image,i);
      traits=GetPixelChannelTraits(image,channel);
      rescale_traits=GetPixelChannelTraits(rescale_image,channel);
      if ((traits == UndefinedPixelTrait) ||
          (rescale_traits == UndefinedPixelTrait))
        continue;
      /* De-normalize float sample back to quantum range. */
      SetPixelChannel(rescale_image,channel,ClampToQuantum(QuantumRange*
        packet[i]),p);
    }
    if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
      break;
  }
  rescale_view=DestroyCacheView(rescale_view);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
/* Stub when ImageMagick is built without the liblqr delegate: report a
   missing-delegate error and return NULL. */
MagickExport Image *LiquidRescaleImage(const Image *image,
  const size_t magick_unused(columns),const size_t magick_unused(rows),
  const double magick_unused(delta_x),const double magick_unused(rigidity),
  ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","'%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g n i f y I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagnifyImage() doubles the size of the image with a pixel art scaling
%  algorithm.
%
%  The format of the MagnifyImage method is:
%
%      Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
#define MagnifyImageTag  "Magnify/Image"

  CacheView
    *image_view,
    *magnify_view;

  Image
    *magnify_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize magnified image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  magnify_image=CloneImage(image,2*image->columns,2*image->rows,MagickTrue,
    exception);
  if (magnify_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Magnify image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  magnify_view=AcquireAuthenticCacheView(magnify_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,magnify_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Each source row produces two destination rows (rows 2y and 2y+1). */
    q=QueueCacheViewAuthenticPixels(magnify_view,0,2*y,magnify_image->columns,2,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Magnify this row of pixels.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        intensity[9];

      register const Quantum
        *magick_restrict p;

      register Quantum
        *magick_restrict r;

      register ssize_t
        i;

      size_t
        channels;

      /* Fetch the 3x3 neighborhood centered on (x,y); the virtual cache
         view supplies edge pixels outside the image bounds. */
      p=GetCacheViewVirtualPixels(image_view,x-1,y-1,3,3,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      channels=GetPixelChannels(image);
      /* Neighborhood intensities, row-major: 1=top, 3=left, 4=center,
         5=right, 7=bottom. */
      for (i=0; i < 9; i++)
        intensity[i]=GetPixelIntensity(image,p+i*channels);
      r=q;  /* r walks the 2x2 destination block for this source pixel */
      if ((fabs(intensity[1]-intensity[7]) < MagickEpsilon) ||
          (fabs(intensity[3]-intensity[5]) < MagickEpsilon))
        {
          /*
            Clone center pixel.
          */
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
          /* Jump to the start of the second destination row of the block. */
          r+=GetPixelChannels(magnify_image)*(magnify_image->columns-1);
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
        }
      else
        {
          /*
            Selectively clone pixel: each quadrant copies the matching
            diagonal neighbor when its two adjacent edges agree in
            intensity, otherwise the center pixel (pixel-art style).
          */
          if (fabs(intensity[1]-intensity[3]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[3*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          if (fabs(intensity[1]-intensity[5]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[5*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image)*(magnify_image->columns-1);
          if (fabs(intensity[3]-intensity[7]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[3*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          if (fabs(intensity[5]-intensity[7]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[5*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
        }
      q+=2*GetPixelChannels(magnify_image);  /* advance two dest pixels */
    }
    if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MagnifyImage)
#endif
        proceed=SetImageProgress(image,MagnifyImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  magnify_view=DestroyCacheView(magnify_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    magnify_image=DestroyImage(magnify_image);
  return(magnify_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M i n i f y I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MinifyImage() is a convenience method that scales an image proportionally to
%  half its size.
%
%  The format of the MinifyImage method is:
%
%      Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  Image
    *minify_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Half-size resize with a Spline filter (smooth, ring-free). */
  minify_image=ResizeImage(image,image->columns/2,image->rows/2,SplineFilter,
    exception);
  return(minify_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s a m p l e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResampleImage() resize image in terms of its pixel size, so that when
%  displayed at the given resolution it will be the same size in terms of
%  real world units as the original image at the original resolution.
%
%  The format of the ResampleImage method is:
%
%      Image *ResampleImage(Image *image,const double x_resolution,
%        const double y_resolution,const FilterType filter,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be resized to fit the given resolution.
%
%    o x_resolution: the new image x resolution.
%
%    o y_resolution: the new image y resolution.
%
%    o filter: Image filter to use.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterType filter,ExceptionInfo *exception)
{
#define ResampleImageTag  "Resample/Image"

  Image
    *resample_image;

  size_t
    height,
    width;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* New pixel dimensions preserve physical size: pixels*new_dpi/old_dpi,
     rounded to nearest; an unset resolution defaults to 72 DPI. */
  width=(size_t) (x_resolution*image->columns/(image->resolution.x == 0.0 ?
    72.0 : image->resolution.x)+0.5);
  height=(size_t) (y_resolution*image->rows/(image->resolution.y == 0.0 ?
    72.0 : image->resolution.y)+0.5);
  resample_image=ResizeImage(image,width,height,filter,exception);
  if (resample_image != (Image *) NULL)
    {
      /* Record the resolution the image was resampled for. */
      resample_image->resolution.x=x_resolution;
      resample_image->resolution.y=y_resolution;
    }
  return(resample_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s i z e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResizeImage() scales an image to the desired dimensions, using the given
%  filter (see AcquireFilterInfo()).
%
%  If an undefined filter is given the filter defaults to Mitchell for a
%  colormapped image, a image with a matte channel, or if the image is
%  enlarged.  Otherwise the filter defaults to a Lanczos.
%
%  ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
%  The format of the ResizeImage method is:
%
%      Image *ResizeImage(Image *image,const size_t columns,const size_t rows,
%        const FilterType filter,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o filter: Image filter to use.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/* One filter tap: source pixel index and its (possibly normalized) weight. */
typedef struct _ContributionInfo
{
  double
    weight;

  ssize_t
    pixel;
} ContributionInfo;

/* Release the per-thread contribution buffers and the holder array. */
static ContributionInfo **DestroyContributionThreadSet(
  ContributionInfo **contribution)
{
  register ssize_t
    i;

  assert(contribution != (ContributionInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (contribution[i] != (ContributionInfo *) NULL)
      contribution[i]=(ContributionInfo *) RelinquishAlignedMemory(
        contribution[i]);
  contribution=(ContributionInfo **) RelinquishMagickMemory(contribution);
  return(contribution);
}

/* Allocate one aligned buffer of 'count' taps per worker thread so the
   OpenMP loops below can index by thread id without locking; returns NULL
   on allocation failure (partial allocations are released). */
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
  register ssize_t
    i;

  ContributionInfo
    **contribution;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*contribution));
  if (contribution == (ContributionInfo **) NULL)
    return((ContributionInfo **) NULL);
  (void) ResetMagickMemory(contribution,0,number_threads*sizeof(*contribution));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    contribution[i]=(ContributionInfo *) MagickAssumeAligned(
      AcquireAlignedMemory(count,sizeof(**contribution)));
    if (contribution[i] == (ContributionInfo *) NULL)
      return(DestroyContributionThreadSet(contribution));
  }
  return(contribution);
}

/* Resize horizontally (columns) from 'image' into 'resize_image' using the
   given resize filter; one pass of the two-pass Goto/Heckbert scheme. */
static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const double x_factor,
  const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
#define ResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  MagickBooleanType
    status;

  double
    scale,
    support;

  ssize_t
    x;

  /*
    Apply filter to resize horizontally from image to resize image.
  */
  /* When shrinking (x_factor < 1) widen the filter by 1/x_factor. */
  scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  /* Worst-case number of taps per destination column. */
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,resize_image,resize_image->columns,1)
#endif
  for (x=0; x < (ssize_t) resize_image->columns; x++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,
      density;

    register const Quantum
      *magick_restrict p;

    register ContributionInfo
      *magick_restrict contribution;

    register Quantum
      *magick_restrict q;

    register ssize_t
      y;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /* Center of this destination column mapped back into source space. */
    bisect=(double) (x+0.5)/x_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns);
    density=0.0;
    contribution=contributions[id];
    /* Gather filter weights for source columns [start,stop). */
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /* Read the contributing source strip; write one destination column. */
    p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
      (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
    q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (y=0; y < (ssize_t) resize_image->rows; y++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        register ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /* Copy-only channel or masked pixel: take the nearest source
               pixel instead of filtering. */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[j-start].pixel-contribution[0].pixel);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                (contribution[j].pixel-contribution[0].pixel);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each tap by the source pixel's alpha and
          renormalize by the accumulated alpha (gamma).
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
            (contribution[j].pixel-contribution[0].pixel);
          alpha=contribution[j].weight*QuantumScale*
            GetPixelAlpha(image,p+k*GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_HorizontalFilter)
#endif
        proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}

/* Resize vertically (rows); mirror image of HorizontalFilter above. */
static MagickBooleanType VerticalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const double y_factor,
  const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  double
    scale,
    support;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Apply filter to resize vertically from image to resize image.
  */
  scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
*/ support=(double) 0.5; scale=1.0; } contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0)); if (contributions == (ContributionInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } status=MagickTrue; scale=PerceptibleReciprocal(scale); image_view=AcquireVirtualCacheView(image,exception); resize_view=AcquireAuthenticCacheView(resize_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,resize_image,resize_image->rows,1) #endif for (y=0; y < (ssize_t) resize_image->rows; y++) { const int id = GetOpenMPThreadId(); double bisect, density; register const Quantum *magick_restrict p; register ContributionInfo *magick_restrict contribution; register Quantum *magick_restrict q; register ssize_t x; ssize_t n, start, stop; if (status == MagickFalse) continue; bisect=(double) (y+0.5)/y_factor+MagickEpsilon; start=(ssize_t) MagickMax(bisect-support+0.5,0.0); stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows); density=0.0; contribution=contributions[id]; for (n=0; n < (stop-start); n++) { contribution[n].pixel=start+n; contribution[n].weight=GetResizeFilterWeight(resize_filter,scale* ((double) (start+n)-bisect+0.5)); density+=contribution[n].weight; } if (n == 0) continue; if ((density != 0.0) && (density != 1.0)) { register ssize_t i; /* Normalize. 
*/ density=PerceptibleReciprocal(density); for (i=0; i < n; i++) contribution[i].weight*=density; } p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel, image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1), exception); q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) resize_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait resize_traits, traits; register ssize_t j; ssize_t k; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); resize_traits=GetPixelChannelTraits(resize_image,channel); if ((traits == UndefinedPixelTrait) || (resize_traits == UndefinedPixelTrait)) continue; if (((resize_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2))) { j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop-1.0)+0.5); k=(ssize_t) ((contribution[j-start].pixel-contribution[0].pixel)* image->columns+x); SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i], q); continue; } pixel=0.0; if ((resize_traits & BlendPixelTrait) == 0) { /* No alpha blending. 
*/ for (j=0; j < n; j++) { k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[j].weight; pixel+=alpha*p[k*GetPixelChannels(image)+i]; } SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q); continue; } gamma=0.0; for (j=0; j < n; j++) { k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[j].weight*QuantumScale*GetPixelAlpha(image,p+k* GetPixelChannels(image)); pixel+=alpha*p[k*GetPixelChannels(image)+i]; gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q); } q+=GetPixelChannels(resize_image); } if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_VerticalFilter) #endif proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span); if (proceed == MagickFalse) status=MagickFalse; } } resize_view=DestroyCacheView(resize_view); image_view=DestroyCacheView(image_view); contributions=DestroyContributionThreadSet(contributions); return(status); } MagickExport Image *ResizeImage(const Image *image,const size_t columns, const size_t rows,const FilterType filter,ExceptionInfo *exception) { double x_factor, y_factor; FilterType filter_type; Image *filter_image, *resize_image; MagickOffsetType offset; MagickSizeType span; MagickStatusType status; ResizeFilter *resize_filter; /* Acquire resize image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows) && (filter == UndefinedFilter)) return(CloneImage(image,0,0,MagickTrue,exception)); /* Acquire resize filter. */ x_factor=(double) columns/(double) image->columns; y_factor=(double) rows/(double) image->rows; filter_type=LanczosFilter; if (filter != UndefinedFilter) filter_type=filter; else if ((x_factor == 1.0) && (y_factor == 1.0)) filter_type=PointFilter; else if ((image->storage_class == PseudoClass) || (image->alpha_trait != UndefinedPixelTrait) || ((x_factor*y_factor) > 1.0)) filter_type=MitchellFilter; resize_filter=AcquireResizeFilter(image,filter_type,MagickFalse,exception); #if defined(MAGICKCORE_OPENCL_SUPPORT) resize_image=AccelerateResizeImage(image,columns,rows,resize_filter, exception); if (resize_image != (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return(resize_image); } #endif resize_image=CloneImage(image,columns,rows,MagickTrue,exception); if (resize_image == (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return(resize_image); } if (x_factor > y_factor) filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception); else filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception); if (filter_image == (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return(DestroyImage(resize_image)); } /* Resize image. 
*/ offset=0; if (x_factor > y_factor) { span=(MagickSizeType) (filter_image->columns+rows); status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span, &offset,exception); status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor, span,&offset,exception); } else { span=(MagickSizeType) (filter_image->rows+columns); status=VerticalFilter(resize_filter,image,filter_image,y_factor,span, &offset,exception); status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor, span,&offset,exception); } /* Free resources. */ filter_image=DestroyImage(filter_image); resize_filter=DestroyResizeFilter(resize_filter); if (status == MagickFalse) { resize_image=DestroyImage(resize_image); return((Image *) NULL); } resize_image->type=image->type; return(resize_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S a m p l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SampleImage() scales an image to the desired dimensions with pixel % sampling. Unlike other scaling methods, this method does not introduce % any additional color into the scaled image. % % The format of the SampleImage method is: % % Image *SampleImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the sampled image. % % o rows: the number of rows in the sampled image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SampleImage(const Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define SampleImageTag "Sample/Image" CacheView *image_view, *sample_view; Image *sample_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t x1; ssize_t *x_offset, y; PointInfo sample_offset; /* Initialize sampled image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); sample_image=CloneImage(image,columns,rows,MagickTrue,exception); if (sample_image == (Image *) NULL) return((Image *) NULL); /* Set the sampling offset, default is in the mid-point of sample regions. */ sample_offset.x=sample_offset.y=0.5-MagickEpsilon; { const char *value; value=GetImageArtifact(image,"sample:offset"); if (value != (char *) NULL) { GeometryInfo geometry_info; MagickStatusType flags; (void) ParseGeometry(value,&geometry_info); flags=ParseGeometry(value,&geometry_info); sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon; if ((flags & SigmaValue) != 0) sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon; } } /* Allocate scan line buffer and column offset buffers. */ x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns, sizeof(*x_offset)); if (x_offset == (ssize_t *) NULL) { sample_image=DestroyImage(sample_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (x1=0; x1 < (ssize_t) sample_image->columns; x1++) x_offset[x1]=(ssize_t) ((((double) x1+sample_offset.x)*image->columns)/ sample_image->columns); /* Sample each row. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); sample_view=AcquireAuthenticCacheView(sample_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,sample_image,1,1) #endif for (y=0; y < (ssize_t) sample_image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; ssize_t y_offset; if (status == MagickFalse) continue; y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/ sample_image->rows); p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1, exception); q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } /* Sample each column. */ for (x=0; x < (ssize_t) sample_image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(sample_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(sample_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(sample_image); i++) { PixelChannel channel; PixelTrait image_traits, traits; channel=GetPixelChannelChannel(sample_image,i); traits=GetPixelChannelTraits(sample_image,channel); image_traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (image_traits == UndefinedPixelTrait)) continue; SetPixelChannel(sample_image,channel,p[x_offset[x]*GetPixelChannels( image)+i],q); } q+=GetPixelChannels(sample_image); } if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SampleImage) #endif proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); 
sample_view=DestroyCacheView(sample_view); x_offset=(ssize_t *) RelinquishMagickMemory(x_offset); sample_image->type=image->type; if (status == MagickFalse) sample_image=DestroyImage(sample_image); return(sample_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleImage() changes the size of an image to the given dimensions. % % The format of the ScaleImage method is: % % Image *ScaleImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the scaled image. % % o rows: the number of rows in the scaled image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ScaleImage(const Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define ScaleImageTag "Scale/Image" CacheView *image_view, *scale_view; double alpha, pixel[CompositePixelChannel], *scale_scanline, *scanline, *x_vector, *y_vector; Image *scale_image; MagickBooleanType next_column, next_row, proceed, status; PixelTrait scale_traits; PointInfo scale, span; register ssize_t i; ssize_t n, number_rows, y; /* Initialize scaled image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); scale_image=CloneImage(image,columns,rows,MagickTrue,exception); if (scale_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(scale_image,DirectClass,exception) == MagickFalse) { scale_image=DestroyImage(scale_image); return((Image *) NULL); } /* Allocate memory. */ x_vector=(double *) AcquireQuantumMemory((size_t) image->columns, MaxPixelChannels*sizeof(*x_vector)); scanline=x_vector; if (image->rows != scale_image->rows) scanline=(double *) AcquireQuantumMemory((size_t) image->columns, MaxPixelChannels*sizeof(*scanline)); scale_scanline=(double *) AcquireQuantumMemory((size_t) scale_image->columns, MaxPixelChannels*sizeof(*scale_scanline)); y_vector=(double *) AcquireQuantumMemory((size_t) image->columns, MaxPixelChannels*sizeof(*y_vector)); if ((scanline == (double *) NULL) || (scale_scanline == (double *) NULL) || (x_vector == (double *) NULL) || (y_vector == (double *) NULL)) { scale_image=DestroyImage(scale_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Scale image. 
*/ number_rows=0; next_row=MagickTrue; span.y=1.0; scale.y=(double) scale_image->rows/(double) image->rows; (void) ResetMagickMemory(y_vector,0,(size_t) MaxPixelChannels*image->columns* sizeof(*y_vector)); n=0; status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); scale_view=AcquireAuthenticCacheView(scale_image,exception); for (y=0; y < (ssize_t) scale_image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) break; q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; break; } alpha=1.0; if (scale_image->rows == image->rows) { /* Read a new scanline. */ p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } if (image->alpha_trait != UndefinedPixelTrait) alpha=QuantumScale*GetPixelAlpha(image,p); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & BlendPixelTrait) == 0) { x_vector[x*GetPixelChannels(image)+i]=(double) p[i]; continue; } x_vector[x*GetPixelChannels(image)+i]=alpha*p[i]; } p+=GetPixelChannels(image); } } else { /* Scale Y direction. */ while (scale.y < span.y) { if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows)) { /* Read a new scanline. 
*/ p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } if (image->alpha_trait != UndefinedPixelTrait) alpha=QuantumScale*GetPixelAlpha(image,p); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & BlendPixelTrait) == 0) { x_vector[x*GetPixelChannels(image)+i]=(double) p[i]; continue; } x_vector[x*GetPixelChannels(image)+i]=alpha*p[i]; } p+=GetPixelChannels(image); } number_rows++; } for (x=0; x < (ssize_t) image->columns; x++) for (i=0; i < (ssize_t) GetPixelChannels(image); i++) y_vector[x*GetPixelChannels(image)+i]+=scale.y* x_vector[x*GetPixelChannels(image)+i]; span.y-=scale.y; scale.y=(double) scale_image->rows/(double) image->rows; next_row=MagickTrue; } if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows)) { /* Read a new scanline. 
*/ p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } if (image->alpha_trait != UndefinedPixelTrait) alpha=QuantumScale*GetPixelAlpha(image,p); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & BlendPixelTrait) == 0) { x_vector[x*GetPixelChannels(image)+i]=(double) p[i]; continue; } x_vector[x*GetPixelChannels(image)+i]=alpha*p[i]; } p+=GetPixelChannels(image); } number_rows++; next_row=MagickFalse; } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { pixel[i]=y_vector[x*GetPixelChannels(image)+i]+span.y* x_vector[x*GetPixelChannels(image)+i]; scanline[x*GetPixelChannels(image)+i]=pixel[i]; y_vector[x*GetPixelChannels(image)+i]=0.0; } } scale.y-=span.y; if (scale.y <= 0) { scale.y=(double) scale_image->rows/(double) image->rows; next_row=MagickTrue; } span.y=1.0; } if (scale_image->columns == image->columns) { /* Transfer scanline to scaled image. 
*/ for (x=0; x < (ssize_t) scale_image->columns; x++) { if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(scale_image); continue; } if (image->alpha_trait != UndefinedPixelTrait) { alpha=QuantumScale*scanline[x*GetPixelChannels(image)+ GetPixelChannelOffset(image,AlphaPixelChannel)]; alpha=PerceptibleReciprocal(alpha); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); scale_traits=GetPixelChannelTraits(scale_image,channel); if ((traits == UndefinedPixelTrait) || (scale_traits == UndefinedPixelTrait)) continue; if ((traits & BlendPixelTrait) == 0) { SetPixelChannel(scale_image,channel,ClampToQuantum( scanline[x*GetPixelChannels(image)+i]),q); continue; } SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*scanline[ x*GetPixelChannels(image)+i]),q); } q+=GetPixelChannels(scale_image); } } else { ssize_t t; /* Scale X direction. */ for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]=0.0; next_column=MagickFalse; span.x=1.0; t=0; for (x=0; x < (ssize_t) image->columns; x++) { scale.x=(double) scale_image->columns/(double) image->columns; while (scale.x >= span.x) { if (next_column != MagickFalse) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]=0.0; t++; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; pixel[i]+=span.x*scanline[x*GetPixelChannels(image)+i]; scale_scanline[t*GetPixelChannels(image)+i]=pixel[i]; } scale.x-=span.x; span.x=1.0; next_column=MagickTrue; } if (scale.x > 0) { if (next_column != MagickFalse) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]=0.0; next_column=MagickFalse; t++; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) 
pixel[i]+=scale.x*scanline[x*GetPixelChannels(image)+i]; span.x-=scale.x; } } if (span.x > 0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]+=span.x*scanline[(x-1)*GetPixelChannels(image)+i]; } if ((next_column == MagickFalse) && (t < (ssize_t) scale_image->columns)) for (i=0; i < (ssize_t) GetPixelChannels(image); i++) scale_scanline[t*GetPixelChannels(image)+i]=pixel[i]; /* Transfer scanline to scaled image. */ for (x=0; x < (ssize_t) scale_image->columns; x++) { if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(scale_image); continue; } if (image->alpha_trait != UndefinedPixelTrait) { alpha=QuantumScale*scale_scanline[x*GetPixelChannels(image)+ GetPixelChannelOffset(image,AlphaPixelChannel)]; alpha=PerceptibleReciprocal(alpha); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); scale_traits=GetPixelChannelTraits(scale_image,channel); if ((traits == UndefinedPixelTrait) || (scale_traits == UndefinedPixelTrait)) continue; if ((traits & BlendPixelTrait) == 0) { SetPixelChannel(scale_image,channel,ClampToQuantum( scale_scanline[x*GetPixelChannels(image)+i]),q); continue; } SetPixelChannel(scale_image,channel,ClampToQuantum(alpha* scale_scanline[x*GetPixelChannels(image)+i]),q); } q+=GetPixelChannels(scale_image); } } if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse) { status=MagickFalse; break; } proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) { status=MagickFalse; break; } } scale_view=DestroyCacheView(scale_view); image_view=DestroyCacheView(image_view); /* Free allocated memory. 
*/ y_vector=(double *) RelinquishMagickMemory(y_vector); scale_scanline=(double *) RelinquishMagickMemory(scale_scanline); if (scale_image->rows != image->rows) scanline=(double *) RelinquishMagickMemory(scanline); x_vector=(double *) RelinquishMagickMemory(x_vector); scale_image->type=image->type; if (status == MagickFalse) scale_image=DestroyImage(scale_image); return(scale_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T h u m b n a i l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ThumbnailImage() changes the size of an image to the given dimensions and % removes any associated profiles. The goal is to produce small low cost % thumbnail images suited for display on the Web. % % The format of the ThumbnailImage method is: % % Image *ThumbnailImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the scaled image. % % o rows: the number of rows in the scaled image. % % o exception: return any errors or warnings in this structure. 
% */
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor  5

  char
    *url,
    value[MagickPathExtent];

  const char
    *name;

  Image
    *thumbnail_image;

  double
    x_factor,
    y_factor;

  struct stat
    attributes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  /*
    For modest shrinks resize directly; for heavy shrinks pre-sample at
    SampleFactor x the target size (cheap point sampling) before the filtered
    resize, unless the pre-sample would itself be tiny (< 128 pixels wide or
    tall), in which case direct resize is just as fast.
  */
  if ((x_factor*y_factor) > 0.1)
    thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
  else
    if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
      thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
    else
      {
        Image
          *sample_image;

        sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
          exception);
        if (sample_image == (Image *) NULL)
          return((Image *) NULL);
        thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
          exception);
        sample_image=DestroyImage(sample_image);
      }
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  if (thumbnail_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel,exception);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        (void) DeleteImageProfile(thumbnail_image,name);
        /* the profile list was mutated; restart the iteration */
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  /*
    Record freedesktop.org thumbnail metadata (Thumb::* properties).
  */
  (void) CopyMagickString(value,image->magick_filename,MagickPathExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MagickPathExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value,exception);
  (void) CopyMagickString(value,image->magick_filename,MagickPathExtent);
  if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
    {
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        attributes.st_mtime);
      (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value,exception);
    }
  /*
    Bug fix: a stray FormatLocaleString() that re-read attributes.st_mtime
    outside the GetPathAttributes() guard was removed -- it dereferenced an
    uninitialized struct stat when the path lookup failed, and its output was
    immediately overwritten by FormatMagickSize() below anyway.
  */
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",MagickPathExtent,
    value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value,exception);
  (void) FormatLocaleString(value,MagickPathExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value,exception);
  url=GetMagickHomeURL();
  (void) SetImageProperty(thumbnail_image,"software",url,exception);
  url=DestroyString(url);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    image->magick_columns);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value,
    exception);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    image->magick_rows);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value,
    exception);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    GetImageListLength(image));
  (void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value,
    exception);
  return(thumbnail_image);
}
data.c
#include "data.h" #include "utils.h" #include "image.h" #include "cuda.h" #include <stdio.h> #include <stdlib.h> #include <string.h> pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; list *get_paths(char *filename) { char *path; FILE *file = fopen(filename, "r"); if (!file) file_error(filename); list *lines = make_list(); while ((path = fgetl(file))) { list_insert(lines, path); } fclose(file); return lines; } /* char **get_random_paths_indexes(char **paths, int n, int m, int *indexes) { char **random_paths = calloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); for(i = 0; i < n; ++i){ int index = rand()%m; indexes[i] = index; random_paths[i] = paths[index]; if(i == 0) printf("%s\n", paths[index]); } pthread_mutex_unlock(&mutex); return random_paths; } */ char **get_random_paths(char **paths, int n, int m) { char **random_paths = calloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); for (i = 0; i < n; ++i) { int index = rand() % m; random_paths[i] = paths[index]; //if(i == 0) printf("%s\n", paths[index]); } pthread_mutex_unlock(&mutex); return random_paths; } char **find_replace_paths(char **paths, int n, char *find, char *replace) { char **replace_paths = calloc(n, sizeof(char*)); int i; for (i = 0; i < n; ++i) { char replaced[4096]; find_replace(paths[i], find, replace, replaced); replace_paths[i] = copy_string(replaced); } return replace_paths; } matrix load_image_paths_gray(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = calloc(X.rows, sizeof(float*)); X.cols = 0; for (i = 0; i < n; ++i) { image im = load_image(paths[i], w, h, 3); image gray = grayscale_image(im); free_image(im); im = gray; X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } //读取N个长宽分别为hw的图片 matrix load_image_paths(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = calloc(X.rows, sizeof(float*)); X.cols = 0; for (i = 0; i < n; ++i) { image im = load_image_color(paths[i], w, h); X.vals[i] = im.data; X.cols = im.h*im.w*im.c; 
} return X; } matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center) { int i; matrix X; X.rows = n; X.vals = calloc(X.rows, sizeof(float*)); X.cols = 0; for (i = 0; i < n; ++i) { //不进行缩放的读取图片 image im = load_image_color(paths[i], 0, 0); image crop; //根据选择确定如何处理裁剪图像,是根据中心还是旋转 if (center) { crop = center_crop_image(im, size, size); } else { crop = random_augment_image(im, angle, aspect, min, max, size, size); } int flip = rand() % 2; if (flip) flip_image(crop); random_distort_image(crop, hue, saturation, exposure); /* show_image(im, "orig"); show_image(crop, "crop"); cvWaitKey(0); */ //grayscale_image_3c(crop); free_image(im); X.vals[i] = crop.data; X.cols = crop.h*crop.w*crop.c; } return X; } box_label *read_boxes(char *filename, int *n) { FILE *file = fopen(filename, "r"); if (!file) file_error(filename); float x, y, h, w; int id; int count = 0; int size = 64; box_label *boxes = calloc(size, sizeof(box_label)); while (fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5) { if (count == size) { size = size * 2; boxes = realloc(boxes, size * sizeof(box_label)); } boxes[count].id = id; boxes[count].x = x; boxes[count].y = y; boxes[count].h = h; boxes[count].w = w; boxes[count].left = x - w / 2; boxes[count].right = x + w / 2; boxes[count].top = y - h / 2; boxes[count].bottom = y + h / 2; ++count; } fclose(file); *n = count; return boxes; } void randomize_boxes(box_label *b, int n) { int i; for (i = 0; i < n; ++i) { box_label swap = b[i]; int index = rand() % n; b[i] = b[index]; b[index] = swap; } } //void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip) //{ // int i; // for (i = 0; i < n; ++i) { // if (boxes[i].x == 0 && boxes[i].y == 0) { // boxes[i].x = 999999; // boxes[i].y = 999999; // boxes[i].w = 999999; // boxes[i].h = 999999; // continue; // } // boxes[i].left = boxes[i].left * sx - dx; // boxes[i].right = 
boxes[i].right * sx - dx; // boxes[i].top = boxes[i].top * sy - dy; // boxes[i].bottom = boxes[i].bottom* sy - dy; // // if (flip) { // float swap = boxes[i].left; // boxes[i].left = 1. - boxes[i].right; // boxes[i].right = 1. - swap; // } // // boxes[i].left = constrain(0, 1, boxes[i].left); // boxes[i].right = constrain(0, 1, boxes[i].right); // boxes[i].top = constrain(0, 1, boxes[i].top); // boxes[i].bottom = constrain(0, 1, boxes[i].bottom); // // boxes[i].x = (boxes[i].left + boxes[i].right) / 2; // boxes[i].y = (boxes[i].top + boxes[i].bottom) / 2; // boxes[i].w = (boxes[i].right - boxes[i].left); // boxes[i].h = (boxes[i].bottom - boxes[i].top); // // boxes[i].w = constrain(0, 1, boxes[i].w); // boxes[i].h = constrain(0, 1, boxes[i].h); // } //} void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip, int vflip, int trans) { int i; for (i = 0; i < n; ++i) { if (boxes[i].x == 0 && boxes[i].y == 0) { boxes[i].x = 999999; boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } boxes[i].left = boxes[i].left * sx - dx; boxes[i].right = boxes[i].right * sx - dx; boxes[i].top = boxes[i].top * sy - dy; boxes[i].bottom = boxes[i].bottom* sy - dy; if (flip) { float swap = boxes[i].left; boxes[i].left = 1. - boxes[i].right; boxes[i].right = 1. - swap; } if (vflip) { float swap = boxes[i].top; boxes[i].top = 1. - boxes[i].bottom; boxes[i].bottom = 1. 
- swap; } boxes[i].left = constrain(0, 1, boxes[i].left); boxes[i].right = constrain(0, 1, boxes[i].right); boxes[i].top = constrain(0, 1, boxes[i].top); boxes[i].bottom = constrain(0, 1, boxes[i].bottom); boxes[i].x = (boxes[i].left + boxes[i].right) / 2; boxes[i].y = (boxes[i].top + boxes[i].bottom) / 2; boxes[i].w = (boxes[i].right - boxes[i].left); boxes[i].h = (boxes[i].bottom - boxes[i].top); boxes[i].w = constrain(0, 1, boxes[i].w); boxes[i].h = constrain(0, 1, boxes[i].h); if (trans) { float temp = boxes[i].x; boxes[i].x = boxes[i].y; boxes[i].y = temp; temp = boxes[i].w; boxes[i].w = boxes[i].h; boxes[i].h = temp; } } } void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; find_replace(path, "images", "labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip, 0, 0); float x, y, w, h; int id; int i; for (i = 0; i < count && i < 90; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .0 || h < .0) continue; int index = (4 + classes) * i; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; if (id < classes) truth[index + id] = 1; } free(boxes); } void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; find_replace(path, "yolotraindest", "yolotrainlabel", labelpath); //find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".png", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); 
find_replace(labelpath, ".JPEG", ".txt", labelpath); int count = 0; //读取box box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip, 0, 0); float x, y, w, h; int id; int i; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .005 || h < .005) continue; int col = (int)(x*num_boxes); int row = (int)(y*num_boxes); x = x * num_boxes - col; y = y * num_boxes - row; int index = (col + row * num_boxes)*(5 + classes); if (truth[index]) continue; truth[index++] = 1; if (id < classes) truth[index + id] = 1; index += classes; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; } free(boxes); } void load_rle(image im, int *rle, int n) { int count = 0; int curr = 0; int i, j; for (i = 0; i < n; ++i) { for (j = 0; j < rle[i]; ++j) { im.data[count++] = curr; } curr = 1 - curr; } for (; count < im.h*im.w*im.c; ++count) { im.data[count] = curr; } } void or_image(image src, image dest, int c) { int i; for (i = 0; i < src.w*src.h; ++i) { if (src.data[i]) dest.data[dest.w*dest.h*c + i] = 1; } } void exclusive_image(image src) { int k, j, i; int s = src.w*src.h; for (k = 0; k < src.c - 1; ++k) { for (i = 0; i < s; ++i) { if (src.data[k*s + i]) { for (j = k + 1; j < src.c; ++j) { src.data[j*s + i] = 0; } } } } } box bound_image(image im) { int x, y; int minx = im.w; int miny = im.h; int maxx = 0; int maxy = 0; for (y = 0; y < im.h; ++y) { for (x = 0; x < im.w; ++x) { if (im.data[y*im.w + x]) { minx = (x < minx) ? x : minx; miny = (y < miny) ? y : miny; maxx = (x > maxx) ? x : maxx; maxy = (y > maxy) ? 
y : maxy; } } } box b = { minx, miny, maxx - minx + 1, maxy - miny + 1 }; //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h); return b; } void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh) { char labelpath[4096]; find_replace(path, "images", "mask", labelpath); find_replace(labelpath, "JPEGImages", "mask", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); FILE *file = fopen(labelpath, "r"); if (!file) file_error(labelpath); char buff[32788]; int id; int i = 0; int j; image part = make_image(w, h, 1); while ((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes) { int n = 0; int *rle = read_intlist(buff, &n, 0); load_rle(part, rle, n); image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect); if (flip) flip_image(sized); image mask = resize_image(sized, mw, mh); truth[i*(mw*mh + 1)] = id; for (j = 0; j < mw*mh; ++j) { truth[i*(mw*mh + 1) + 1 + j] = mask.data[j]; } ++i; free_image(mask); free_image(sized); free(rle); } if (i < num_boxes) truth[i*(mw*mh + 1)] = -1; fclose(file); free_image(part); } void fill_truth_mask(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh) { char labelpath[4096]; find_replace(path, "images", "mask", labelpath); find_replace(labelpath, "JPEGImages", "mask", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); FILE *file = fopen(labelpath, "r"); if (!file) file_error(labelpath); char buff[32788]; int id; int i = 0; image part = make_image(w, h, 1); while ((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes) { int n = 0; int *rle = read_intlist(buff, &n, 0); load_rle(part, rle, n); image sized = rotate_crop_image(part, 
aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect); if (flip) flip_image(sized); box b = bound_image(sized); if (b.w > 0) { image crop = crop_image(sized, b.x, b.y, b.w, b.h); image mask = resize_image(crop, mw, mh); truth[i*(4 + mw * mh + 1) + 0] = (b.x + b.w / 2.) / sized.w; truth[i*(4 + mw * mh + 1) + 1] = (b.y + b.h / 2.) / sized.h; truth[i*(4 + mw * mh + 1) + 2] = b.w / sized.w; truth[i*(4 + mw * mh + 1) + 3] = b.h / sized.h; int j; for (j = 0; j < mw*mh; ++j) { truth[i*(4 + mw * mh + 1) + 4 + j] = mask.data[j]; } truth[i*(4 + mw * mh + 1) + 4 + mw * mh] = id; free_image(crop); free_image(mask); ++i; } free_image(sized); free(rle); } fclose(file); free_image(part); } //void fill_truth_detection(char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy) //{ // char labelpath[4096]; // find_replace(path, "yolotraindest", "yolotrainlabel", labelpath); // //find_replace(labelpath, "JPEGImages", "labels", labelpath); // // find_replace(labelpath, ".jpg", ".txt", labelpath); // find_replace(labelpath, ".png", ".txt", labelpath); // find_replace(labelpath, ".JPG", ".txt", labelpath); // find_replace(labelpath, ".JPEG", ".txt", labelpath); // int count = 0; // box_label *boxes = read_boxes(labelpath, &count); // randomize_boxes(boxes, count); // correct_boxes(boxes, count, dx, dy, sx, sy, flip); // if (count > num_boxes) count = num_boxes; // float x, y, w, h; // int id; // int i; // int sub = 0; // // for (i = 0; i < count; ++i) { // x = boxes[i].x; // y = boxes[i].y; // w = boxes[i].w; // h = boxes[i].h; // id = boxes[i].id; // // if ((w < .001 || h < .001)) { // ++sub; // continue; // } // // truth[(i - sub) * 5 + 0] = x; // truth[(i - sub) * 5 + 1] = y; // truth[(i - sub) * 5 + 2] = w; // truth[(i - sub) * 5 + 3] = h; // truth[(i - sub) * 5 + 4] = id; // } // free(boxes); //} void fill_truth_detection(char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy, int 
vflip, int trans) { char labelpath[4096]; find_replace(path, "yolotraindest", "yolotrainlabel", labelpath); //find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".png", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip, vflip, trans); if (count > num_boxes) count = num_boxes; float x, y, w, h; int id; int i; int sub = 0; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if ((w < .001 || h < .001)) { ++sub; continue; } truth[(i - sub) * 5 + 0] = x; truth[(i - sub) * 5 + 1] = y; truth[(i - sub) * 5 + 2] = w; truth[(i - sub) * 5 + 3] = h; truth[(i - sub) * 5 + 4] = id; } free(boxes); } void fill_truth_autoparamet(char *path, int datalength, float *truth) { char labelpath[4096]; find_replace(path, "yolotraindest", "yolotrainlabel", labelpath); //find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".png", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); FILE *file = fopen(labelpath, "r"); if (!file) file_error(labelpath); fscanf(file, "%f %f %f %f %f ", &truth[0], &truth[1], &truth[2], &truth[3], &truth[4]); fscanf(file, "%f %f %f %f %f ", &truth[5], &truth[6], &truth[7], &truth[8], &truth[9]); fscanf(file, "%f %f %f %f %f ", &truth[10], &truth[11], &truth[12], &truth[13], &truth[14]); fscanf(file, "%f %f %f %f %f ", &truth[15], &truth[16], &truth[17], &truth[18], &truth[19]); fclose(file); } #define NUMCHARS 37 void print_letters(float *pred, int n) { int i; for (i = 0; i < n; ++i) { int index = max_index(pred + i * NUMCHARS, NUMCHARS); printf("%c", 
int_to_alphanum(index)); } printf("\n"); } void fill_truth_captcha(char *path, int n, float *truth) { char *begin = strrchr(path, '/'); ++begin; int i; for (i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i) { int index = alphanum_to_int(begin[i]); if (index > 35) printf("Bad %c\n", begin[i]); truth[i*NUMCHARS + index] = 1; } for (; i < n; ++i) { truth[i*NUMCHARS + NUMCHARS - 1] = 1; } } data load_data_captcha(char **paths, int n, int m, int k, int w, int h) { if (m) paths = get_random_paths(paths, n, m); data d = { 0 }; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = make_matrix(n, k*NUMCHARS); int i; for (i = 0; i < n; ++i) { fill_truth_captcha(paths[i], k, d.y.vals[i]); } if (m) free(paths); return d; } data load_data_captcha_encode(char **paths, int n, int m, int w, int h) { if (m) paths = get_random_paths(paths, n, m); data d = { 0 }; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.X.cols = 17100; d.y = d.X; if (m) free(paths); return d; } void fill_truth(char *path, char **labels, int k, float *truth) { int i; memset(truth, 0, k * sizeof(float)); int count = 0; for (i = 0; i < k; ++i) { if (strstr(path, labels[i])) { truth[i] = 1; ++count; //printf("%s %s %d\n", path, labels[i], i); } } if (count != 1 && (k != 1 || count != 0)) printf("Too many or too few labels: %d, %s\n", count, path); } void fill_hierarchy(float *truth, int k, tree *hierarchy) { int j; for (j = 0; j < k; ++j) { if (truth[j]) { int parent = hierarchy->parent[j]; while (parent >= 0) { truth[parent] = 1; parent = hierarchy->parent[parent]; } } } int i; int count = 0; for (j = 0; j < hierarchy->groups; ++j) { //printf("%d\n", count); int mask = 1; for (i = 0; i < hierarchy->group_size[j]; ++i) { if (truth[count + i]) { mask = 0; break; } } if (mask) { for (i = 0; i < hierarchy->group_size[j]; ++i) { truth[count + i] = SECRET_NUM; } } count += hierarchy->group_size[j]; } } matrix load_regression_labels_paths(char **paths, int n, int k) { matrix y = make_matrix(n, 
k); int i, j; for (i = 0; i < n; ++i) { char labelpath[4096]; find_replace(paths[i], "images", "labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".BMP", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPeG", ".txt", labelpath); find_replace(labelpath, ".Jpeg", ".txt", labelpath); find_replace(labelpath, ".PNG", ".txt", labelpath); find_replace(labelpath, ".TIF", ".txt", labelpath); find_replace(labelpath, ".bmp", ".txt", labelpath); find_replace(labelpath, ".jpeg", ".txt", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".png", ".txt", labelpath); find_replace(labelpath, ".tif", ".txt", labelpath); FILE *file = fopen(labelpath, "r"); for (j = 0; j < k; ++j) { fscanf(file, "%f", &(y.vals[i][j])); } fclose(file); } return y; } matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy) { matrix y = make_matrix(n, k); int i; for (i = 0; i < n && labels; ++i) { fill_truth(paths[i], labels, k, y.vals[i]); if (hierarchy) { fill_hierarchy(y.vals[i], k, hierarchy); } } return y; } matrix load_tags_paths(char **paths, int n, int k) { matrix y = make_matrix(n, k); int i; //int count = 0; for (i = 0; i < n; ++i) { char label[4096]; find_replace(paths[i], "images", "labels", label); find_replace(label, ".jpg", ".txt", label); FILE *file = fopen(label, "r"); if (!file) continue; //++count; int tag; while (fscanf(file, "%d", &tag) == 1) { if (tag < k) { y.vals[i][tag] = 1; } } fclose(file); } //printf("%d/%d\n", count, n); return y; } char **get_labels(char *filename) { list *plist = get_paths(filename); char **labels = (char **)list_to_array(plist); free_list(plist); return labels; } void free_data(data d) { if (!d.shallow) { free_matrix(d.X); free_matrix(d.y); } else { free(d.X.vals); free(d.y.vals); } } image get_segmentation_image(char *path, int w, int 
h, int classes) { char labelpath[4096]; find_replace(path, "images", "mask", labelpath); find_replace(labelpath, "JPEGImages", "mask", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); image mask = make_image(w, h, classes); FILE *file = fopen(labelpath, "r"); if (!file) file_error(labelpath); char buff[32788]; int id; image part = make_image(w, h, 1); while (fscanf(file, "%d %s", &id, buff) == 2) { int n = 0; int *rle = read_intlist(buff, &n, 0); load_rle(part, rle, n); or_image(part, mask, id); free(rle); } //exclusive_image(mask); fclose(file); free_image(part); return mask; } image get_segmentation_image2(char *path, int w, int h, int classes) { char labelpath[4096]; find_replace(path, "images", "mask", labelpath); find_replace(labelpath, "JPEGImages", "mask", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); image mask = make_image(w, h, classes + 1); int i; for (i = 0; i < w*h; ++i) { mask.data[w*h*classes + i] = 1; } FILE *file = fopen(labelpath, "r"); if (!file) file_error(labelpath); char buff[32788]; int id; image part = make_image(w, h, 1); while (fscanf(file, "%d %s", &id, buff) == 2) { int n = 0; int *rle = read_intlist(buff, &n, 0); load_rle(part, rle, n); or_image(part, mask, id); for (i = 0; i < w*h; ++i) { if (part.data[i]) mask.data[w*h*classes + i] = 0; } free(rle); } //exclusive_image(mask); fclose(file); free_image(part); return mask; } data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div) { char **random_paths = get_random_paths(paths, n, m); int i; data d = { 0 }; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h * w * 3; d.y.rows = n; d.y.cols = h * 
w*classes / div / div; d.y.vals = calloc(d.X.rows, sizeof(float*)); for (i = 0; i < n; ++i) { image orig = load_image_color(random_paths[i], 0, 0); augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h); image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect); int flip = rand() % 2; if (flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes); //image mask = make_image(orig.w, orig.h, classes+1); image sized_m = rotate_crop_image(mask, a.rad, a.scale / div, a.w / div, a.h / div, a.dx / div, a.dy / div, a.aspect); if (flip) flip_image(sized_m); d.y.vals[i] = sized_m.data; free_image(orig); free_image(mask); /* image rgb = mask_to_rgb(sized_m, classes); show_image(rgb, "part"); show_image(sized, "orig"); cvWaitKey(0); free_image(rgb); */ } free(random_paths); return d; } data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int div, int min, int max, float angle, float aspect, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = { 0 }; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h * w * 3; d.y = make_matrix(n, (((w / div)*(h / div)) + 1)*boxes); for (i = 0; i < n; ++i) { image orig = load_image_color(random_paths[i], 0, 0); augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h); image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect); int flip = rand() % 2; if (flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; //show_image(sized, "image"); fill_truth_iseg(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, w / div, h / div); free_image(orig); /* image rgb = mask_to_rgb(sized_m, classes); show_image(rgb, "part"); show_image(sized, 
"orig"); cvWaitKey(0); free_image(rgb); */ } free(random_paths); return d; } data load_data_mask(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = { 0 }; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h * w * 3; d.y = make_matrix(n, (coords + 1)*boxes); for (i = 0; i < n; ++i) { image orig = load_image_color(random_paths[i], 0, 0); augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h); image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect); int flip = rand() % 2; if (flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; //show_image(sized, "image"); fill_truth_mask(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, 14, 14); free_image(orig); /* image rgb = mask_to_rgb(sized_m, classes); show_image(rgb, "part"); show_image(sized, "orig"); cvWaitKey(0); free_image(rgb); */ } free(random_paths); return d; } data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = { 0 }; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h * w * 3; int k = size * size*(5 + classes); d.y = make_matrix(n, k); for (i = 0; i < n; ++i) { image orig = load_image_color(random_paths[i], 0, 0); int oh = orig.h; int ow = orig.w; int dw = (ow*jitter); int dh = (oh*jitter); int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; int flip = rand() % 2; 
image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft / ow) / sx; float dy = ((float)ptop / oh) / sy; image sized = resize_image(cropped, w, h); if (flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1. / sx, 1. / sy); free_image(orig); free_image(cropped); } free(random_paths); return d; } data load_data_compare(int n, char **paths, int m, int classes, int w, int h) { if (m) paths = get_random_paths(paths, 2 * n, m); int i, j; data d = { 0 }; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h * w * 6; int k = 2 * (classes); d.y = make_matrix(n, k); for (i = 0; i < n; ++i) { image im1 = load_image_color(paths[i * 2], w, h); image im2 = load_image_color(paths[i * 2 + 1], w, h); d.X.vals[i] = calloc(d.X.cols, sizeof(float)); memcpy(d.X.vals[i], im1.data, h*w * 3 * sizeof(float)); memcpy(d.X.vals[i] + h * w * 3, im2.data, h*w * 3 * sizeof(float)); int id; float iou; char imlabel1[4096]; char imlabel2[4096]; find_replace(paths[i * 2], "imgs", "labels", imlabel1); find_replace(imlabel1, "jpg", "txt", imlabel1); FILE *fp1 = fopen(imlabel1, "r"); while (fscanf(fp1, "%d %f", &id, &iou) == 2) { if (d.y.vals[i][2 * id] < iou) d.y.vals[i][2 * id] = iou; } find_replace(paths[i * 2 + 1], "imgs", "labels", imlabel2); find_replace(imlabel2, "jpg", "txt", imlabel2); FILE *fp2 = fopen(imlabel2, "r"); while (fscanf(fp2, "%d %f", &id, &iou) == 2) { if (d.y.vals[i][2 * id + 1] < iou) d.y.vals[i][2 * id + 1] = iou; } for (j = 0; j < classes; ++j) { if (d.y.vals[i][2 * j] > .5 && d.y.vals[i][2 * j + 1] < .5) { d.y.vals[i][2 * j] = 1; d.y.vals[i][2 * j + 1] = 0; } else if (d.y.vals[i][2 * j] < .5 && d.y.vals[i][2 * j + 1] > .5) { d.y.vals[i][2 * j] = 0; d.y.vals[i][2 * j + 1] = 1; } else { d.y.vals[i][2 * j] = SECRET_NUM; d.y.vals[i][2 * j + 1] = SECRET_NUM; } } fclose(fp1); fclose(fp2); 
free_image(im1); free_image(im2); } if (m) free(paths); return d; } data load_data_swag(char **paths, int n, int classes, float jitter) { int index = rand() % n; char *random_path = paths[index]; image orig = load_image_color(random_path, 0, 0); int h = orig.h; int w = orig.w; data d = { 0 }; d.shallow = 0; d.w = w; d.h = h; d.X.rows = 1; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h * w * 3; int k = (4 + classes) * 90; d.y = make_matrix(1, k); int dw = w * jitter; int dh = h * jitter; int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = w - pleft - pright; int sheight = h - ptop - pbot; float sx = (float)swidth / w; float sy = (float)sheight / h; int flip = rand() % 2; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft / w) / sx; float dy = ((float)ptop / h) / sy; image sized = resize_image(cropped, w, h); if (flip) flip_image(sized); d.X.vals[0] = sized.data; fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1. / sx, 1. 
/ sy); free_image(orig); free_image(cropped); return d; } data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = { 0 }; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h * w * 3; d.y = make_matrix(n, 5 * boxes); for (i = 0; i < n; ++i) { image orig = load_image_color(random_paths[i], 0, 0); image sized = make_image(w, h, orig.c); fill_image(sized, .5); float dw = jitter * orig.w; float dh = jitter * orig.h; float new_ar = (orig.w + rand_uniform(-dw, dw)) / (orig.h + rand_uniform(-dh, dh)); //float scale = rand_uniform(.25, 2); float scale = 1; float nw, nh; if (new_ar < 1) { nh = scale * h; nw = nh * new_ar; } else { nw = scale * w; nh = nw / new_ar; } float dx = rand_uniform(0, w - nw); float dy = rand_uniform(0, h - nh); place_image(orig, nw, nh, dx, dy, sized); random_distort_image(sized, hue, saturation, exposure); int flip = rand() % 2; if (flip) flip_image(sized); int vflip = rand() % 2; if (vflip) vflip_image(sized); int trans = rand() % 2; if (trans) transpose_image(sized); d.X.vals[i] = sized.data; fill_truth_detection(random_paths[i], boxes, d.y.vals[i], classes, flip, -dx / w, -dy / h, nw / w, nh / h, vflip, trans); free_image(orig); } free(random_paths); return d; } data load_data_autoparamet(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = { 0 }; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h * w * 3; d.y = make_matrix(n, 20); for (i = 0; i < n; ++i) { image orig = load_image_color(random_paths[i], 0, 0); image sized = make_image(w, h, orig.c); fill_image(sized, .5); float dw = jitter * orig.w; float dh = jitter * orig.h; float new_ar = (orig.w + rand_uniform(-dw, dw)) 
/ (orig.h + rand_uniform(-dh, dh)); //float scale = rand_uniform(.25, 2); float scale = 1; float nw, nh; if (new_ar < 1) { nh = scale * h; nw = nh * new_ar; } else { nw = scale * w; nh = nw / new_ar; } float dx = rand_uniform(0, w - nw); float dy = rand_uniform(0, h - nh); place_image(orig, nw, nh, dx, dy, sized); //random_distort_image(sized, hue, saturation, exposure); //int flip = rand() % 2; //if (flip) // flip_image(sized); //int vflip = rand() % 2; //if (vflip) // vflip_image(sized); //int trans = rand() % 2; //if (trans) // transpose_image(sized); d.X.vals[i] = sized.data; fill_truth_autoparamet(random_paths[i], 20, d.y.vals[i]); //fill_truth_detection(random_paths[i], boxes, d.y.vals[i], classes, flip, // -dx / w, -dy / h, nw / w, nh / h, vflip, trans); free_image(orig); } free(random_paths); return d; } #ifdef PTHREAD void *load_thread(void *ptr) { //printf("Loading data: %d\n", rand()); load_args a = *(struct load_args*)ptr; if (a.exposure == 0) a.exposure = 1; if (a.saturation == 0) a.saturation = 1; if (a.aspect == 0) a.aspect = 1; if (a.type == OLD_CLASSIFICATION_DATA) { *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h); } else if (a.type == REGRESSION_DATA) { *a.d = load_data_regression(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } else if (a.type == CLASSIFICATION_DATA) { *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.center); } else if (a.type == SUPER_DATA) { *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale); } else if (a.type == WRITING_DATA) { *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h); } else if (a.type == ISEG_DATA) { *a.d = load_data_iseg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.scale, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } else if (a.type == INSTANCE_DATA) { *a.d = 
load_data_mask(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.coords, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    }
    else if (a.type == SEGMENTATION_DATA) {
        *a.d = load_data_seg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.scale);
    }
    else if (a.type == REGION_DATA) {
        *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
    }
    else if (a.type == DETECTION_DATA) {
        *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
    }
    else if (a.type == SWAG_DATA) {
        *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
    }
    else if (a.type == COMPARE_DATA) {
        *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
    }
    else if (a.type == IMAGE_DATA) {
        /* single image + resized copy rather than a data matrix */
        *(a.im) = load_image_color(a.path, 0, 0);
        *(a.resized) = resize_image(*(a.im), a.w, a.h);
    }
    else if (a.type == LETTERBOX_DATA) {
        *(a.im) = load_image_color(a.path, 0, 0);
        *(a.resized) = letterbox_image(*(a.im), a.w, a.h);
    }
    else if (a.type == TAG_DATA) {
        *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    }
    free(ptr);   /* args were heap-copied by the caller; release here */
    return 0;
}

/* Spawn one joinable worker running load_thread().  args is copied to the
** heap so the caller's copy may go out of scope immediately; the caller
** must pthread_join() the returned handle. */
pthread_t load_data_in_thread(load_args args)
{
    pthread_t thread;
    struct load_args *ptr = calloc(1, sizeof(struct load_args));
    *ptr = args;
    if (pthread_create(&thread, 0, load_thread, ptr))
        error("Thread creation failed");
    return thread;
}

/* Fan loading out across args.threads workers, then concatenate the
** per-thread results into *args.d.  Frees ptr (the heap-copied args). */
void *load_threads(void *ptr)
{
    int i;
    load_args args = *(load_args *)ptr;
    if (args.threads == 0)
        args.threads = 1;
    data *out = args.d;
    int total = args.n;
    free(ptr);
    data *buffers = calloc(args.threads, sizeof(data));
    pthread_t *threads = calloc(args.threads, sizeof(pthread_t));
    for (i = 0; i < args.threads; ++i) {
        args.d = buffers + i;
        /* split `total` as evenly as possible across workers */
        args.n = (i + 1) * total / args.threads - i * total / args.threads;
        threads[i] = load_data_in_thread(args);
    }
    for (i = 0; i < args.threads;
++i) {
        pthread_join(threads[i], 0);
    }
    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;
    /* the concatenated result owns the rows now; mark each per-thread
    ** buffer shallow so free_data() releases only its row-pointer array */
    for (i = 0; i < args.threads; ++i) {
        buffers[i].shallow = 1;
        free_data(buffers[i]);
    }
    free(buffers);
    free(threads);
    return 0;
}

/* Synchronous variant: run the loader in the calling thread. */
void load_data_blocking(load_args args)
{
    struct load_args *ptr = calloc(1, sizeof(struct load_args));
    *ptr = args;
    load_thread(ptr);
}

/* Asynchronous multi-threaded load; pthread_join() the result to wait. */
pthread_t load_data(load_args args)
{
    pthread_t thread;
    struct load_args *ptr = calloc(1, sizeof(struct load_args));
    *ptr = args;
    if (pthread_create(&thread, 0, load_threads, ptr))
        error("Thread creation failed");
    return thread;
}
#endif

/* Image-to-image pairs: X from each *.png, y from its grayscale
** "-label.png" companion resized to out_w x out_h. */
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    if (m)
        paths = get_random_paths(paths, n, m);
    char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
    data d = { 0 };
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
    if (m)
        free(paths);
    int i;
    for (i = 0; i < n; ++i)
        free(replace_paths[i]);
    free(replace_paths);
    return d;
}

/* Plain classification data: images resized to w x h, labels derived from
** the path names via load_labels_paths() (no hierarchy). */
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    if (m)
        paths = get_random_paths(paths, n, m);
    data d = { 0 };
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_labels_paths(paths, n, labels, k, 0);
    if (m)
        free(paths);
    return d;
}

/*
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    data d = {0};
    d.indexes = calloc(n, sizeof(int));
    if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure);
    d.y = load_labels_paths(paths, n, labels, k);
    if(m) free(paths);
    return d;
}
*/

/* Super-resolution pairs: y is a random (w*scale x h*scale) crop of each
** image, X is that same crop downscaled to w x h. */
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
    if (m)
        paths = get_random_paths(paths, n, m);
    data d = { 0 };
    d.shallow = 0;
    int i;
    d.X.rows = n;
d.X.vals = calloc(n, sizeof(float*));
    d.X.cols = w * h * 3;
    d.y.rows = n;
    d.y.vals = calloc(n, sizeof(float*));
    d.y.cols = w * scale * h*scale * 3;
    for (i = 0; i < n; ++i) {
        image im = load_image_color(paths[i], 0, 0);
        image crop = random_crop_image(im, w*scale, h*scale);
        int flip = rand() % 2;
        if (flip)
            flip_image(crop);
        image resize = resize_image(crop, w, h);
        d.X.vals[i] = resize.data;   /* downscaled input */
        d.y.vals[i] = crop.data;     /* full-resolution target */
        free_image(im);
    }
    if (m)
        free(paths);
    return d;
}

/* Augmented images paired with per-image regression targets parsed from
** the paths by load_regression_labels_paths(). */
data load_data_regression(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    if (m)
        paths = get_random_paths(paths, n, m);
    data d = { 0 };
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_regression_labels_paths(paths, n, k);
    if (m)
        free(paths);
    return d;
}

/* Shallow view: row i is taken from dataset orig[inds[i]].
** NOTE(review): d.y.rows is copied from orig[0].X.rows, not orig[0].y.rows
** -- presumably they always match; confirm against callers. */
data select_data(data *orig, int *inds)
{
    data d = { 0 };
    d.shallow = 1;
    d.w = orig[0].w;
    d.h = orig[0].h;
    d.X.rows = orig[0].X.rows;
    d.y.rows = orig[0].X.rows;
    d.X.cols = orig[0].X.cols;
    d.y.cols = orig[0].y.cols;
    d.X.vals = calloc(orig[0].X.rows, sizeof(float *));
    d.y.vals = calloc(orig[0].y.rows, sizeof(float *));
    int i;
    for (i = 0; i < d.X.rows; ++i) {
        d.X.vals[i] = orig[inds[i]].X.vals[i];
        d.y.vals[i] = orig[inds[i]].y.vals[i];
    }
    return d;
}

/* Cut orig into divs*divs tiles, each tile `size` grid cells wide/tall.
** NOTE(review): the inner "#pragma omp parallel for" is nested inside the
** outer parallel loop; OpenMP predetermines the loop variables private,
** but nested parallelism is unusual here -- confirm it is intended.
** Also note `data d` is not zero-initialized (only the fields used below
** are assigned). */
data *tile_data(data orig, int divs, int size)
{
    data *ds = calloc(divs*divs, sizeof(data));
    int i, j;
#pragma omp parallel for
    for (i = 0; i < divs*divs; ++i) {
        data d;
        d.shallow = 0;
        d.w = orig.w / divs * size;
        d.h = orig.h / divs * size;
        d.X.rows = orig.X.rows;
        d.X.cols = d.w*d.h * 3;
        d.X.vals = calloc(d.X.rows, sizeof(float*));
        d.y = copy_matrix(orig.y);
#pragma omp parallel for
        for (j = 0; j < orig.X.rows; ++j) {
            /* tile origin, centered so oversized tiles overlap neighbors */
            int x = (i%divs) * orig.w / divs - (d.w - orig.w / divs) / 2;
            int y = (i / divs) * orig.h / divs - (d.h - orig.h / divs) / 2;
            image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]);
            d.X.vals[j] = crop_image(im, x, y, d.w,
d.h).data;
        }
        ds[i] = d;
    }
    return ds;
}

/* Deep copy of orig with every image resized to w x h (labels copied). */
data resize_data(data orig, int w, int h)
{
    data d = { 0 };
    d.shallow = 0;
    d.w = w;
    d.h = h;
    int i;
    d.X.rows = orig.X.rows;
    d.X.cols = w * h * 3;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.y = copy_matrix(orig.y);
#pragma omp parallel for
    for (i = 0; i < orig.X.rows; ++i) {
        image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]);
        d.X.vals[i] = resize_image(im, w, h).data;
    }
    return d;
}

/* Augmented classification data with optional label hierarchy tree. */
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
    if (m)
        paths = get_random_paths(paths, n, m);
    data d = { 0 };
    d.shallow = 0;
    d.w = size;
    d.h = size;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, center);
    d.y = load_labels_paths(paths, n, labels, k, hierarchy);
    if (m)
        free(paths);
    return d;
}

/* Augmented images with multi-label tag targets from load_tags_paths(). */
data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    if (m)
        paths = get_random_paths(paths, n, m);
    data d = { 0 };
    d.w = size;
    d.h = size;
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_tags_paths(paths, n, k);
    if (m)
        free(paths);
    return d;
}

/* Stack m2's row pointers after m1's; row storage is shared, not copied. */
matrix concat_matrix(matrix m1, matrix m2)
{
    int i, count = 0;
    matrix m;
    m.cols = m1.cols;
    m.rows = m1.rows + m2.rows;
    m.vals = calloc(m1.rows + m2.rows, sizeof(float*));
    for (i = 0; i < m1.rows; ++i) {
        m.vals[count++] = m1.vals[i];
    }
    for (i = 0; i < m2.rows; ++i) {
        m.vals[count++] = m2.vals[i];
    }
    return m;
}

/* Shallow concatenation of two datasets (rows shared with the inputs). */
data concat_data(data d1, data d2)
{
    data d = { 0 };
    d.shallow = 1;
    d.X = concat_matrix(d1.X, d2.X);
    d.y = concat_matrix(d1.y, d2.y);
    d.w = d1.w;
    d.h = d1.h;
    return d;
}

/* Fold n datasets into one, freeing each intermediate shallow result. */
data concat_datas(data *d, int n)
{
    int i;
    data out = { 0 };
    for (i = 0; i < n; ++i) {
        data new = concat_data(d[i], out);
        free_data(out);
        out = new;
    }
return out;
}

/* Load a CSV file; column `target` is popped and one-hot encoded into a
** k-column label matrix. */
data load_categorical_data_csv(char *filename, int target, int k)
{
    data d = { 0 };
    d.shallow = 0;
    matrix X = csv_to_matrix(filename);
    float *truth_1d = pop_column(&X, target);
    float **truth = one_hot_encode(truth_1d, X.rows, k);
    matrix y;
    y.rows = X.rows;
    y.cols = k;
    y.vals = truth;
    d.X = X;
    d.y = y;
    free(truth_1d);
    return d;
}

/* Read one CIFAR-10 binary batch (10000 records of 1 label byte followed
** by 3072 pixel bytes) and scale pixels to [0,1]. */
data load_cifar10_data(char *filename)
{
    data d = { 0 };
    d.shallow = 0;
    long i, j;
    matrix X = make_matrix(10000, 3072);
    matrix y = make_matrix(10000, 10);
    d.X = X;
    d.y = y;
    FILE *fp = fopen(filename, "rb");
    if (!fp)
        file_error(filename);
    for (i = 0; i < 10000; ++i) {
        unsigned char bytes[3073];
        /* NOTE(review): fread return value is ignored; a short read would
        ** silently leave stale bytes in this record */
        fread(bytes, 1, 3073, fp);
        int class = bytes[0];
        y.vals[i][class] = 1;
        for (j = 0; j < X.cols; ++j) {
            X.vals[i][j] = (double)bytes[j + 1];
        }
    }
    scale_data_rows(d, 1. / 255);
    //normalize_data_rows(d);
    fclose(fp);
    return d;
}

/* Copy n rows sampled uniformly at random (with replacement) into X/y. */
void get_random_batch(data d, int n, float *X, float *y)
{
    int j;
    for (j = 0; j < n; ++j) {
        int index = rand() % d.X.rows;
        memcpy(X + j * d.X.cols, d.X.vals[index], d.X.cols * sizeof(float));
        memcpy(y + j * d.y.cols, d.y.vals[index], d.y.cols * sizeof(float));
    }
}

/* Copy rows [offset, offset+n) into X, and into y when y is non-NULL. */
void get_next_batch(data d, int n, int offset, float *X, float *y)
{
    int j;
    for (j = 0; j < n; ++j) {
        int index = offset + j;
        memcpy(X + j * d.X.cols, d.X.vals[index], d.X.cols * sizeof(float));
        if (y)
            memcpy(y + j * d.y.cols, d.y.vals[index], d.y.cols * sizeof(float));
    }
}

/* Label smoothing: mix each label row with the uniform distribution
** (eps toward uniform, 1-eps toward the original value). */
void smooth_data(data d)
{
    int i, j;
    float scale = 1.
/ d.y.cols;
    float eps = .1;
    for (i = 0; i < d.y.rows; ++i) {
        for (j = 0; j < d.y.cols; ++j) {
            d.y.vals[i][j] = eps * scale + (1 - eps) * d.y.vals[i][j];
        }
    }
}

/* Load all five CIFAR-10 training batches from the standard path, scale
** pixels to [0,1] and apply label smoothing. */
data load_all_cifar10()
{
    data d = { 0 };
    d.shallow = 0;
    int i, j, b;
    matrix X = make_matrix(50000, 3072);
    matrix y = make_matrix(50000, 10);
    d.X = X;
    d.y = y;
    for (b = 0; b < 5; ++b) {
        char buff[256];
        sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b + 1);
        FILE *fp = fopen(buff, "rb");
        if (!fp)
            file_error(buff);
        for (i = 0; i < 10000; ++i) {
            unsigned char bytes[3073];
            /* NOTE(review): fread return value ignored, as above */
            fread(bytes, 1, 3073, fp);
            int class = bytes[0];
            y.vals[i + b * 10000][class] = 1;
            for (j = 0; j < X.cols; ++j) {
                X.vals[i + b * 10000][j] = (double)bytes[j + 1];
            }
        }
        fclose(fp);
    }
    //normalize_data_rows(d);
    scale_data_rows(d, 1. / 255);
    smooth_data(d);
    return d;
}

/* Parse a Go-move dataset: each record is a "row col" line followed by a
** 19x19 board string ('1' -> +1, '2' -> -1, anything else 0).  The target
** is a one-hot 361-vector for the move.  Matrices grow by doubling and
** are trimmed to the final record count. */
data load_go(char *filename)
{
    FILE *fp = fopen(filename, "rb");
    matrix X = make_matrix(3363059, 361);
    matrix y = make_matrix(3363059, 361);
    int row, col;
    if (!fp)
        file_error(filename);
    char *label;
    int count = 0;
    while ((label = fgetl(fp))) {
        int i;
        if (count == X.rows) {
            X = resize_matrix(X, count * 2);
            y = resize_matrix(y, count * 2);
        }
        sscanf(label, "%d %d", &row, &col);
        char *board = fgetl(fp);
        int index = row * 19 + col;
        y.vals[count][index] = 1;
        for (i = 0; i < 19 * 19; ++i) {
            float val = 0;
            if (board[i] == '1')
                val = 1;
            else if (board[i] == '2')
                val = -1;
            X.vals[count][i] = val;
        }
        ++count;
        free(label);
        free(board);
    }
    X = resize_matrix(X, count);
    y = resize_matrix(y, count);
    data d = { 0 };
    d.shallow = 0;
    d.X = X;
    d.y = y;
    fclose(fp);
    return d;
}

/* In-place shuffle of rows; X and y rows are swapped together so pairs
** stay aligned. */
void randomize_data(data d)
{
    int i;
    for (i = d.X.rows - 1; i > 0; --i) {
        int index = rand() % i;
        float *swap = d.X.vals[index];
        d.X.vals[index] = d.X.vals[i];
        d.X.vals[i] = swap;
        swap = d.y.vals[index];
        d.y.vals[index] = d.y.vals[i];
        d.y.vals[i] = swap;
    }
}

/* Multiply every feature value by s. */
void scale_data_rows(data d, float s)
{
    int i;
    for (i = 0; i < d.X.rows; ++i) {
        scale_array(d.X.vals[i], d.X.cols, s);
    }
}

/* Add s to every feature value. */
void translate_data_rows(data d, float
s)
{
    int i;
    for (i = 0; i < d.X.rows; ++i) {
        translate_array(d.X.vals[i], d.X.cols, s);
    }
}

/* Deep copy of a dataset (X and y duplicated; the boxes pointer is
** shared with the source). */
data copy_data(data d)
{
    data c = { 0 };
    c.w = d.w;
    c.h = d.h;
    c.shallow = 0;
    c.num_boxes = d.num_boxes;
    c.boxes = d.boxes;
    c.X = copy_matrix(d.X);
    c.y = copy_matrix(d.y);
    return c;
}

/* Normalize each feature row in place. */
void normalize_data_rows(data d)
{
    int i;
    for (i = 0; i < d.X.rows; ++i) {
        normalize_array(d.X.vals[i], d.X.cols);
    }
}

/* Shallow view of partition `part` out of `total` near-equal partitions
** (pointer arithmetic into the parent's row arrays; nothing is copied). */
data get_data_part(data d, int part, int total)
{
    data p = { 0 };
    p.shallow = 1;
    p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total;
    p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total;
    p.X.cols = d.X.cols;
    p.y.cols = d.y.cols;
    p.X.vals = d.X.vals + d.X.rows * part / total;
    p.y.vals = d.y.vals + d.y.rows * part / total;
    return p;
}

/* Shallow random sample of num rows (with replacement). */
data get_random_data(data d, int num)
{
    data r = { 0 };
    r.shallow = 1;
    r.X.rows = num;
    r.y.rows = num;
    r.X.cols = d.X.cols;
    r.y.cols = d.y.cols;
    r.X.vals = calloc(num, sizeof(float *));
    r.y.vals = calloc(num, sizeof(float *));
    int i;
    for (i = 0; i < num; ++i) {
        int index = rand() % d.X.rows;
        r.X.vals[i] = d.X.vals[index];
        r.y.vals[i] = d.y.vals[index];
    }
    return r;
}

/* Cross-validation split: fold `part` of `total` becomes the test set,
** everything else the training set.  Returns calloc'd {train, test};
** both are shallow views over d's rows. */
data *split_data(data d, int part, int total)
{
    data *split = calloc(2, sizeof(data));
    int i;
    int start = part * d.X.rows / total;
    int end = (part + 1)*d.X.rows / total;
    data train;
    data test;
    train.shallow = test.shallow = 1;
    test.X.rows = test.y.rows = end - start;
    train.X.rows = train.y.rows = d.X.rows - (end - start);
    train.X.cols = test.X.cols = d.X.cols;
    train.y.cols = test.y.cols = d.y.cols;
    train.X.vals = calloc(train.X.rows, sizeof(float*));
    test.X.vals = calloc(test.X.rows, sizeof(float*));
    train.y.vals = calloc(train.y.rows, sizeof(float*));
    test.y.vals = calloc(test.y.rows, sizeof(float*));
    for (i = 0; i < start; ++i) {
        train.X.vals[i] = d.X.vals[i];
        train.y.vals[i] = d.y.vals[i];
    }
    for (i = start; i < end; ++i) {
        test.X.vals[i - start] = d.X.vals[i];
        test.y.vals[i - start] = d.y.vals[i];
    }
    for (i = end; i < d.X.rows; ++i) {
        train.X.vals[i - (end - start)] = d.X.vals[i];
        train.y.vals[i - (end - start)] = d.y.vals[i];
    }
    split[0] = train;
    split[1] = test;
    return split;
}
se2ramp.c
#include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
#include<omp.h>

/* Strided access to element i of a 1-D double PyArrayObject.
** NOTE(review): direct a->data / a->strides access is the legacy NumPy
** struct API; assumes the arrays are 1-D double -- confirm callers. */
#define IND(a,i) *((double *)(a->data+i*a->strides[0]))

static PyObject *se2ramp(PyObject *self, PyObject *args, PyObject *keywds);

/* se2ramp(rampparams, x[, etc]):
** computes y[i] = goal + pm0*exp(-r0*x[i] + r1) + pm1*exp(-r4*x[i] + r5)
** where rampparams = [goal, r0, r1, pm0, r4, r5, pm1].
** Returns a new double array the same length as x.  The optional `etc`
** argument is accepted for API compatibility but never used. */
static PyObject *se2ramp(PyObject *self, PyObject *args, PyObject *keywds)
{
    PyObject *etc;
    PyArrayObject *x,*y,*rampparams;
    double goal,r0,r1,r4,r5,pm0,pm1;
    int i;
    npy_intp dims[1];

    // etc = PyList_New(0);

    static char *kwlist[] = {"rampparams","x","etc",NULL};

    if(!PyArg_ParseTupleAndKeywords(args,keywds,"OO|O",kwlist,&rampparams,&x,&etc))
    {
        return NULL;
    }

    /* unpack the parameter vector */
    goal = IND(rampparams,0);
    r0   = IND(rampparams,1);
    r1   = IND(rampparams,2);
    pm0  = IND(rampparams,3);
    r4   = IND(rampparams,4);
    r5   = IND(rampparams,5);
    pm1  = IND(rampparams,6);

    dims[0] = x->dimensions[0];

    y = (PyArrayObject *) PyArray_SimpleNew(1,dims,PyArray_DOUBLE);

    /* element-wise model evaluation; iterations are independent */
    #pragma omp parallel for
    for(i=0;i<dims[0];i++)
    {
        IND(y,i) = goal + pm0*exp(-r0*IND(x,i) + r1) + pm1*exp(-r4*IND(x,i) + r5);
    }
    return PyArray_Return(y);
}

static char module_docstring[]="\
This function creates a model that fits a ramp using a rising exponential.\n\
\n\
Parameters\n\
----------\n\
goal: goal as x -> inf\n\
m1,m2: rise exp\n\
t1,t2: time offset\n\
t: Array of time/phase points\n\
\n\
Returns\n\
-------\n\
This function returns an array of y values by combining an eclipse and a rising exponential\n\
\n\
Revisions\n\
---------\n\
2010-07-30 Kevin Stevenson, UCF \n\
kevin218@knights.ucf.edu\n\
Original version\n\
2010-12-24 Nate Lust, UCF\n\
natelust at linux dot com\n\
Converted to C\n\
2018-11-22 Jonathan Fraine, SSI\n\
jfraine at spacescience.org\n\
Updated c extensions to python3, with support for python2.7\n\
";

/* Method table: a single keyword-capable entry point. */
static PyMethodDef module_methods[] = {
    {"se2ramp",(PyCFunction)se2ramp,METH_VARARGS|METH_KEYWORDS,module_docstring},{NULL}};

/* Module init: Python 3 uses PyModuleDef/PyModule_Create, Python 2 uses
** Py_InitModule3; both must call import_array() before any NumPy use. */
PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_se2ramp(void)
#else
initse2ramp(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
    PyObject *module;
    static struct PyModuleDef moduledef = {
        PyModuleDef_HEAD_INIT,
        "se2ramp",             /* m_name */
        module_docstring,      /* m_doc */
        -1,                    /* m_size */
        module_methods,        /* m_methods */
        NULL,                  /* m_reload */
        NULL,                  /* m_traverse */
        NULL,                  /* m_clear */
        NULL,                  /* m_free */
    };
#endif

#if PY_MAJOR_VERSION >= 3
    module = PyModule_Create(&moduledef);
    if (!module)
        return NULL;
    /* Load `numpy` functionality. */
    import_array();
    return module;
#else
    PyObject *m = Py_InitModule3("se2ramp", module_methods, module_docstring);
    if (m == NULL)
        return;
    /* Load `numpy` functionality. */
    import_array();
#endif
}
segment.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS EEEEE GGGG M M EEEEE N N TTTTT % % SS E G MM MM E NN N T % % SSS EEE G GGG M M M EEE N N N T % % SS E G G M M E N NN T % % SSSSS EEEEE GGGG M M EEEEE N N T % % % % % % MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means % % % % Software Design % % Cristy % % April 1993 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segment segments an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % c-means technique. The scale-space filter analyzes the histograms of % the three color components of the image and identifies a set of % classes. The extents of each class is used to coarsely segment the % image with thresholding. The color associated with each class is % determined by the mean color of all pixels within the extents of a % particular class. Finally, any unclassified pixels are assigned to % the closest class with the fuzzy c-means technique. % % The fuzzy c-Means algorithm can be summarized as follows: % % o Build a histogram, one for each color component of the image. 
% % o For each histogram, successively apply the scale-space filter and % build an interval tree of zero crossings in the second derivative % at each scale. Analyze this scale-space ``fingerprint'' to % determine which peaks and valleys in the histogram are most % predominant. % % o The fingerprint defines intervals on the axis of the histogram. % Each interval contains either a minima or a maxima in the original % signal. If each color component lies within the maxima interval, % that pixel is considered ``classified'' and is assigned an unique % class number. % % o Any pixel that fails to be classified in the above thresholding % pass is classified using the fuzzy c-Means technique. It is % assigned to one of the classes discovered in the histogram analysis % phase. % % The fuzzy c-Means technique attempts to cluster a pixel by finding % the local minima of the generalized within group sum of squared error % objective function. A pixel is assigned to the closest class of % which the fuzzy membership has a maximum value. % % Segment is strongly based on software written by Andy Gallo, % University of Delaware. % % The following reference was used in creating this program: % % Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation % Algorithm Based on the Thresholding and the Fuzzy c-Means % Techniques", Pattern Recognition, Volume 23, Number 9, pages % 935-952, 1990. 
%
%
*/

#include "magick/studio.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/string_.h"
#include "magick/thread-private.h"

/*
  Define declarations.
*/
#define MaxDimension  3
#define DeltaTau  0.5f
#if defined(FastClassify)
#define WeightingExponent  2.0
#define SegmentPower(ratio)  (ratio)
#else
#define WeightingExponent  2.5
/* NOTE(review): trailing semicolon in this macro yields an empty statement
   at the use site (sum+=SegmentPower(ratio);) -- harmless there, but the
   macro cannot be used inside a larger expression. */
#define SegmentPower(ratio)  pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
#define Tau  5.2f

/*
  Typedef declarations.
*/
/* One peak/valley interval of a single color-component histogram. */
typedef struct _ExtentPacket
{
  MagickRealType
    center;

  ssize_t
    index,
    left,
    right;
} ExtentPacket;

/* A color class: one RGB extent triple plus its pixel count and id,
   linked into a singly-linked list. */
typedef struct _Cluster
{
  struct _Cluster
    *next;

  ExtentPacket
    red,
    green,
    blue;

  ssize_t
    count,
    id;
} Cluster;

/* Node of the scale-space interval tree of zero crossings. */
typedef struct _IntervalTree
{
  MagickRealType
    tau;

  ssize_t
    left,
    right;

  MagickRealType
    mean_stability,
    stability;

  struct _IntervalTree
    *sibling,
    *child;
} IntervalTree;

/* Histogram smoothed at one scale tau plus its second-derivative
   zero-crossing flags. */
typedef struct _ZeroCrossing
{
  MagickRealType
    tau,
    histogram[256];

  short
    crossings[256];
} ZeroCrossing;

/*
  Constant declarations.
*/
static const int
  Blue = 2,
  Green = 1,
  Red = 0,
  SafeMargin = 3,
  TreeLength = 600;

/*
  Method prototypes.
*/
static MagickRealType
  OptimalTau(const ssize_t *,const double,const double,const double,
    const double,short *);

static ssize_t
  DefineRegion(const short *,ExtentPacket *);

static void
  FreeNodes(IntervalTree *),
  InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
  ScaleSpace(const ssize_t *,const MagickRealType,MagickRealType *),
  ZeroCrossHistogram(MagickRealType *,const MagickRealType,short *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
+   C l a s s i f y                                                           %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Classify() defines one or more classes.  Each pixel is thresholded to
%  determine which class it belongs to.  If the class is not identified it is
%  assigned to the closest class based on the fuzzy c-Means technique.
%
%  The format of the Classify method is:
%
%      MagickBooleanType Classify(Image *image,short **extrema,
%        const MagickRealType cluster_threshold,
%        const MagickRealType weighting_exponent,
%        const MagickBooleanType verbose)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o extrema: Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
%    o cluster_threshold: This MagickRealType represents the minimum number of
%      pixels contained in a hexahedra before it can be considered valid
%      (expressed as a percentage).
%
%    o weighting_exponent: Specifies the membership weighting exponent.
%
%    o verbose: A value greater than zero prints detailed information about
%      the identified classes.
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
  const MagickRealType cluster_threshold,
  const MagickRealType weighting_exponent,const MagickBooleanType verbose)
{
#define SegmentImageTag  "Segment/Image"
/* Cleanup-and-throw helper: releases the cluster list and the (offset)
   squares table before raising the exception. */
#define ThrowClassifyException(severity,tag,label) \
{\
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) \
  { \
    next_cluster=cluster->next; \
    cluster=(Cluster *) RelinquishMagickMemory(cluster); \
  } \
  if (squares != (MagickRealType *) NULL) \
    { \
      squares-=255; \
      free_squares=squares; \
      free_squares=(MagickRealType *) RelinquishMagickMemory(free_squares); \
    } \
  ThrowBinaryException(severity,tag,label); \
}

  CacheView
    *image_view;

  Cluster
    *cluster,
    *head,
    *last_cluster,
    *next_cluster;

  ExceptionInfo
    *exception;

  ExtentPacket
    blue,
    green,
    red;

  MagickOffsetType
    progress;

  MagickRealType
    *free_squares;

  MagickStatusType
    status;

  register ssize_t
    i;

  register MagickRealType
    *squares;

  size_t
    number_clusters;

  ssize_t
    count,
    y;

  /*
    Form clusters: one candidate class per (red, green, blue) extent triple
    found in the histograms.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  squares=(MagickRealType *) NULL;
  (void) memset(&red,0,sizeof(red));
  (void) memset(&green,0,sizeof(green));
  (void) memset(&blue,0,sizeof(blue));
  exception=(&image->exception);
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireQuantumMemory(1,
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster (and accumulate their color sums so
    the centers can be averaged later).
  */
  status=MagickTrue;
  count=0;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      pixel.red=(double) ScaleQuantumToChar(p->red);
      pixel.green=(double) ScaleQuantumToChar(p->green);
      pixel.blue=(double) ScaleQuantumToChar(p->blue);
      /* first cluster whose (margin-expanded) extents contain the pixel
         wins */
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if ((pixel.red >= (double) (cluster->red.left-SafeMargin)) &&
            (pixel.red <= (double) (cluster->red.right+SafeMargin)) &&
            (pixel.green >= (double) (cluster->green.left-SafeMargin)) &&
            (pixel.green <= (double) (cluster->green.right+SafeMargin)) &&
            (pixel.blue >= (double) (cluster->blue.left-SafeMargin)) &&
            (pixel.blue <= (double) (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=pixel.red;
            cluster->green.center+=pixel.green;
            cluster->blue.center+=pixel.blue;
            cluster->count++;
            break;
          }
      p++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Remove clusters that do not meet minimum cluster threshold.
*/
  /* `count` is reused here as the running number of surviving clusters;
     survivors get sequential ids and averaged centers. */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  number_clusters=(size_t) count;
  if (verbose != MagickFalse)
    {
      /*
        Print cluster statistics.
      */
      (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
      (void) FormatLocaleFile(stdout,"===================\n\n");
      (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
        cluster_threshold);
      (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
        weighting_exponent);
      (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
        (double) number_clusters);
      /*
        Print the total number of points per cluster.
      */
      (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
      (void) FormatLocaleFile(stdout,"=============================\n\n");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
          cluster->id,(double) cluster->count);
      /*
        Print the cluster extents.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,
          "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
          cluster->red.left,(double) cluster->red.right,(double)
          cluster->green.left,(double) cluster->green.right,(double)
          cluster->blue.left,(double) cluster->blue.right);
      }
      /*
        Print the cluster center values.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"=====================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
          cluster->red.center,(double) cluster->green.center,(double)
          cluster->blue.center);
      }
      (void) FormatLocaleFile(stdout,"\n");
    }
  if (number_clusters > 256)
    ThrowClassifyException(ImageError,"TooManyClusters",image->filename);
  /*
    Speed up distance calculations: precompute i*i for i in [-255,255],
    addressed through a pointer offset by +255.
  */
  squares=(MagickRealType *) AcquireQuantumMemory(513UL,sizeof(*squares));
  if (squares == (MagickRealType *) NULL)
    ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  squares+=255;
  for (i=(-255); i <= 255; i++)
    squares[i]=(MagickRealType) i*(MagickRealType) i;
  /*
    Allocate image colormap.
*/
  if (AcquireImageColormap(image,number_clusters) == MagickFalse)
    ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* colormap entry i takes cluster i's (rounded) mean color */
  i=0;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
  {
    image->colormap[i].red=ScaleCharToQuantum((unsigned char)
      (cluster->red.center+0.5));
    image->colormap[i].green=ScaleCharToQuantum((unsigned char)
      (cluster->green.center+0.5));
    image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
      (cluster->blue.center+0.5));
    i++;
  }
  /*
    Do course grain classes.
  */
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Cluster
      *cluster;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      SetPixelIndex(indexes+x,0);
      pixel.red=(double) ScaleQuantumToChar(q->red);
      pixel.green=(double) ScaleQuantumToChar(q->green);
      pixel.blue=(double) ScaleQuantumToChar(q->blue);
      /* threshold pass: first cluster whose extents contain the pixel */
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        if ((pixel.red >= (double) (cluster->red.left-SafeMargin)) &&
            (pixel.red <= (double) (cluster->red.right+SafeMargin)) &&
            (pixel.green >= (double) (cluster->green.left-SafeMargin)) &&
            (pixel.green <= (double) (cluster->green.right+SafeMargin)) &&
            (pixel.blue >= (double) (cluster->blue.left-SafeMargin)) &&
            (pixel.blue <= (double) (cluster->blue.right+SafeMargin)))
          {
            /*
              Classify this pixel.
            */
            SetPixelIndex(indexes+x,cluster->id);
            break;
          }
      }
      if (cluster == (Cluster *) NULL)
        {
          MagickRealType
            distance_squared,
            local_minima,
            numerator,
            ratio,
            sum;

          register ssize_t
            j,
            k;

          /*
            Compute fuzzy membership: assign the pixel to the colormap
            entry with the maximum membership value.
          */
          local_minima=0.0;
          for (j=0; j < (ssize_t) image->colors; j++)
          {
            sum=0.0;
            p=image->colormap+j;
            distance_squared=
              squares[(ssize_t) (pixel.red-ScaleQuantumToChar(p->red))]+
              squares[(ssize_t) (pixel.green-ScaleQuantumToChar(p->green))]+
              squares[(ssize_t) (pixel.blue-ScaleQuantumToChar(p->blue))];
            numerator=distance_squared;
            for (k=0; k < (ssize_t) image->colors; k++)
            {
              p=image->colormap+k;
              distance_squared=
                squares[(ssize_t) (pixel.red-ScaleQuantumToChar(p->red))]+
                squares[(ssize_t) (pixel.green-ScaleQuantumToChar(p->green))]+
                squares[(ssize_t) (pixel.blue-ScaleQuantumToChar(p->blue))];
              ratio=numerator/distance_squared;
              sum+=SegmentPower(ratio);
            }
            if ((sum != 0.0) && ((1.0/sum) > local_minima))
              {
                /*
                  Classify this pixel.
                */
                local_minima=1.0/sum;
                SetPixelIndex(indexes+x,j);
              }
          }
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  status&=SyncImage(image);
  /*
    Relinquish resources.
*/ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; cluster=(Cluster *) RelinquishMagickMemory(cluster); } squares-=255; free_squares=squares; free_squares=(MagickRealType *) RelinquishMagickMemory(free_squares); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n s o l i d a t e C r o s s i n g s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConsolidateCrossings() guarantees that an even number of zero crossings % always lie between two crossings. % % The format of the ConsolidateCrossings method is: % % ConsolidateCrossings(ZeroCrossing *zero_crossing, % const size_t number_crossings) % % A description of each parameter follows. % % o zero_crossing: Specifies an array of structures of type ZeroCrossing. % % o number_crossings: This size_t specifies the number of elements % in the zero_crossing array. % */ static void ConsolidateCrossings(ZeroCrossing *zero_crossing, const size_t number_crossings) { register ssize_t i, j, k, l; ssize_t center, correct, count, left, right; /* Consolidate zero crossings. */ for (i=(ssize_t) number_crossings-1; i >= 0; i--) for (j=0; j <= 255; j++) { if (zero_crossing[i].crossings[j] == 0) continue; /* Find the entry that is closest to j and still preserves the property that there are an even number of crossings between intervals. */ for (k=j-1; k > 0; k--) if (zero_crossing[i+1].crossings[k] != 0) break; left=MagickMax(k,0); center=j; for (k=j+1; k < 255; k++) if (zero_crossing[i+1].crossings[k] != 0) break; right=MagickMin(k,255); /* K is the zero crossing just left of j. */ for (k=j-1; k > 0; k--) if (zero_crossing[i].crossings[k] != 0) break; if (k < 0) k=0; /* Check center for an even number of crossings between k and j. 
*/ correct=(-1); if (zero_crossing[i+1].crossings[j] != 0) { count=0; for (l=k+1; l < center; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (center != k)) correct=center; } /* Check left for an even number of crossings between k and j. */ if (correct == -1) { count=0; for (l=k+1; l < left; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (left != k)) correct=left; } /* Check right for an even number of crossings between k and j. */ if (correct == -1) { count=0; for (l=k+1; l < right; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (right != k)) correct=right; } l=(ssize_t) zero_crossing[i].crossings[j]; zero_crossing[i].crossings[j]=0; if (correct != -1) zero_crossing[i].crossings[correct]=(short) l; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e f i n e R e g i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DefineRegion() defines the left and right boundaries of a peak region. % % The format of the DefineRegion method is: % % ssize_t DefineRegion(const short *extrema,ExtentPacket *extents) % % A description of each parameter follows. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o extents: This pointer to an ExtentPacket represent the extends % of a particular peak or valley of a color component. % */ static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents) { /* Initialize to default values. */ extents->left=0; extents->center=0.0; extents->right=255; /* Find the left side (maxima). */ for ( ; extents->index <= 255; extents->index++) if (extrema[extents->index] > 0) break; if (extents->index > 255) return(MagickFalse); /* no left side - no region exists */ extents->left=extents->index; /* Find the right side (minima). 
*/ for ( ; extents->index <= 255; extents->index++) if (extrema[extents->index] < 0) break; extents->right=extents->index-1; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e r i v a t i v e H i s t o g r a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DerivativeHistogram() determines the derivative of the histogram using % central differencing. % % The format of the DerivativeHistogram method is: % % DerivativeHistogram(const MagickRealType *histogram, % MagickRealType *derivative) % % A description of each parameter follows. % % o histogram: Specifies an array of MagickRealTypes representing the number % of pixels for each intensity of a particular color component. % % o derivative: This array of MagickRealTypes is initialized by % DerivativeHistogram to the derivative of the histogram using central % differencing. % */ static void DerivativeHistogram(const MagickRealType *histogram, MagickRealType *derivative) { register ssize_t i, n; /* Compute endpoints using second order polynomial interpolation. */ n=255; derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]); derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]); /* Compute derivative using central differencing. */ for (i=1; i < n; i++) derivative[i]=(histogram[i+1]-histogram[i-1])/2.0; return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e D y n a m i c T h r e s h o l d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDynamicThreshold() returns the dynamic threshold for an image. 
% % The format of the GetImageDynamicThreshold method is: % % MagickBooleanType GetImageDynamicThreshold(const Image *image, % const double cluster_threshold,const double smooth_threshold, % MagickPixelPacket *pixel,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o cluster_threshold: This MagickRealType represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o smooth_threshold: the smoothing threshold eliminates noise in the second % derivative of the histogram. As the value is increased, you can expect a % smoother second derivative. % % o pixel: return the dynamic threshold here. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image, const double cluster_threshold,const double smooth_threshold, MagickPixelPacket *pixel,ExceptionInfo *exception) { Cluster *background, *cluster, *object, *head, *last_cluster, *next_cluster; ExtentPacket blue, green, red; MagickBooleanType proceed; MagickRealType threshold; register const PixelPacket *p; register ssize_t i, x; short *extrema[MaxDimension]; ssize_t count, *histogram[MaxDimension], y; /* Allocate histogram and extrema. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); GetMagickPixelPacket(image,pixel); for (i=0; i < MaxDimension; i++) { histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram)); extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram)); if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL)) { for (i-- ; i >= 0; i--) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } } /* Initialize histogram. */ InitializeHistogram(image,histogram,exception); (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]); (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]); (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]); /* Form clusters. */ cluster=(Cluster *) NULL; head=(Cluster *) NULL; (void) memset(&red,0,sizeof(red)); (void) memset(&green,0,sizeof(green)); (void) memset(&blue,0,sizeof(blue)); while (DefineRegion(extrema[Red],&red) != 0) { green.index=0; while (DefineRegion(extrema[Green],&green) != 0) { blue.index=0; while (DefineRegion(extrema[Blue],&blue) != 0) { /* Allocate a new class. 
*/ if (head != (Cluster *) NULL) { cluster->next=(Cluster *) AcquireQuantumMemory(1, sizeof(*cluster->next)); cluster=cluster->next; } else { cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster)); head=cluster; } if (cluster == (Cluster *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); return(MagickFalse); } /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; } } } if (head == (Cluster *) NULL) { /* No classes were identified-- create one. */ cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster)); if (cluster == (Cluster *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; head=cluster; } /* Count the pixels for each cluster. */ count=0; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket pixel; pixel.red=(double) ScaleQuantumToChar(p->red); pixel.green=(double) ScaleQuantumToChar(p->green); pixel.blue=(double) ScaleQuantumToChar(p->blue); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) if ((pixel.red >= (double) (cluster->red.left-SafeMargin)) && (pixel.red <= (double) (cluster->red.right+SafeMargin)) && (pixel.green >= (double) (cluster->green.left-SafeMargin)) && (pixel.green <= (double) (cluster->green.right+SafeMargin)) && (pixel.blue >= (double) (cluster->blue.left-SafeMargin)) && (pixel.blue <= (double) (cluster->blue.right+SafeMargin))) { /* Count this pixel. 
*/ count++; cluster->red.center+=pixel.red; cluster->green.center+=pixel.green; cluster->blue.center+=pixel.blue; cluster->count++; break; } p++; } proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y, 2*image->rows); if (proceed == MagickFalse) break; } /* Remove clusters that do not meet minimum cluster threshold. */ count=0; last_cluster=head; next_cluster=head; for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; if ((cluster->count > 0) && (cluster->count >= (count*cluster_threshold/100.0))) { /* Initialize cluster. */ cluster->id=count; cluster->red.center/=cluster->count; cluster->green.center/=cluster->count; cluster->blue.center/=cluster->count; count++; last_cluster=cluster; continue; } /* Delete cluster. */ if (cluster == head) head=next_cluster; else last_cluster->next=next_cluster; cluster=(Cluster *) RelinquishMagickMemory(cluster); } object=head; background=head; if (count > 1) { object=head->next; for (cluster=object; cluster->next != (Cluster *) NULL; ) { if (cluster->count < object->count) object=cluster; cluster=cluster->next; } background=head->next; for (cluster=background; cluster->next != (Cluster *) NULL; ) { if (cluster->count > background->count) background=cluster; cluster=cluster->next; } } if (background != (Cluster *) NULL) { threshold=(background->red.center+object->red.center)/2.0; pixel->red=(MagickRealType) ScaleCharToQuantum((unsigned char) (threshold+0.5)); threshold=(background->green.center+object->green.center)/2.0; pixel->green=(MagickRealType) ScaleCharToQuantum((unsigned char) (threshold+0.5)); threshold=(background->blue.center+object->blue.center)/2.0; pixel->blue=(MagickRealType) ScaleCharToQuantum((unsigned char) (threshold+0.5)); } /* Relinquish resources. 
*/ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; cluster=(Cluster *) RelinquishMagickMemory(cluster); } for (i=0; i < MaxDimension; i++) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I n i t i a l i z e H i s t o g r a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InitializeHistogram() computes the histogram for an image. % % The format of the InitializeHistogram method is: % % InitializeHistogram(const Image *image,ssize_t **histogram) % % A description of each parameter follows. % % o image: Specifies a pointer to an Image structure; returned from % ReadImage. % % o histogram: Specifies an array of integers representing the number % of pixels for each intensity of a particular color component. % */ static void InitializeHistogram(const Image *image,ssize_t **histogram, ExceptionInfo *exception) { register const PixelPacket *p; register ssize_t i, x; ssize_t y; /* Initialize histogram. 
*/ for (i=0; i <= 255; i++) { histogram[Red][i]=0; histogram[Green][i]=0; histogram[Blue][i]=0; } for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]++; histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]++; histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]++; p++; } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I n i t i a l i z e I n t e r v a l T r e e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InitializeIntervalTree() initializes an interval tree from the lists of % zero crossings. % % The format of the InitializeIntervalTree method is: % % InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes, % IntervalTree *node) % % A description of each parameter follows. % % o zero_crossing: Specifies an array of structures of type ZeroCrossing. % % o number_crossings: This size_t specifies the number of elements % in the zero_crossing array. 
% */ static void InitializeList(IntervalTree **list,ssize_t *number_nodes, IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->child == (IntervalTree *) NULL) list[(*number_nodes)++]=node; InitializeList(list,number_nodes,node->sibling); InitializeList(list,number_nodes,node->child); } static void MeanStability(IntervalTree *node) { register IntervalTree *child; if (node == (IntervalTree *) NULL) return; node->mean_stability=0.0; child=node->child; if (child != (IntervalTree *) NULL) { register ssize_t count; register MagickRealType sum; sum=0.0; count=0; for ( ; child != (IntervalTree *) NULL; child=child->sibling) { sum+=child->stability; count++; } node->mean_stability=sum/(MagickRealType) count; } MeanStability(node->sibling); MeanStability(node->child); } static void Stability(IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->child == (IntervalTree *) NULL) node->stability=0.0; else node->stability=node->tau-(node->child)->tau; Stability(node->sibling); Stability(node->child); } static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing, const size_t number_crossings) { IntervalTree *head, **list, *node, *root; register ssize_t i; ssize_t j, k, left, number_nodes; /* Allocate interval tree. */ list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength, sizeof(*list)); if (list == (IntervalTree **) NULL) return((IntervalTree *) NULL); /* The root is the entire histogram. */ root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root)); root->child=(IntervalTree *) NULL; root->sibling=(IntervalTree *) NULL; root->tau=0.0; root->left=0; root->right=255; root->mean_stability=0.0; root->stability=0.0; (void) memset(list,0,TreeLength*sizeof(*list)); for (i=(-1); i < (ssize_t) number_crossings; i++) { /* Initialize list with all nodes with no children. */ number_nodes=0; InitializeList(list,&number_nodes,root); /* Split list. 
*/ for (j=0; j < number_nodes; j++) { head=list[j]; left=head->left; node=head; for (k=head->left+1; k < head->right; k++) { if (zero_crossing[i+1].crossings[k] != 0) { if (node == head) { node->child=(IntervalTree *) AcquireQuantumMemory(1, sizeof(*node->child)); node=node->child; } else { node->sibling=(IntervalTree *) AcquireQuantumMemory(1, sizeof(*node->sibling)); node=node->sibling; } if (node == (IntervalTree *) NULL) { list=(IntervalTree **) RelinquishMagickMemory(list); FreeNodes(root); return((IntervalTree *) NULL); } node->tau=zero_crossing[i+1].tau; node->child=(IntervalTree *) NULL; node->sibling=(IntervalTree *) NULL; node->left=left; node->right=k; left=k; } } if (left != head->left) { node->sibling=(IntervalTree *) AcquireQuantumMemory(1, sizeof(*node->sibling)); node=node->sibling; if (node == (IntervalTree *) NULL) { list=(IntervalTree **) RelinquishMagickMemory(list); FreeNodes(root); return((IntervalTree *) NULL); } node->tau=zero_crossing[i+1].tau; node->child=(IntervalTree *) NULL; node->sibling=(IntervalTree *) NULL; node->left=left; node->right=head->right; } } } /* Determine the stability: difference between a nodes tau and its child. */ Stability(root->child); MeanStability(root->child); list=(IntervalTree **) RelinquishMagickMemory(list); return(root); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p t i m a l T a u % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OptimalTau() finds the optimal tau for each band of the histogram. % % The format of the OptimalTau method is: % % MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau, % const double min_tau,const double delta_tau, % const double smooth_threshold,short *extrema) % % A description of each parameter follows. % % o histogram: Specifies an array of integers representing the number % of pixels for each intensity of a particular color component. 
% % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % */ static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes, IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->stability >= node->mean_stability) { list[(*number_nodes)++]=node; ActiveNodes(list,number_nodes,node->sibling); } else { ActiveNodes(list,number_nodes,node->sibling); ActiveNodes(list,number_nodes,node->child); } } static void FreeNodes(IntervalTree *node) { if (node == (IntervalTree *) NULL) return; FreeNodes(node->sibling); FreeNodes(node->child); node=(IntervalTree *) RelinquishMagickMemory(node); } static MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau, const double min_tau,const double delta_tau,const double smooth_threshold, short *extrema) { IntervalTree **list, *node, *root; MagickBooleanType peak; MagickRealType average_tau, *derivative, *second_derivative, tau, value; register ssize_t i, x; size_t count, number_crossings; ssize_t index, j, k, number_nodes; ZeroCrossing *zero_crossing; /* Allocate interval tree. */ list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength, sizeof(*list)); if (list == (IntervalTree **) NULL) return(0.0); /* Allocate zero crossing list. */ count=(size_t) ((max_tau-min_tau)/delta_tau)+2; zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count, sizeof(*zero_crossing)); if (zero_crossing == (ZeroCrossing *) NULL) { list=(IntervalTree **) RelinquishMagickMemory(list); return(0.0); } for (i=0; i < (ssize_t) count; i++) zero_crossing[i].tau=(-1.0); /* Initialize zero crossing list. 
*/ derivative=(MagickRealType *) AcquireCriticalMemory(256*sizeof(*derivative)); second_derivative=(MagickRealType *) AcquireCriticalMemory(256* sizeof(*second_derivative)); i=0; for (tau=max_tau; tau >= min_tau; tau-=delta_tau) { zero_crossing[i].tau=tau; ScaleSpace(histogram,tau,zero_crossing[i].histogram); DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); i++; } /* Add an entry for the original histogram. */ zero_crossing[i].tau=0.0; for (j=0; j <= 255; j++) zero_crossing[i].histogram[j]=(MagickRealType) histogram[j]; DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); number_crossings=(size_t) i; derivative=(MagickRealType *) RelinquishMagickMemory(derivative); second_derivative=(MagickRealType *) RelinquishMagickMemory(second_derivative); /* Ensure the scale-space fingerprints form lines in scale-space, not loops. */ ConsolidateCrossings(zero_crossing,number_crossings); /* Force endpoints to be included in the interval. */ for (i=0; i <= (ssize_t) number_crossings; i++) { for (j=0; j < 255; j++) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]); for (j=255; j > 0; j--) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]); } /* Initialize interval tree. */ root=InitializeIntervalTree(zero_crossing,number_crossings); if (root == (IntervalTree *) NULL) { zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(0.0); } /* Find active nodes: stability is greater (or equal) to the mean stability of its children. 
*/ number_nodes=0; ActiveNodes(list,&number_nodes,root->child); /* Initialize extrema. */ for (i=0; i <= 255; i++) extrema[i]=0; for (i=0; i < number_nodes; i++) { /* Find this tau in zero crossings list. */ k=0; node=list[i]; for (j=0; j <= (ssize_t) number_crossings; j++) if (zero_crossing[j].tau == node->tau) k=j; /* Find the value of the peak. */ peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue : MagickFalse; index=node->left; value=zero_crossing[k].histogram[index]; for (x=node->left; x <= node->right; x++) { if (peak != MagickFalse) { if (zero_crossing[k].histogram[x] > value) { value=zero_crossing[k].histogram[x]; index=x; } } else if (zero_crossing[k].histogram[x] < value) { value=zero_crossing[k].histogram[x]; index=x; } } for (x=node->left; x <= node->right; x++) { if (index == 0) index=256; if (peak != MagickFalse) extrema[x]=(short) index; else extrema[x]=(short) (-index); } } /* Determine the average tau. */ average_tau=0.0; for (i=0; i < number_nodes; i++) average_tau+=list[i]->tau; average_tau*=PerceptibleReciprocal((MagickRealType) number_nodes); /* Relinquish resources. */ FreeNodes(root); zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(average_tau); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S c a l e S p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleSpace() performs a scale-space filter on the 1D histogram. % % The format of the ScaleSpace method is: % % ScaleSpace(const ssize_t *histogram,const MagickRealType tau, % MagickRealType *scale_histogram) % % A description of each parameter follows. % % o histogram: Specifies an array of MagickRealTypes representing the number % of pixels for each intensity of a particular color component. 
% */ static void ScaleSpace(const ssize_t *histogram,const MagickRealType tau, MagickRealType *scale_histogram) { double alpha, beta, *gamma, sum; register ssize_t u, x; gamma=(double *) AcquireQuantumMemory(256,sizeof(*gamma)); if (gamma == (double *) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToAllocateGammaMap"); alpha=1.0/(tau*sqrt(2.0*MagickPI)); beta=(-1.0/(2.0*tau*tau)); for (x=0; x <= 255; x++) gamma[x]=0.0; for (x=0; x <= 255; x++) { gamma[x]=exp((double) beta*x*x); if (gamma[x] < MagickEpsilon) break; } for (x=0; x <= 255; x++) { sum=0.0; for (u=0; u <= 255; u++) sum+=(double) histogram[u]*gamma[MagickAbsoluteValue(x-u)]; scale_histogram[x]=(MagickRealType) (alpha*sum); } gamma=(double *) RelinquishMagickMemory(gamma); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e g m e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SegmentImage() segment an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % C-means technique. % % The format of the SegmentImage method is: % % MagickBooleanType SegmentImage(Image *image, % const ColorspaceType colorspace,const MagickBooleanType verbose, % const double cluster_threshold,const double smooth_threshold) % % A description of each parameter follows. % % o image: the image. % % o colorspace: Indicate the colorspace. % % o verbose: Set to MagickTrue to print detailed information about the % identified classes. % % o cluster_threshold: This represents the minimum number of pixels % contained in a hexahedra before it can be considered valid (expressed % as a percentage). % % o smooth_threshold: the smoothing threshold eliminates noise in the second % derivative of the histogram. As the value is increased, you can expect a % smoother second derivative. 
% */ MagickExport MagickBooleanType SegmentImage(Image *image, const ColorspaceType colorspace,const MagickBooleanType verbose, const double cluster_threshold,const double smooth_threshold) { ColorspaceType previous_colorspace; MagickBooleanType status; register ssize_t i; short *extrema[MaxDimension]; ssize_t *histogram[MaxDimension]; /* Allocate histogram and extrema. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); for (i=0; i < MaxDimension; i++) { histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram)); extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema)); if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL)) { for (i-- ; i >= 0; i--) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed", image->filename) } } /* Initialize histogram. */ previous_colorspace=image->colorspace; (void) TransformImageColorspace(image,colorspace); InitializeHistogram(image,histogram,&image->exception); (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau, smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]); (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau, smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]); (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau, smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]); /* Classify using the fuzzy c-Means technique. */ status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose); (void) TransformImageColorspace(image,previous_colorspace); /* Relinquish resources. 
*/ for (i=0; i < MaxDimension; i++) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Z e r o C r o s s H i s t o g r a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ZeroCrossHistogram() find the zero crossings in a histogram and marks % directions as: 1 is negative to positive; 0 is zero crossing; and -1 % is positive to negative. % % The format of the ZeroCrossHistogram method is: % % ZeroCrossHistogram(MagickRealType *second_derivative, % const MagickRealType smooth_threshold,short *crossings) % % A description of each parameter follows. % % o second_derivative: Specifies an array of MagickRealTypes representing the % second derivative of the histogram of a particular color component. % % o crossings: This array of integers is initialized with % -1, 0, or 1 representing the slope of the first derivative of the % of a particular color component. % */ static void ZeroCrossHistogram(MagickRealType *second_derivative, const MagickRealType smooth_threshold,short *crossings) { register ssize_t i; ssize_t parity; /* Merge low numbers to zero to help prevent noise. */ for (i=0; i <= 255; i++) if ((second_derivative[i] < smooth_threshold) && (second_derivative[i] >= -smooth_threshold)) second_derivative[i]=0.0; /* Mark zero crossings. */ parity=0; for (i=0; i <= 255; i++) { crossings[i]=0; if (second_derivative[i] < 0.0) { if (parity > 0) crossings[i]=(-1); parity=1; } else if (second_derivative[i] > 0.0) { if (parity < 0) crossings[i]=1; parity=(-1); } } }
MeshRefiner.h
/** * @file * This file is part of SeisSol. * * @author Sebastian Rettenberger (sebastian.rettenberger AT tum.de, http://www5.in.tum.de/wiki/index.php/Sebastian_Rettenberger) * * @section LICENSE * Copyright (c) 2015, SeisSol Group * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* * @section DESCRIPTION */ #ifndef MESH_REFINER_H_ #define MESH_REFINER_H_ #include <cstring> #include "Geometry/MeshReader.h" #include "RefinerUtils.h" namespace seissol { namespace refinement { //------------------------------------------------------------------------------ template<typename T> class MeshRefiner { private: // m_cells contains the indices of the cells unsigned int* m_cells; T* m_vertices; size_t m_numSubCells; size_t m_numVertices; static const unsigned int kIndicesPerCell = 4; const unsigned int kSubCellsPerCell; public: MeshRefiner(const MeshReader& meshReader, const TetrahedronRefiner<T>& tetRefiner); MeshRefiner(const std::vector<const Element *>& subElements, const std::vector<const Vertex *>& subVertices, const std::map<int, int>& oldToNewVertexMap, const TetrahedronRefiner<T>& tetRefiner); ~MeshRefiner(); const unsigned int* getCellData() const; const T* getVertexData() const; std::size_t getNumCells() const; std::size_t getNumVertices() const; }; //------------------------------------------------------------------------------ template<typename T> MeshRefiner<T>::MeshRefiner( const MeshReader& meshReader, const TetrahedronRefiner<T>& tetRefiner) : kSubCellsPerCell(tetRefiner.getDivisionCount()) { using std::size_t; const size_t kInVertexCount = meshReader.getVertices().size(); const size_t kInCellCount = meshReader.getElements().size(); m_numSubCells = kInCellCount * kSubCellsPerCell; const unsigned int additionalVertices = tetRefiner.additionalVerticesPerCell(); m_numVertices = kInVertexCount + kInCellCount * additionalVertices; m_cells = new unsigned int[m_numSubCells * kIndicesPerCell]; m_vertices = new T[m_numVertices * 3]; const std::vector<Vertex>& kVertices = meshReader.getVertices(); const std::vector<Element>& kElements = meshReader.getElements(); // Copy original vertices #ifdef _OPENMP #pragma omp parallel for #endif // _OPENMP for (unsigned int i = 0; i < kInVertexCount; i++) { memcpy(&m_vertices[i*3], kVertices[i].coords, 
sizeof(double)*3); } // The pointer to the new vertices T* newVertices = &m_vertices[kInVertexCount*3]; // Start the actual cell-refinement #ifdef _OPENMP #pragma omp parallel { #endif // _OPENMPI glm::tvec3<T>* newVerticesTmp = new glm::tvec3<T>[additionalVertices]; Tetrahedron<T>* newTetsTmp = new Tetrahedron<T>[kSubCellsPerCell]; #ifdef _OPENMP #pragma omp for schedule(static) nowait #endif // _OPENMP for (size_t c = 0; c < kInCellCount; ++c) { // Build a Terahedron containing the coordinates of the vertices. Tetrahedron<T> inTet = Tetrahedron<T>( kVertices[kElements[c].vertices[0]].coords, kVertices[kElements[c].vertices[1]].coords, kVertices[kElements[c].vertices[2]].coords, kVertices[kElements[c].vertices[3]].coords, kElements[c].vertices[0], kElements[c].vertices[1], kElements[c].vertices[2], kElements[c].vertices[3]); // Generate the tets tetRefiner.refine(inTet, kInVertexCount + c*additionalVertices, newTetsTmp, newVerticesTmp); // Copy new vertices for (unsigned int i = 0; i < additionalVertices; i++) { memcpy(&newVertices[(c*additionalVertices + i) * 3], glm::value_ptr(newVerticesTmp[i]), sizeof(T)*3); } // Copy tets for (unsigned int i = 0; i < kSubCellsPerCell; i++) { m_cells[(c*kSubCellsPerCell + i) * 4] = newTetsTmp[i].i; m_cells[(c*kSubCellsPerCell + i) * 4 + 1] = newTetsTmp[i].j; m_cells[(c*kSubCellsPerCell + i) * 4 + 2] = newTetsTmp[i].k; m_cells[(c*kSubCellsPerCell + i) * 4 + 3] = newTetsTmp[i].l; } } delete [] newVerticesTmp; delete [] newTetsTmp; #ifdef _OPENMP } #endif }; template<typename T> MeshRefiner<T>::MeshRefiner( const std::vector<const Element *>& subElements, const std::vector<const Vertex *>& subVertices, const std::map<int, int>& oldToNewVertexMap, const TetrahedronRefiner<T>& tetRefiner) : kSubCellsPerCell(tetRefiner.getDivisionCount()) { using std::size_t; const size_t kInVertexCount = subVertices.size(); const size_t kInCellCount = subElements.size(); m_numSubCells = kInCellCount * kSubCellsPerCell; const unsigned int 
additionalVertices = tetRefiner.additionalVerticesPerCell(); m_numVertices = kInVertexCount + kInCellCount * additionalVertices; m_cells = new unsigned int[m_numSubCells * kIndicesPerCell]; m_vertices = new T[m_numVertices * 3]; const std::vector<const Vertex*>& kVertices = subVertices; const std::vector<const Element*>& kElements = subElements; // Copy original vertices #ifdef _OPENMP #pragma omp parallel for #endif // _OPENMP for (unsigned int i = 0; i < kInVertexCount; i++) { memcpy(&m_vertices[i*3], kVertices[i]->coords, sizeof(double)*3); } // The pointer to the new vertices T* newVertices = &m_vertices[kInVertexCount*3]; // Start the actual cell-refinement #ifdef _OPENMP #pragma omp parallel shared(oldToNewVertexMap) { #endif // _OPENMPI glm::tvec3<T>* newVerticesTmp = new glm::tvec3<T>[additionalVertices]; Tetrahedron<T>* newTetsTmp = new Tetrahedron<T>[kSubCellsPerCell]; #ifdef _OPENMP #pragma omp for schedule(static) nowait #endif // _OPENMP for (size_t c = 0; c < kInCellCount; ++c) { // Build a Terahedron containing the coordinates of the vertices. 
Tetrahedron<T> inTet = Tetrahedron<T>( kVertices[oldToNewVertexMap.at(kElements[c]->vertices[0])]->coords, kVertices[oldToNewVertexMap.at(kElements[c]->vertices[1])]->coords, kVertices[oldToNewVertexMap.at(kElements[c]->vertices[2])]->coords, kVertices[oldToNewVertexMap.at(kElements[c]->vertices[3])]->coords, oldToNewVertexMap.at(kElements[c]->vertices[0]), oldToNewVertexMap.at(kElements[c]->vertices[1]), oldToNewVertexMap.at(kElements[c]->vertices[2]), oldToNewVertexMap.at(kElements[c]->vertices[3])); // Generate the tets tetRefiner.refine(inTet, kInVertexCount + c*additionalVertices, newTetsTmp, newVerticesTmp); // Copy new vertices for (unsigned int i = 0; i < additionalVertices; i++) { memcpy(&newVertices[(c*additionalVertices + i) * 3], glm::value_ptr(newVerticesTmp[i]), sizeof(T)*3); } // Copy tets for (unsigned int i = 0; i < kSubCellsPerCell; i++) { m_cells[(c*kSubCellsPerCell + i) * 4] = newTetsTmp[i].i; m_cells[(c*kSubCellsPerCell + i) * 4 + 1] = newTetsTmp[i].j; m_cells[(c*kSubCellsPerCell + i) * 4 + 2] = newTetsTmp[i].k; m_cells[(c*kSubCellsPerCell + i) * 4 + 3] = newTetsTmp[i].l; } } delete [] newVerticesTmp; delete [] newTetsTmp; #ifdef _OPENMP } #endif }; template<typename T> MeshRefiner<T>::~MeshRefiner() { delete [] m_cells; delete [] m_vertices; } //------------------------------------------------------------------------------ template<typename T> const unsigned int* MeshRefiner<T>::getCellData() const { return &m_cells[0]; } //------------------------------------------------------------------------------ template<typename T> const T* MeshRefiner<T>::getVertexData() const { return &m_vertices[0]; } //------------------------------------------------------------------------------ template<typename T> std::size_t MeshRefiner<T>::getNumCells() const { return m_numSubCells; } //------------------------------------------------------------------------------ template<typename T> std::size_t MeshRefiner<T>::getNumVertices() const { return m_numVertices; } 
//------------------------------------------------------------------------------ } // namespace } #endif // MESH_REFINER_H_
triplet_iw.c
/* Copyright (C) 2016 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/

#include <stddef.h>
#include <math.h>
#include <phonoc_utils.h>
#include <triplet_h/triplet.h>
#include <triplet_h/triplet_iw.h>
#include <tetrahedron_method.h>

/* Fill freq_vertices with f1+f2, -f1+f2 and f1-f2 for every corner of the
 * 24 tetrahedra, for band pair (b1, b2). Negative phonon frequencies are
 * clamped to zero before combining. */
static void set_freq_vertices(double freq_vertices[3][24][4],
                              const double *frequencies,
                              TPLCONST size_t vertices[2][24][4],
                              const int num_band,
                              const int b1,
                              const int b2);
/* Compute the three tetrahedron integration weights g[0..2] at frequency f0;
 * returns 1 (iw_zero) when f0 lies outside all three frequency ranges. */
static int set_g(double g[3],
                 const double f0,
                 TPLCONST double freq_vertices[3][24][4]);
/* Return 1 if f0 lies within [min, max] of the given vertex frequencies. */
static int in_tetrahedra(const double f0,
                         TPLCONST double freq_vertices[24][4]);
/* Collect the grid-point indices of the 24 tetrahedra corners around the
 * second and third q-points of the triplet. */
static void get_triplet_tetrahedra_vertices(
  size_t vertices[2][24][4],
  TPLCONST int tp_relative_grid_address[2][24][4][3],
  const int mesh[3],
  const size_t triplet[3],
  TPLCONST int (*bz_grid_address)[3],
  const size_t *bz_map);

/* Tetrahedron-method integration weights for one q-point triplet.
 *
 * iw       : output weights, laid out as num_iw consecutive blocks of size
 *            num_band_prod (2 blocks: g0 and g1-g2; a third g0+g1+g2 block
 *            when num_iw == 3).
 * iw_zero  : per-element flag set to 1 where all weights vanish.
 * The (b1, b2) band pairs are distributed over OpenMP threads when
 * openmp_per_bands is non-zero. */
void tpi_get_integration_weight(double *iw,
                                char *iw_zero,
                                const double *frequency_points,
                                const size_t num_band0,
                                TPLCONST int tp_relative_grid_address[2][24][4][3],
                                const int mesh[3],
                                const size_t triplets[3],
                                const size_t num_triplets,
                                TPLCONST int (*bz_grid_address)[3],
                                const size_t *bz_map,
                                const double *frequencies,
                                const size_t num_band,
                                const size_t num_iw,
                                const int openmp_per_bands)
{
  size_t j, b1, b2, b12, num_band_prod, adrs_shift;
  size_t vertices[2][24][4];
  double g[3];
  double freq_vertices[3][24][4];

  /* Tetrahedra corners are the same for all band pairs; compute once. */
  get_triplet_tetrahedra_vertices(vertices,
                                  tp_relative_grid_address,
                                  mesh,
                                  triplets,
                                  bz_grid_address,
                                  bz_map);
  /* Stride between the g0, g1-g2 (and g0+g1+g2) output blocks. */
  num_band_prod = num_triplets * num_band0 * num_band * num_band;
#pragma omp parallel for private(j, b1, b2, adrs_shift, g, freq_vertices) if (openmp_per_bands)
  for (b12 = 0; b12 < num_band * num_band; b12++) {
    /* Decode the flattened band-pair index. */
    b1 = b12 / num_band;
    b2 = b12 % num_band;
    set_freq_vertices(freq_vertices, frequencies, vertices, num_band, b1, b2);
    for (j = 0; j < num_band0; j++) {
      adrs_shift = j * num_band * num_band + b1 * num_band + b2;
      iw_zero[adrs_shift] = set_g(g, frequency_points[j], freq_vertices);
      iw[adrs_shift] = g[0];
      adrs_shift += num_band_prod;
      iw[adrs_shift] = g[1] - g[2];
      if (num_iw == 3) {
        adrs_shift += num_band_prod;
        iw[adrs_shift] = g[0] + g[1] + g[2];
      }
    }
  }
}

/* Smearing-method analogue of tpi_get_integration_weight: Gaussian weights
 * of width sigma for the three frequency combinations f0-f1-f2, f0+f1-f2
 * and f0-f1+f2. When cutoff > 0, weights are skipped (and flagged in
 * iw_zero) if all three combinations are farther than cutoff from zero.
 * const_adrs_shift is the stride between the output blocks. */
void tpi_get_integration_weight_with_sigma(double *iw,
                                           char *iw_zero,
                                           const double sigma,
                                           const double cutoff,
                                           const double *frequency_points,
                                           const size_t num_band0,
                                           const size_t triplet[3],
                                           const size_t const_adrs_shift,
                                           const double *frequencies,
                                           const size_t num_band,
                                           const size_t num_iw,
                                           const int openmp_per_bands)
{
  size_t j, b12, b1, b2, adrs_shift;
  double f0, f1, f2, g0, g1, g2;

#pragma omp parallel for private(j, b1, b2, f0, f1, f2, g0, g1, g2, adrs_shift) if (openmp_per_bands)
  for (b12 = 0; b12 < num_band * num_band; b12++) {
    b1 = b12 / num_band;
    b2 = b12 % num_band;
    f1 = frequencies[triplet[1] * num_band + b1];
    f2 = frequencies[triplet[2] * num_band + b2];
    for (j = 0; j < num_band0; j++) {
      f0 = frequency_points[j];
      adrs_shift = j * num_band * num_band + b1 * num_band + b2;
      if (cutoff > 0 &&
          fabs(f0 - f1 - f2) > cutoff &&
          fabs(f0 + f1 - f2) > cutoff &&
          fabs(f0 - f1 + f2) > cutoff) {
        /* All three Gaussians are negligible: record zero weights. */
        iw_zero[adrs_shift] = 1;
        g0 = 0;
        g1 = 0;
        g2 = 0;
      } else {
        iw_zero[adrs_shift] = 0;
        g0 = gaussian(f0 - f1 - f2, sigma);
        g1 = gaussian(f0 + f1 - f2, sigma);
        g2 = gaussian(f0 - f1 + f2, sigma);
      }
      iw[adrs_shift] = g0;
      adrs_shift += const_adrs_shift;
      iw[adrs_shift] = g1 - g2;
      if (num_iw == 3) {
        adrs_shift += const_adrs_shift;
        iw[adrs_shift] = g0 + g1 + g2;
      }
    }
  }
}

static void set_freq_vertices(double freq_vertices[3][24][4],
                              const double *frequencies,
                              TPLCONST size_t vertices[2][24][4],
                              const int num_band,
                              const int b1,
                              const int b2)
{
  int i, j;
  double f1, f2;

  for (i = 0; i < 24; i++) {
    for (j = 0; j < 4; j++) {
      f1 = frequencies[vertices[0][i][j] * num_band + b1];
      f2 = frequencies[vertices[1][i][j] * num_band + b2];
      /* Clamp (numerically) negative frequencies to zero. */
      if (f1 < 0) {f1 = 0;}
      if (f2 < 0) {f2 = 0;}
      freq_vertices[0][i][j] = f1 + f2;
      freq_vertices[1][i][j] = -f1 + f2;
      freq_vertices[2][i][j] = f1 - f2;
    }
  }
}

static int set_g(double g[3],
                 const double f0,
                 TPLCONST double freq_vertices[3][24][4])
{
  int iw_zero;

  /* Assume all weights vanish until f0 falls inside some tetrahedra range. */
  iw_zero = 1;

  if (in_tetrahedra(f0, freq_vertices[0])) {
    g[0] = thm_get_integration_weight(f0, freq_vertices[0], 'I');
    iw_zero = 0;
  } else {
    g[0] = 0;
  }
  if (in_tetrahedra(f0, freq_vertices[1])) {
    g[1] = thm_get_integration_weight(f0, freq_vertices[1], 'I');
    iw_zero = 0;
  } else {
    g[1] = 0;
  }
  if (in_tetrahedra(f0, freq_vertices[2])) {
    g[2] = thm_get_integration_weight(f0, freq_vertices[2], 'I');
    iw_zero = 0;
  } else {
    g[2] = 0;
  }

  return iw_zero;
}

static int in_tetrahedra(const double f0, TPLCONST double freq_vertices[24][4])
{
  int i, j;
  double fmin, fmax;

  /* Scan all 24*4 corner frequencies for the overall min/max. */
  fmin = freq_vertices[0][0];
  fmax = freq_vertices[0][0];
  for (i = 0; i < 24; i++) {
    for (j = 0; j < 4; j++) {
      if (fmin > freq_vertices[i][j]) {
        fmin = freq_vertices[i][j];
      }
      if (fmax < freq_vertices[i][j]) {
        fmax = freq_vertices[i][j];
      }
    }
  }

  if (fmin > f0 || fmax < f0) {
    return 0;
  } else {
    return 1;
  }
}

static void get_triplet_tetrahedra_vertices(
  size_t vertices[2][24][4],
  TPLCONST int tp_relative_grid_address[2][24][4][3],
  const int mesh[3],
  const size_t triplet[3],
  TPLCONST int (*bz_grid_address)[3],
  const size_t *bz_map)
{
  int i, j;

  /* i selects the second (i==0) and third (i==1) member of the triplet. */
  for (i = 0; i < 2; i++) {
    for (j = 0; j < 24; j++) {
      thm_get_dense_neighboring_grid_points(vertices[i][j],
                                            triplet[i + 1],
                                            tp_relative_grid_address[i][j],
                                            4,
                                            mesh,
                                            bz_grid_address,
                                            bz_map);
    }
  }
}
GB_unaryop__identity_fp32_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_fp32_uint8
// op(A') function:  GB_tran__identity_fp32_uint8

// C type:   float
// A type:   uint8_t
// cast:     float cij = (float) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    float z = (float) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise identity with uint8_t -> float typecast; iterations are
// independent, so the loop is statically scheduled across nthreads.
GrB_Info GB_unop__identity_fp32_uint8
(
    float *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the shared template
// GB_unaryop_transpose.c, instantiated here via the macros above.
GrB_Info GB_tran__identity_fp32_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pcpdlpverifydsaca.c
/******************************************************************************* * Copyright 2005-2018 Intel Corporation * All Rights Reserved. * * If this software was obtained under the Intel Simplified Software License, * the following terms apply: * * The source code, information and material ("Material") contained herein is * owned by Intel Corporation or its suppliers or licensors, and title to such * Material remains with Intel Corporation or its suppliers or licensors. The * Material contains proprietary information of Intel or its suppliers and * licensors. The Material is protected by worldwide copyright laws and treaty * provisions. No part of the Material may be used, copied, reproduced, * modified, published, uploaded, posted, transmitted, distributed or disclosed * in any way without Intel's prior express written permission. No license under * any patent, copyright or other intellectual property rights in the Material * is granted to or conferred upon you, either expressly, by implication, * inducement, estoppel or otherwise. Any license under such intellectual * property rights must be express and approved by Intel in writing. * * Unless otherwise agreed by Intel in writing, you may not remove or alter this * notice or any other notice embedded in Materials by Intel or Intel's * suppliers or licensors in any way. * * * If this software was obtained under the Apache License, Version 2.0 (the * "License"), the following terms apply: * * You may not use this file except in compliance with the License. You may * obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 * * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. 
*******************************************************************************/

/*
//
//  Purpose:
//     Cryptography Primitive.
//     DL over Prime Finite Field (Verify, DSA version)
//
//  Contents:
//     ippsDLPVerifyDSA()
//
//
*/

#include "owndefs.h"
#include "owncp.h"
#include "pcpdlp.h"

/*F*
//    Name: ippsDLPVerifyDSA
//
// Purpose: Verify Signature (DSA version)
//
// Returns:                      Reason:
//    ippStsNullPtrErr              NULL == pDL
//                                  NULL == pMsgDigest
//                                  NULL == pSignR
//                                  NULL == pSignS
//                                  NULL == pResult
//
//    ippStsContextMatchErr         illegal pDL->idCtx
//                                  illegal pMsgDigest->idCtx
//                                  illegal pSignR->idCtx
//                                  illegal pSignS->idCtx
//
//    ippStsIncompleteContextErr
//                                  incomplete context
//
//    ippStsMessageErr              MsgDigest >= R
//                                  MsgDigest < 0
//
//    ippStsNoErr                   no errors
//
// Parameters:
//    pMsgDigest     pointer to the message representative to be signed
//    pSignR,pSignS  pointer to the signature
//    pResult        pointer to the result: IppSignIsValid/IppSignIsInvalid
//    pDSA           pointer to the DL context
//
// Primitive sequence call:
//    1) set up domain parameters
//    2) set up (signatory's) public key
*F*/

/* Sequential implementation (used when OpenMP is not available). */
#if !defined(_OPENMP)
IPPFUN(IppStatus, ippsDLPVerifyDSA,(const IppsBigNumState* pMsgDigest,
                                    const IppsBigNumState* pSignR,
                                    const IppsBigNumState* pSignS,
                                    IppDLResult* pResult,
                                    IppsDLPState* pDL))
{
   /* test context*/
   IPP_BAD_PTR2_RET(pDL,pResult);
   pDL = (IppsDLPState*)( IPP_ALIGNED_PTR(pDL, DLP_ALIGNMENT) );
   IPP_BADARG_RET(!DLP_VALID_ID(pDL), ippStsContextMatchErr);

   /* test operation flag */
   IPP_BADARG_RET(!DLP_COMPLETE(pDL), ippStsIncompleteContextErr);

   /* test message representative */
   IPP_BAD_PTR1_RET(pMsgDigest);
   pMsgDigest = (IppsBigNumState*)( IPP_ALIGNED_PTR(pMsgDigest, BN_ALIGNMENT) );
   IPP_BADARG_RET(!BN_VALID_ID(pMsgDigest), ippStsContextMatchErr);
   IPP_BADARG_RET(BN_NEGATIVE(pMsgDigest), ippStsMessageErr);
   /* make sure msg <order */
   IPP_BADARG_RET(0<=cpCmp_BNU(BN_NUMBER(pMsgDigest), BN_SIZE(pMsgDigest),
                               DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL))),
                  ippStsMessageErr);

   /* test signature */
   IPP_BAD_PTR2_RET(pSignR,pSignS);
   pSignR = (IppsBigNumState*)( IPP_ALIGNED_PTR(pSignR, BN_ALIGNMENT) );
   pSignS = (IppsBigNumState*)( IPP_ALIGNED_PTR(pSignS, BN_ALIGNMENT) );
   IPP_BADARG_RET(!BN_VALID_ID(pSignR), ippStsContextMatchErr);
   IPP_BADARG_RET(!BN_VALID_ID(pSignS), ippStsContextMatchErr);

   /* test signature range: DSA requires 0 < SignR < R and 0 < SignS < R;
      an out-of-range value is an invalid signature, not an error status */
   if(0<cpBN_cmp(cpBN_OneRef(), pSignR)||
      0<=cpCmp_BNU(BN_NUMBER(pSignR),BN_SIZE(pSignR), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL)))) {
      *pResult = ippDLInvalidSignature;
      return ippStsNoErr;
   }
   if(0<cpBN_cmp(cpBN_OneRef(), pSignS)||
      0<=cpCmp_BNU(BN_NUMBER(pSignS),BN_SIZE(pSignS), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL)))) {
      *pResult = ippDLInvalidSignature;
      return ippStsNoErr;
   }

   {
      /* allocate BN resources */
      BigNumNode* pList = DLP_BNCTX(pDL);
      IppsBigNumState* pW = cpBigNumListGet(&pList);
      IppsBigNumState* pU1 = cpBigNumListGet(&pList);
      IppsBigNumState* pU2 = cpBigNumListGet(&pList);
      IppsBigNumState* pOrder = cpBigNumListGet(&pList);
      ippsSet_BN(ippBigNumPOS,
                 BITS2WORD32_SIZE(DLP_BITSIZER(pDL)), (Ipp32u*)DLP_R(pDL),
                 pOrder);

      /* W = 1/SignS (mod R) */
      ippsModInv_BN((IppsBigNumState*)pSignS, pOrder, pW);
      cpMontEnc_BN(pW, pW, DLP_MONTR(pDL));

      /* reduct pMsgDigest if necessary */
      if(0 < cpBN_cmp(pMsgDigest, pOrder))
         ippsMod_BN((IppsBigNumState*)pMsgDigest, pOrder, pU1);
      else
         cpBN_copy(pU1, pMsgDigest);

      /* U1 = (MsgDigest*W) (mod R) */
      cpMontMul_BN(pU1, pW, pU1, DLP_MONTR(pDL));
      /* U2 = (SignR*W) (mod R) */
      cpMontMul_BN(pU2, pSignR, pW, DLP_MONTR(pDL));

      /*
      // V = ((G^U1)*(Y^U2) (mod P)) (mod R)
      */
      /* precompute multi-exp table {1, G, Y, G*Y} */
      {
         cpSize pSize = BITS_BNU_CHUNK( DLP_BITSIZEP(pDL) );
         BNU_CHUNK_T* pX1 = BN_NUMBER(DLP_GENC(pDL));
         BNU_CHUNK_T* pX2 = BN_NUMBER(DLP_YENC(pDL));
         const BNU_CHUNK_T* ppX[2];
         ppX[0] = pX1;
         ppX[1] = pX2;
         /* zero-extend both bases to the full modulus width */
         ZEXPAND_BNU(pX1, BN_SIZE(DLP_GENC(pDL)), pSize);
         ZEXPAND_BNU(pX2, BN_SIZE(DLP_YENC(pDL)), pSize);
         cpMontMultiExpInitArray(DLP_METBL(pDL),
                                 ppX, pSize*BITSIZE(BNU_CHUNK_T), 2,
                                 DLP_MONTP0(pDL));
      }

      /* W = ((G^U1)*(Y^U2) (mod P) */
      {
         cpSize sizeE1 = BN_SIZE(pU1);
         cpSize sizeE2 = BN_SIZE(pU2);
         cpSize sizeE = IPP_MAX(sizeE1, sizeE2);
         BNU_CHUNK_T* pE1 = BN_NUMBER(pU1);
         BNU_CHUNK_T* pE2 = BN_NUMBER(pU2);
         const Ipp8u* ppE[2];
         ppE[0] = (Ipp8u*)pE1;
         ppE[1] = (Ipp8u*)pE2;
         /* pad both exponents to a common bit length */
         ZEXPAND_BNU(pE1, sizeE1, sizeE);
         ZEXPAND_BNU(pE2, sizeE2, sizeE);
         cpFastMontMultiExp(BN_NUMBER(pW),
                            DLP_METBL(pDL),
                            ppE, sizeE*BITSIZE(BNU_CHUNK_T), 2,
                            DLP_MONTP0(pDL));
         BN_SIZE(pW) = BITS_BNU_CHUNK( DLP_BITSIZEP(pDL) );
         BN_SIGN(pW) = ippBigNumPOS;
      }

      /* leave Montgomery domain and reduce mod R */
      cpMontDec_BN(pW, pW, DLP_MONTP0(pDL));
      BN_SIZE(pW) = cpMod_BNU(BN_NUMBER(pW), BN_SIZE(pW),
                              BN_NUMBER(pOrder), BN_SIZE(pOrder));

      /* result = W~R */
      *pResult = 0==cpBN_cmp(pW, pSignR)? ippDLValid : ippDLInvalidSignature;
      return ippStsNoErr;
   }
}
//#endif

#else

/* OpenMP implementation: the two modular exponentiations G^U1 and Y^U2
   are independent and computed in parallel sections (at most 2 threads). */
IPPFUN(IppStatus, ippsDLPVerifyDSA,(const IppsBigNumState* pMsgDigest,
                                    const IppsBigNumState* pSignR,
                                    const IppsBigNumState* pSignS,
                                    IppDLResult* pResult,
                                    IppsDLPState* pDL))
{
   /* test context*/
   IPP_BAD_PTR2_RET(pDL,pResult);
   pDL = (IppsDLPState*)( IPP_ALIGNED_PTR(pDL, DLP_ALIGNMENT) );
   IPP_BADARG_RET(!DLP_VALID_ID(pDL), ippStsContextMatchErr);

   /* test operation flag */
   IPP_BADARG_RET(!DLP_COMPLETE(pDL), ippStsIncompleteContextErr);

   /* test message representative */
   IPP_BAD_PTR1_RET(pMsgDigest);
   pMsgDigest = (IppsBigNumState*)( IPP_ALIGNED_PTR(pMsgDigest, BN_ALIGNMENT) );
   IPP_BADARG_RET(!BN_VALID_ID(pMsgDigest), ippStsContextMatchErr);
   IPP_BADARG_RET((0>cpBN_tst(pMsgDigest)), ippStsMessageErr);
   /* make sure msg <order */
   IPP_BADARG_RET(0<=cpCmp_BNU(BN_NUMBER(pMsgDigest), BN_SIZE(pMsgDigest),
                               DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL))),
                  ippStsMessageErr);

   /* test signature */
   IPP_BAD_PTR2_RET(pSignR,pSignS);
   pSignR = (IppsBigNumState*)( IPP_ALIGNED_PTR(pSignR, BN_ALIGNMENT) );
   pSignS = (IppsBigNumState*)( IPP_ALIGNED_PTR(pSignS, BN_ALIGNMENT) );
   IPP_BADARG_RET(!BN_VALID_ID(pSignR), ippStsContextMatchErr);
   IPP_BADARG_RET(!BN_VALID_ID(pSignS), ippStsContextMatchErr);

   /* test signature range: out-of-range R or S is an invalid signature */
   if(0<cpBN_cmp(cpBN_OneRef(), pSignR)||
      0<=cpCmp_BNU(BN_NUMBER(pSignR),BN_SIZE(pSignR), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL)))) {
      *pResult = ippDLInvalidSignature;
      return ippStsNoErr;
   }
   if(0<cpBN_cmp(cpBN_OneRef(), pSignS)||
      0<=cpCmp_BNU(BN_NUMBER(pSignS),BN_SIZE(pSignS), DLP_R(pDL), BITS_BNU_CHUNK(DLP_BITSIZER(pDL)))) {
      *pResult = ippDLInvalidSignature;
      return ippStsNoErr;
   }

   {
      /* allocate BN resources */
      BigNumNode* pList = DLP_BNCTX(pDL);
      IppsBigNumState* pV = cpBigNumListGet(&pList);
      IppsBigNumState* pW = cpBigNumListGet(&pList);
      IppsBigNumState* pU1 = cpBigNumListGet(&pList);
      IppsBigNumState* pU2 = cpBigNumListGet(&pList);
      IppsBigNumState* pOrder = cpBigNumListGet(&pList);
      ippsSet_BN(ippBigNumPOS,
                 BITS2WORD32_SIZE(DLP_BITSIZER(pDL)), (Ipp32u*)DLP_R(pDL),
                 pOrder);

      //int maxNumThreads = IPP_MIN(IPPCP_GET_NUM_THREADS(), 2);

      /* W = 1/SignS (mod R) */
      ippsModInv_BN((IppsBigNumState*)pSignS, pOrder, pW);
      cpMontEnc_BN(pW, pW, DLP_MONTR(pDL));

      /* reduct pMsgDigest if necessary */
      if(0 < cpBN_cmp(pMsgDigest, pOrder))
         ippsMod_BN((IppsBigNumState*)pMsgDigest, pOrder, pU1);
      else
         cpBN_copy(pU1, pMsgDigest);

      /* U1 = (MsgDigest*W) (mod R) */
      cpMontMul_BN(pU1, pW, pU1, DLP_MONTR(pDL));
      /* U2 = (SignR*W) (mod R) */
      cpMontMul_BN(pU2, pSignR, pW, DLP_MONTR(pDL));

      /*
      // V = ((G^U1)*(Y^U2) (mod P)) (mod R)
      */
      #pragma omp parallel sections IPPCP_OMP_LIMIT_MAX_NUM_THREADS(2)
      {
         /* W = (G^U1) (mod P) */
         #pragma omp section
         {
            #if !defined(_USE_WINDOW_EXP_)
            //cpSafeMontExp_Binary(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL));
            cpMontExpBin_BN(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL) );
            #else
            if((DLP_EXPMETHOD(pDL)==BINARY) ||
               (1==cpMontExp_WinSize(BITSIZE_BNU(BN_NUMBER(pU1), BN_SIZE(pU1)))))
               //cpSafeMontExp_Binary(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL));
               cpMontExpBin_BN(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL) );
            else
               //cpSafeMontExp_Window(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL), DLP_BNUCTX0(pDL));
               cpMontExpWin_BN(pW, DLP_GENC(pDL), pU1, DLP_MONTP0(pDL), DLP_BNUCTX0(pDL));
            #endif
         }
         /* V = (Y^U2) (mod P) */
         #pragma omp section
         {
            #if !defined(_USE_WINDOW_EXP_)
            //cpSafeMontExp_Binary(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL));
            cpMontExpBin_BN(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL) );
            #else
            if((DLP_EXPMETHOD(pDL)==BINARY) ||
               (1==cpMontExp_WinSize(BITSIZE_BNU(BN_NUMBER(pU2), BN_SIZE(pU2)))))
               //cpSafeMontExp_Binary(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL));
               cpMontExpBin_BN(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL) );
            else
               //cpSafeMontExp_Window(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL), DLP_BNUCTX1(pDL));
               cpMontExpWin_BN(pV, DLP_YENC(pDL), pU2, DLP_MONTP1(pDL), DLP_BNUCTX1(pDL));
            #endif
         }
      }

      /* combine the two partial results, leave Montgomery domain,
         and reduce mod R */
      cpMontMul_BN(pV, pW, pV, DLP_MONTP0(pDL));
      cpMontDec_BN(pV, pV, DLP_MONTP0(pDL));
      BN_SIZE(pV) = cpMod_BNU(BN_NUMBER(pV), BN_SIZE(pV),
                              BN_NUMBER(pOrder), BN_SIZE(pOrder));

      /* result = V~R */
      *pResult = 0==cpBN_cmp(pV, pSignR)? ippDLValid : ippDLInvalidSignature;
      return ippStsNoErr;
   }
}

#endif /* _OPENMP */
Lab_2.1.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

/* Problem size and solver constants (values unchanged). */
#define N 100
#define t 10e-6   /* iteration step */
#define e 10e-9   /* relative-residual stopping threshold */

/*
 * Solves A*x = b with the simple-iteration method x <- x - t*(A*x - b),
 * where A has 2 on the diagonal and 1 everywhere else, and b[i] = N + 1.
 * The OpenMP thread count is taken from argv[1]; the wall-clock time and
 * the resulting vector are printed.
 */
int main(int argc, char **argv)
{
    if (argc != 2) {
        printf("invalid number of arguments\n");
        return 0;
    }

    const int thread_count = atoi(argv[1]);
    omp_set_num_threads(thread_count);

    double *a      = (double*)malloc(N * N * sizeof(double));
    double *rhs    = (double*)malloc(N * sizeof(double));
    double *x      = (double*)malloc(N * sizeof(double));
    double *x_next = (double*)malloc(N * sizeof(double));

    /* Fill A (2 on the diagonal, 1 off-diagonal), b, and the zero guess. */
    for (int row = 0; row < N; ++row) {
        rhs[row] = N + 1;
        x[row] = 0;
        for (int col = 0; col < N; ++col) {
            a[row * N + col] = (row == col) ? 2.0 : 1.0;
        }
    }

    const double start = omp_get_wtime();

    /* ||b|| for the relative stopping criterion. */
    double rhs_norm = 0;
#pragma omp parallel for reduction(+:rhs_norm)
    for (int i = 0; i < N; ++i) {
        rhs_norm += rhs[i] * rhs[i];
    }
    rhs_norm = sqrt(rhs_norm);

    int keep_iterating = 1;
    while (keep_iterating) {
        double residual_norm = 0;

        /* x_next = x - t*(A*x - b); accumulate ||A*x - b||^2 on the fly. */
#pragma omp parallel for reduction(+:residual_norm)
        for (int row = 0; row < N; ++row) {
            double residual = 0;
            for (int col = 0; col < N; ++col) {
                residual += a[row * N + col] * x[col];
            }
            residual = residual - rhs[row];
            x_next[row] = x[row] - residual * t;
            residual_norm += residual * residual;
        }

        for (int row = 0; row < N; ++row) {
            x[row] = x_next[row];
        }

        residual_norm = sqrt(residual_norm);
        keep_iterating = (residual_norm / rhs_norm >= e);
    }

    const double finish = omp_get_wtime();

    printf("Processes: %d; Matrix Size: %dx%d; Time: %lf\n",
           thread_count, N, N, (finish - start));
    printf("Result: \n");
    for (int i = 0; i < N; i++) {
        printf("%f ", x[i]);
    }
    printf("\n");

    free(a);
    free(rhs);
    free(x);
    free(x_next);
    return 0;
}
graph.h
#pragma once #include<vector> #include<algorithm> #include<queue> #include<stdlib.h> #include"config.h" #include"data.h" #include<random> #include<unordered_set> #include<mutex> #ifdef OMP #include<omp.h> #endif typedef unsigned int vl_type; class VisitedList { public: vl_type curV; vl_type *mass; unsigned int numelements; VisitedList(int numelements1) { curV = 1; numelements = numelements1; mass = new vl_type[numelements]; memset(mass, 0, sizeof(vl_type) * numelements); } void reset() { ++curV; if (curV == 0) { curV = 1; memset(mass, 0, sizeof(vl_type) * numelements); } }; ~VisitedList() { delete mass; } }; struct GraphMeasures{ int distance_cnt = 0; }; class GraphWrapper{ public: virtual void add_vertex(idx_t vertex_id,std::vector<std::pair<int,value_t>>& point) = 0; virtual void add_vertex_lock(idx_t vertex_id,std::vector<std::pair<int,value_t>>& point) = 0; virtual void search_top_k(const std::vector<std::pair<int,value_t>>& query,int k,std::vector<idx_t>& result) = 0; virtual void search_top_k_with_score(const std::vector<std::pair<int,value_t>>& query,int k,std::vector<idx_t>& result,std::vector<double>& score){} virtual void dump(std::string path = "bfsg.graph") = 0; virtual void load(std::string path = "bfsg.graph") = 0; virtual ~GraphWrapper(){} virtual void set_construct_pq_size(int size){}; GraphMeasures measures; }; template<const int dist_type> class FixedDegreeGraph : public GraphWrapper{ private: const int degree = SEARCH_DEGREE; const int flexible_degree = FIXED_DEGREE; const int vertex_offset_shift = FIXED_DEGREE_SHIFT; std::vector<idx_t> edges; std::vector<dist_t> edge_dist; Data* data; std::mt19937_64 rand_gen = std::mt19937_64(1234567);//std::random_device{}()); std::vector<std::mutex> edge_mutex;//do not push back on this vector, it will destroy the mutex bool debug = false; VisitedList* p_visited = NULL; #ifdef OMP std::vector<VisitedList*> visited_pool; #endif int construct_pq_size = CONSTRUCT_SEARCH_BUDGET; void rank_and_switch_ordered(idx_t 
v_id,idx_t u_id){ //We assume the neighbors of v_ids in edges[offset] are sorted //by the distance to v_id ascendingly when it is full //NOTICE: before it is full, it is unsorted auto curr_dist = pair_distance(v_id,u_id); auto offset = ((size_t)v_id) << vertex_offset_shift; int degree = edges[offset]; std::vector<idx_t> neighbor; neighbor.reserve(degree + 1); for(int i = 0;i < degree;++i) neighbor.push_back(edges[offset + i + 1]); neighbor.push_back(u_id); neighbor = edge_selection_filter_neighbor(neighbor,v_id,flexible_degree); edges[offset] = neighbor.size(); for(int i = 0;i < neighbor.size();++i) edges[offset + i + 1] = neighbor[i]; return; //We assert edges[offset] > 0 here if(curr_dist >= edge_dist[offset + edges[offset]]){ return; } edges[offset + edges[offset]] = u_id; edge_dist[offset + edges[offset]] = curr_dist; for(size_t i = offset + edges[offset] - 1;i > offset;--i){ if(edge_dist[i] > edge_dist[i + 1]){ std::swap(edges[i],edges[i + 1]); std::swap(edge_dist[i],edge_dist[i + 1]); }else{ break; } } } void rank_and_switch(idx_t v_id,idx_t u_id){ rank_and_switch_ordered(v_id,u_id); //TODO: //Implement an unordered version to compare with } template<class T> dist_t distance(idx_t a,T& b){ if(dist_type == 0) return data->l2_distance(a,b); else if(dist_type == 1) return data->negative_inner_prod_distance(a,b); else if(dist_type == 2) return data->negative_cosine_distance(a,b); else if(dist_type == 3) return data->l2_distance(a,b); else if(dist_type == 4) return data->ipwrap_l2_build_distance(a,b); else if(dist_type == 5) return data->ipwrap_l2_query_distance(a,b); else{ // should not happen fprintf(stderr,"unsupported dist_type %d\n",dist_type); return 0; } } void compute_distance_naive(size_t offset,std::vector<dist_t>& dists){ dists.resize(edges[offset]); auto degree = edges[offset]; for(int i = 0;i < degree;++i){ dists[i] = distance(offset >> vertex_offset_shift,edges[offset + i + 1]); } } void compute_distance(size_t offset,std::vector<dist_t>& dists){ 
compute_distance_naive(offset,dists); } template<class T> dist_t pair_distance_naive(idx_t a,T& b){ ++measures.distance_cnt; return distance(a,b); } template<class T> dist_t pair_distance(idx_t a,T& b){ return pair_distance_naive(a,b); } void qsort(size_t l,size_t r){ auto mid = (l + r) >> 1; int i = l,j = r; auto k = edge_dist[mid]; do{ while(edge_dist[i] < k) ++i; while(k < edge_dist[j]) --j; if(i <= j){ std::swap(edge_dist[i],edge_dist[j]); std::swap(edges[i],edges[j]); ++i; --j; } }while(i <= j); if(i < r)qsort(i,r); if(l < j)qsort(l,j); } void rank_edges(size_t offset){ std::vector<dist_t> dists; compute_distance(offset,dists); for(int i = 0;i < dists.size();++i) edge_dist[offset + i + 1] = dists[i]; qsort(offset + 1,offset + dists.size()); //TODO: //use a heap in the edge_dist } void add_edge_lock(idx_t v_id,idx_t u_id){ edge_mutex[v_id].lock(); auto offset = ((size_t)v_id) << vertex_offset_shift; if(edges[offset] < flexible_degree){ ++edges[offset]; edges[offset + edges[offset]] = u_id; }else{ rank_and_switch(v_id,u_id); } edge_mutex[v_id].unlock(); } void add_edge(idx_t v_id,idx_t u_id){ auto offset = ((size_t)v_id) << vertex_offset_shift; if(edges[offset] < flexible_degree){ ++edges[offset]; edges[offset + edges[offset]] = u_id; }else{ rank_and_switch(v_id,u_id); } } public: long long total_explore_cnt = 0; int total_explore_times = 0; size_t search_start_point = 0; bool ignore_startpoint = false; FixedDegreeGraph(Data* data) : data(data){ auto num_vertices = data->max_vertices(); edges = std::vector<idx_t>(((size_t)num_vertices) << vertex_offset_shift); edge_dist = std::vector<dist_t>(((size_t)num_vertices) << vertex_offset_shift); edge_mutex = std::vector<std::mutex>(num_vertices); p_visited = new VisitedList(num_vertices + 5); #ifdef OMP int n_threads = 1; #pragma omp parallel #pragma omp master { n_threads = omp_get_num_threads(); } visited_pool.resize(n_threads); for(int i = 0;i < n_threads;++i) visited_pool[i] = new VisitedList(num_vertices + 5); 
#endif
}
// Sets the candidate-pool (priority queue) size used during construction.
void set_construct_pq_size(int size){
    construct_pq_size = size;
}
// Neighbor pruning: walk the candidates in ascending distance from vertex_id
// and keep one only if it is closer to vertex_id than to every neighbor kept
// so far; stop once desired_size neighbors are selected.
std::vector<idx_t> edge_selection_filter_neighbor(std::vector<idx_t>& neighbor,idx_t vertex_id,int desired_size){
    std::vector<idx_t> filtered_neighbor;
    std::vector<dist_t> dists(neighbor.size());
    for(int i = 0;i < dists.size();++i)
        dists[i] = pair_distance(vertex_id,neighbor[i]);
    // sort candidate indices by distance to vertex_id
    std::vector<int> idx(neighbor.size());
    for(int i = 0;i < idx.size();++i)
        idx[i] = i;
    std::sort(idx.begin(),idx.end(),[&](int a,int b){return dists[a] < dists[b];});
    for(int i = 0;i < idx.size();++i){
        dist_t cur_dist = dists[idx[i]];
        bool pass = true;
        for(auto neighbor_id : filtered_neighbor){
            if(cur_dist > pair_distance(neighbor_id,neighbor[idx[i]])){
                pass = false;
                break;
            }
        }
        if(pass){
            filtered_neighbor.push_back(neighbor[idx[i]]);
            if(filtered_neighbor.size() >= desired_size)
                break;
        }else{
        }
    }
    // NOTE(review): std::move here suppresses NRVO; a plain return suffices.
    return std::move(filtered_neighbor);
}
// Inserts a new vertex under locking: search for construct_pq_size
// candidates, prune to at most `degree` neighbors, publish the adjacency
// list, then add reverse edges.
void add_vertex_lock(idx_t vertex_id,std::vector<std::pair<int,value_t>>& point){
    std::vector<idx_t> neighbor;
    search_top_k_lock(point,construct_pq_size,neighbor);
    auto offset = ((size_t)vertex_id) << vertex_offset_shift;
    // NOTE(review): num_neighbors is computed but never used (also in
    // add_vertex below).
    int num_neighbors = degree < neighbor.size() ? degree : neighbor.size();
    edge_mutex[vertex_id].lock();
    // TODO:
    // it is possible to save this space --- edges[offset]
    // by set the last number in the range as
    // a large number - current degree
    if(neighbor.size() >= degree)
        neighbor = edge_selection_filter_neighbor(neighbor,vertex_id,degree);
    edges[offset] = neighbor.size();
    for(int i = 0;i < neighbor.size() && i < degree;++i){
        edges[offset + i + 1] = neighbor[i];
    }
    edge_mutex[vertex_id].unlock();
    // reverse edges are added after releasing vertex_id's lock, so two vertex
    // locks are never held at once
    for(int i = 0;i < neighbor.size() && i < degree;++i){
        add_edge_lock(neighbor[i],vertex_id);
    }
}
// Single-threaded insertion; same flow as add_vertex_lock without locks.
void add_vertex(idx_t vertex_id,std::vector<std::pair<int,value_t>>& point){
    std::vector<idx_t> neighbor;
    search_top_k(point,construct_pq_size,neighbor);
    auto offset = ((size_t)vertex_id) << vertex_offset_shift;
    int num_neighbors = degree < neighbor.size() ? degree : neighbor.size();
    // TODO:
    // it is possible to save this space --- edges[offset]
    // by set the last number in the range as
    // a large number - current degree
    if(neighbor.size() >= degree){
        neighbor = edge_selection_filter_neighbor(neighbor,vertex_id,degree);
    }
    edges[offset] = neighbor.size();
    for(int i = 0;i < neighbor.size() && i < degree;++i){
        edges[offset + i + 1] = neighbor[i];
    }
    for(int i = 0;i < neighbor.size() && i < degree;++i){
        add_edge(neighbor[i],vertex_id);
    }
}
// Best-first top-k search with per-vertex locking, for use while the graph
// is mutated concurrently. Fills `result` nearest-first with up to k ids.
void astar_multi_start_search_lock(const std::vector<std::pair<int,value_t>>& query,int k,std::vector<idx_t>& result){
    // min-heap frontier of (distance, vertex id)
    std::priority_queue<std::pair<dist_t,idx_t>,std::vector<std::pair<dist_t,idx_t>>,std::greater<std::pair<dist_t,idx_t>>> q;
    const int num_start_point = 1;
    // dist_type 3 uses the mobius point layout; all others the plain one
    auto converted_query = dist_type == 3 ? data->organize_point_mobius(query) : data->organize_point(query);
#ifdef OMP
    int tid = omp_get_thread_num();
    // shadow the shared visited list with this thread's private one
    auto& p_visited = visited_pool[tid];
#endif
    p_visited->reset();
    auto tag = p_visited->curV;
    for(int i = 0;i < num_start_point && i < data->curr_vertices();++i){
        auto start = search_start_point;//rand_gen() % data->curr_vertices();
        if(p_visited->mass[start] == tag)
            continue;
        p_visited->mass[start] = tag;
        q.push(std::make_pair(pair_distance_naive(start,converted_query),start));
    }
    std::priority_queue<std::pair<dist_t,idx_t>> topk;   // max-heap of best k so far
    const int max_step = 1000000;                        // hard cap on exploration
    bool found_min_node = false;
    dist_t min_dist = 1e100;
    int explore_cnt = 0;
    for(int iter = 0;iter < max_step && !q.empty();++iter){
        auto now = q.top();
        // the closest frontier node can no longer improve the top-k: stop
        if(topk.size() == k && topk.top().first < now.first){
            break;
        }
        ++explore_cnt;
        min_dist = std::min(min_dist,now.first);
        q.pop();
        // optionally skip the start point itself (iter == 0)
        if(ignore_startpoint == false || iter != 0)
            topk.push(now);
        if(topk.size() > k)
            topk.pop();
        edge_mutex[now.second].lock();
        auto offset = ((size_t)now.second) << vertex_offset_shift;
        auto degree = edges[offset];
        for(int i = 0;i < degree;++i){
            auto start = edges[offset + i + 1];
            if(p_visited->mass[start] == tag)
                continue;
            p_visited->mass[start] = tag;
            auto dist = pair_distance_naive(start,converted_query);
            if(topk.empty() || dist < topk.top().first || topk.size() < k)
                q.push(std::make_pair(dist,start));
        }
        edge_mutex[now.second].unlock();
    }
    total_explore_cnt += explore_cnt;
    ++total_explore_times;
    // drain the max-heap back-to-front so results come out nearest-first
    result.resize(topk.size());
    int i = result.size() - 1;
    while(!topk.empty()){
        result[i] = (topk.top().second);
        topk.pop();
        --i;
    }
}
// Greedy descent for k == 1: repeatedly hop to the best unvisited neighbor;
// terminates at a local minimum (no heap, no backtracking).
void astar_no_heap_search(const std::vector<std::pair<int,value_t>>& query,std::vector<idx_t>& result){
    const int num_start_point = 1;
    std::pair<dist_t,idx_t> q_top = std::make_pair(10000000000,0);
    auto converted_query = dist_type == 3 ? data->organize_point_mobius(query) : data->organize_point(query);
    p_visited->reset();
    auto tag = p_visited->curV;
    for(int i = 0;i < num_start_point && i < data->curr_vertices();++i){
        auto start = search_start_point;//rand_gen() % data->curr_vertices();
        p_visited->mass[start] = tag;
        if(ignore_startpoint == false){
            q_top = (std::make_pair(pair_distance_naive(start,converted_query),start));
        }else{
            // seed from the neighbors of the (ignored) start point
            auto offset = ((size_t)start) << vertex_offset_shift;
            auto degree = edges[offset];
            for(int i = 1;i <= degree;++i){
                p_visited->mass[edges[offset + i]] = tag;
                auto dis = pair_distance_naive(edges[offset + i],converted_query);
                if(dis < q_top.first)
                    // NOTE(review): this stores `start`, not the neighbor
                    // edges[offset + i], into q_top -- looks suspicious;
                    // verify the intended seed vertex.
                    q_top = (std::make_pair(dis,start));
            }
        }
    }
    const int max_step = 1000000;
    bool found_min_node = false;
    dist_t min_dist = 1e100;
    int explore_cnt = 0;
    for(int iter = 0;iter < max_step;++iter){
        ++explore_cnt;
        auto offset = ((size_t)q_top.second) << vertex_offset_shift;
        auto degree = edges[offset];
        bool changed = false;
        for(int i = 0;i < degree;++i){
            auto start = edges[offset + i + 1];
            if(p_visited->mass[start] == tag)
                continue;
            p_visited->mass[start] = tag;
            auto dist = pair_distance_naive(start,converted_query);
            if(dist < q_top.first){
                q_top = (std::make_pair(dist,start));
                changed = true;
            }
        }
        // local minimum reached: no neighbor improves on q_top
        if(changed == false)
            break;
    }
    total_explore_cnt += explore_cnt;
    ++total_explore_times;
    result.resize(1);
    result[0] = q_top.second;
}
void astar_multi_start_search_with_score(const std::vector<std::pair<int,value_t>>& query,int k,std::vector<idx_t>& result,std::vector<double>& score){ std::priority_queue<std::pair<dist_t,idx_t>,std::vector<std::pair<dist_t,idx_t>>,std::greater<std::pair<dist_t,idx_t>>> q; const int num_start_point = 1; auto converted_query = dist_type == 3 ? data->organize_point_mobius(query) : data->organize_point(query); p_visited->reset(); auto tag = p_visited->curV; for(int i = 0;i < num_start_point && i < data->curr_vertices();++i){ auto start = search_start_point;//rand_gen() % data->curr_vertices(); if(p_visited->mass[start] == tag) continue; p_visited->mass[start] = tag; q.push(std::make_pair(pair_distance_naive(start,converted_query),start)); } std::priority_queue<std::pair<dist_t,idx_t>> topk; const int max_step = 1000000; bool found_min_node = false; dist_t min_dist = 1e100; int explore_cnt = 0; for(int iter = 0;iter < max_step && !q.empty();++iter){ auto now = q.top(); if(topk.size() == k && topk.top().first < now.first){ break; } ++explore_cnt; min_dist = std::min(min_dist,now.first); q.pop(); if(ignore_startpoint == false || iter != 0) topk.push(now); if(topk.size() > k) topk.pop(); auto offset = ((size_t)now.second) << vertex_offset_shift; auto degree = edges[offset]; for(int i = 0;i < degree;++i){ auto start = edges[offset + i + 1]; if(p_visited->mass[start] == tag) continue; p_visited->mass[start] = tag; auto dist = pair_distance_naive(start,converted_query); if(topk.empty() || dist < topk.top().first || topk.size() < k) q.push(std::make_pair(dist,start)); } } total_explore_cnt += explore_cnt; ++total_explore_times; result.resize(topk.size()); score.resize(topk.size()); int i = result.size() - 1; while(!topk.empty()){ result[i] = (topk.top().second); score[i] = -(topk.top().first); topk.pop(); --i; } } void astar_multi_start_search(const std::vector<std::pair<int,value_t>>& query,int k,std::vector<idx_t>& result){ 
std::priority_queue<std::pair<dist_t,idx_t>,std::vector<std::pair<dist_t,idx_t>>,std::greater<std::pair<dist_t,idx_t>>> q; const int num_start_point = 1; auto converted_query = dist_type == 3 ? data->organize_point_mobius(query) : data->organize_point(query); p_visited->reset(); auto tag = p_visited->curV; for(int i = 0;i < num_start_point && i < data->curr_vertices();++i){ auto start = search_start_point;//rand_gen() % data->curr_vertices(); if(p_visited->mass[start] == tag) continue; p_visited->mass[start] = tag; q.push(std::make_pair(pair_distance_naive(start,converted_query),start)); } std::priority_queue<std::pair<dist_t,idx_t>> topk; const int max_step = 1000000; bool found_min_node = false; dist_t min_dist = 1e100; int explore_cnt = 0; for(int iter = 0;iter < max_step && !q.empty();++iter){ auto now = q.top(); if(topk.size() == k && topk.top().first < now.first){ break; } ++explore_cnt; min_dist = std::min(min_dist,now.first); q.pop(); if(ignore_startpoint == false || iter != 0) topk.push(now); if(topk.size() > k) topk.pop(); auto offset = ((size_t)now.second) << vertex_offset_shift; auto degree = edges[offset]; for(int i = 0;i < degree;++i){ auto start = edges[offset + i + 1]; if(p_visited->mass[start] == tag) continue; p_visited->mass[start] = tag; auto dist = pair_distance_naive(start,converted_query); if(topk.empty() || dist < topk.top().first || topk.size() < k) q.push(std::make_pair(dist,start)); } } total_explore_cnt += explore_cnt; ++total_explore_times; result.resize(topk.size()); int i = result.size() - 1; while(!topk.empty()){ result[i] = (topk.top().second); topk.pop(); --i; } } void search_top_k(const std::vector<std::pair<int,value_t>>& query,int k,std::vector<idx_t>& result){ if(k == 1) astar_no_heap_search(query,result); else astar_multi_start_search(query,k,result); } void search_top_k_with_score(const std::vector<std::pair<int,value_t>>& query,int k,std::vector<idx_t>& result,std::vector<double>& score){ 
astar_multi_start_search_with_score(query,k,result,score); } void search_top_k_lock(const std::vector<std::pair<int,value_t>>& query,int k,std::vector<idx_t>& result){ astar_multi_start_search_lock(query,k,result); } void print_stat(){ auto n = data->max_vertices(); size_t sum = 0; std::vector<size_t> histogram(2 * degree + 1,0); for(size_t i = 0;i < n;++i){ sum += edges[i << vertex_offset_shift]; int tmp = edges[i << vertex_offset_shift]; if(tmp > 2 * degree + 1) fprintf(stderr,"[ERROR] node %zu has %d degree\n",i,tmp); ++histogram[edges[i << vertex_offset_shift]]; if(tmp != degree) fprintf(stderr,"[INFO] %zu has degree %d\n",i,tmp); } fprintf(stderr,"[INFO] #vertices %zu, avg degree %f\n",n,sum * 1.0 / n); std::unordered_set<idx_t> visited; fprintf(stderr,"[INFO] degree histogram:\n"); for(int i = 0;i <= 2 * degree + 1;++i) fprintf(stderr,"[INFO] %d:\t%zu\n",i,histogram[i]); } void print_edges(int x){ for(size_t i = 0;i < x;++i){ size_t offset = i << vertex_offset_shift; int degree = edges[offset]; fprintf(stderr,"%d (%d): ",i,degree); for(int j = 1;j <= degree;++j) fprintf(stderr,"(%zu,%f) ",edges[offset + j],edge_dist[offset + j]); fprintf(stderr,"\n"); } } void dump(std::string path = "bfsg.graph"){ FILE* fp = fopen(path.c_str(),"wb"); size_t num_vertices = data->max_vertices(); fwrite(&edges[0],sizeof(edges[0]) * (num_vertices << vertex_offset_shift),1,fp); fclose(fp); } void load(std::string path = "bfsg.graph"){ FILE* fp = fopen(path.c_str(),"rb"); size_t num_vertices = data->max_vertices(); auto cnt = fread(&edges[0],sizeof(edges[0]) * (num_vertices << vertex_offset_shift),1,fp); fclose(fp); } Data* get_data(){ return data; } };
sgemm.c
//Tencent is pleased to support the open source community by making FeatherCNN available.
//Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
//Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
//in compliance with the License. You may obtain a copy of the License at
//
//https://opensource.org/licenses/BSD-3-Clause
//
//Unless required by applicable law or agreed to in writing, software distributed
//under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
//CONDITIONS OF ANY KIND, either express or implied. See the License for the
//specific language governing permissions and limitations under the License.

#include "tinyMatrixMul.h"
#include <arm_neon.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include "common.h"

// OpenMP is unavailable on Apple toolchains here.
#ifdef __APPLE__
#else
#include <omp.h>
#endif

// Cache-blocking sizes: rows of A per block (mc), depth per block (kc),
// columns of B per block (nc).
const int mc = 1024;
const int kc = 256;
const int nc = 256;

#define min(a,b) (((a) > (b))?(b):(a))

// 4-row micro-kernels for the 1..7 remainder columns of B (N % 8).
void sgemm_4x1( int L, float *a, int lda, float *b, int ldb, float *c, int ldc );
void sgemm_4x2( int L, float *a, int lda, float *b, int ldb, float *c, int ldc );
void sgemm_4x3( int L, float *a, int lda, float *b, int ldb, float *c, int ldc );
void sgemm_4x4( int L, float *a, int lda, float *b, int ldb, float *c, int ldc );
void sgemm_4x5( int L, float *a, int lda, float *b, int ldb, float *c, int ldc );
void sgemm_4x6( int L, float *a, int lda, float *b, int ldb, float *c, int ldc );
void sgemm_4x7( int L, float *a, int lda, float *b, int ldb, float *c, int ldc );
// 8-row micro-kernels for the 1..7 remainder columns of B (N % 8).
void sgemm_8x1( int L, float *a, int lda, float *b, int ldb, float *c, int ldc );
void sgemm_8x2( int L, float *a, int lda, float *b, int ldb, float *c, int ldc );
void sgemm_8x3( int L, float *a, int lda, float *b, int ldb, float *c, int ldc );
void sgemm_8x4( int L, float *a, int lda, float *b, int ldb, float *c, int ldc );
void sgemm_8x5( int L, float *a, int lda, float *b, int ldb, float *c, int ldc );
void sgemm_8x6( int L, float *a, int lda, float *b, int ldb, float *c, int ldc );
void sgemm_8x7( int L, float *a, int lda, float *b, int ldb, float *c, int ldc );

// Remainder-column kernel, selected once per top-level call from N % 8.
// NOTE(review): these function-pointer globals make the entry points
// non-reentrant across threads that use different kernel shapes.
void (*sgemm_tiny_scale)(int L, float *a, int lda, float *b, int ldb, float *c, int ldc) = NULL;

// Panel packers for 1/2/3/4/8 rows of A (short panels are zero-padded).
void internalPackA1(int L, float* packA, float* a, int lda);
void internalPackA2(int L, float* packA, float* a, int lda);
void internalPackA3(int L, float* packA, float* a, int lda);
void internalPackA4(int L, float* packA, float* a, int lda);
void internalPackA8(int L, float* packA, float* a, int lda);
// Remainder-row packer, selected once per call from M % 4.
void (*internalPackA)(int L, float* packA, float* a, int lda) = NULL;
void internalPackB4(int L, float* packA, float* a, int lda);
void internalPackB8(int L, float* packA, float* a, int lda);

void SGEBP_externalPackA_tiny_scale( int M, int N, int L, float *a, int lda, float *b, int ldb, float *c, int ldc, float* packA, float* packB);
void SGEBP_externalPackA_tiny_scale_8x8( int M, int N, int L, float *a, int lda, float *b, int ldb, float *c, int ldc, float* packA, float* packB);

void sgemm_4x4_pack( int, float *, int, float *, int, float *, int );
void sgemm_4x8_pack( int, float *, int, float *, int, float *, int );
void sgemm_8x8_pack( int, float *, int, float *, int, float *, int );

void SGEBP_internal_pack(int M, int N, int L, float *a, int lda, float *b, int ldb, float *c, int ldc, float* packA, float* packB);
void SGEBP_internal_pack_8x8(int M, int N, int L, float *a, int lda, float *b, int ldb, float *c, int ldc, float* packA, float* packB);

void block_sgemm_internal_pack( int M, int N, int L, float *a, int lda, float *b, int ldb, float *c, int ldc);
void block_sgemm_pack( int M, int N, int L, float *a, int lda, float *b, int ldb, float *c, int ldc);
void block_sgemm_pack_8x8( int M, int N, int L, float *a, int lda, float *b, int ldb, float *c, int ldc);
void block_sgemm_pack_8x8_thread( int M, int N, int L, float *a, int lda, float *b, int ldb, float *c, int ldc, int num_threads);

// Packs M x L matrix `a` into 8-row panels, blocked like the compute loops
// (definition continues in the next chunk).
void externalPackA8(int M, int L, float* packA, float* a,
int lda){
    float* packAptr = packA;
    // round M up to a multiple of 8 (panel height)
    int eM = M + (8 - M % 8) % 8;
    for(int i = 0; i < eM; i += mc){
        const int ib = min(eM - i, mc);
        for(int p = 0; p < L; p += kc){
            const int pb = min(L - p, kc);
            for(int k = 0; k < ib; k += 8){
                internalPackA8(pb, packAptr, a + i * lda + p + k * lda, lda);
                packAptr += 8 * pb;
            }
        }
    }
}

/*
 * Row major GEneral Matrix Multiplication that fits in any dimension by padding.
 */
void block_sgemm_external_pack( int M, int N, int L, float *a, float *b, float *c){
    // round M up to a multiple of 4; pick the remainder kernel from N % 8
    int eM = M + (4 - M % 4) % 4;
    switch(N % 8){
        case 1: sgemm_tiny_scale = sgemm_4x1; break;
        case 2: sgemm_tiny_scale = sgemm_4x2; break;
        case 3: sgemm_tiny_scale = sgemm_4x3; break;
        case 4: sgemm_tiny_scale = sgemm_4x4; break;
        case 5: sgemm_tiny_scale = sgemm_4x5; break;
        case 6: sgemm_tiny_scale = sgemm_4x6; break;
        case 7: sgemm_tiny_scale = sgemm_4x7; break;
    }
    block_sgemm_pack(eM, N, L, a, L, b, N, c, N);
}

// Multi-threaded variant: splits the columns of B into num_threads strips of
// width tN (rounded up to a multiple of 8) and runs one strip per thread.
void block_sgemm_external_pack_threading( int M, int N, int L, float *a, float *b, float *c, int num_threads){
    //printf("%s %d: [%d %d %d %d]\n", __func__, __LINE__, M, N, L, num_threads);
    int eM = M + (4 - M % 4) % 4;
    switch(N % 8){
        case 1: sgemm_tiny_scale = sgemm_4x1; break;
        case 2: sgemm_tiny_scale = sgemm_4x2; break;
        case 3: sgemm_tiny_scale = sgemm_4x3; break;
        case 4: sgemm_tiny_scale = sgemm_4x4; break;
        case 5: sgemm_tiny_scale = sgemm_4x5; break;
        case 6: sgemm_tiny_scale = sgemm_4x6; break;
        case 7: sgemm_tiny_scale = sgemm_4x7; break;
    }
    const int factor = 1;
    int tN = N / num_threads / factor;
    tN = tN + (8 - tN % 8) % 8;
    // fall back to single-threaded when a strip would be empty
    if (num_threads == 1 || N <= 8 || N - (num_threads * factor - 1) * tN <= 0){
        block_sgemm_pack(eM, N, L, a, L, b, N, c, N);
    } else {
        //#pragma parallel for num_threads(num_threads)
        #pragma omp parallel for num_threads(num_threads)
        for(int i = 0; i < num_threads * factor; ++i){
            // last strip takes whatever columns remain
            int sN = (tN < N - i * tN) ? tN : N - i * tN;
            block_sgemm_pack(eM, sN, L, a, L, b + i * tN, N, c + i * tN, N);
        }
    }
}

// Same column-strip threading for the 8x8-kernel path.
void block_sgemm_external_pack_threading_8x8( int M, int N, int L, float *a, float *b, float *c, int num_threads){
    int eM = M + (8 - M % 8) % 8;
    switch(N % 8){
        case 1: sgemm_tiny_scale = sgemm_8x1; break;
        case 2: sgemm_tiny_scale = sgemm_8x2; break;
        case 3: sgemm_tiny_scale = sgemm_8x3; break;
        case 4: sgemm_tiny_scale = sgemm_8x4; break;
        case 5: sgemm_tiny_scale = sgemm_8x5; break;
        case 6: sgemm_tiny_scale = sgemm_8x6; break;
        case 7: sgemm_tiny_scale = sgemm_8x7; break;
    }
    const int factor = 1;
    unsigned int tN = N / num_threads / factor;
    //tN = tN + (8 - tN % 8) % 8;
    // round the strip width up to a multiple of 8 by bit masking
    tN = (tN + 7) & 0xFFFFFFF8;
    //printf("tN %d, N %d, num_threads %d\n", tN, N, num_threads);
#ifdef __APPLE__
    // no OpenMP on Apple: pthread-based fallback
    block_sgemm_pack_8x8_thread(eM, N, L, a, L, b, N, c, N, num_threads);
#else
    if (num_threads == 1 || N <= 8 || N - (num_threads * factor - 1) * tN <= 0) {
        block_sgemm_pack_8x8_thread(eM, N, L, a, L, b, N, c, N, num_threads);
    } else {
#if 0
        #pragma omp parallel for num_threads(num_threads) schedule(static)
        for(int i = 0; i < num_threads * factor; ++i){
            //int sN = (tN < N - i * tN) ? tN : N - i * tN;
            int sN = tN;
            if(i == num_threads * factor - 1)
                sN = N - i * tN;
            //printf("sN %d\n", sN);
            block_sgemm_pack_8x8(eM, sN, L, a, L, b + i * tN, N, c + i * tN, N);
        }
#else
        // one explicit strip per thread; the last thread takes the remainder
        #pragma omp parallel num_threads(num_threads)
        {
            int tid = omp_get_thread_num();
#if 0
            int sN = (tN < (N - tid * tN)) ? tN : N - tid * tN;
#else
            int sN = tN;
            if(tid == num_threads - 1)
                sN = N - tid * tN;
#endif
            block_sgemm_pack_8x8(eM, sN, L, a, L, b + tid * tN, N, c + tid * tN, N);
        }
#endif
    }
#endif
}

// Internal-packing entry point (A is packed on the fly inside the GEBP).
void block_sgemm( int M, int N, int L, float *a, float *b, float *c){
    switch(N % 8){
        case 1: sgemm_tiny_scale = sgemm_4x1; break;
        case 2: sgemm_tiny_scale = sgemm_4x2; break;
        case 3: sgemm_tiny_scale = sgemm_4x3; break;
        case 4: sgemm_tiny_scale = sgemm_4x4; break;
        case 5: sgemm_tiny_scale = sgemm_4x5; break;
        case 6: sgemm_tiny_scale = sgemm_4x6; break;
        case 7: sgemm_tiny_scale = sgemm_4x7; break;
    }
    switch(M % 4){
        case 0: internalPackA = internalPackA4; break;
        case 1: internalPackA = internalPackA1; break;
        case 2: internalPackA = internalPackA2; break;
        case 3: internalPackA = internalPackA3; break;
    }
    block_sgemm_internal_pack(M, N, L, a, L, b, N, c, N);
}

// GEBP with A pre-packed externally: B columns are packed on first use
// (i == 0); the N % 8 remainder goes through sgemm_tiny_scale unpacked.
void SGEBP_externalPackA_tiny_scale( int M, int N, int L, float *a, int lda, float *b, int ldb, float *c, int ldc, float* packA, float* packB){
    //Align L to achieve better performance for better cache line alignment.
    int eL = L + (4 - L % 4) % 4;
    int remN = N % 8;
    int fN = N - remN;
    for(int i=0; i<M; i+=4 ){
        for(int j=0; j<fN; j+=8 ){
            if(i == 0)
                internalPackB8(L, packB + j * eL, b + j, ldb);
            sgemm_4x8_pack(L, a + i * L, lda, packB + j * eL, 8, c + i * ldc + j, ldc);
        }
        if(remN)
            sgemm_tiny_scale(L, a + i * L, lda, b + fN, ldb, c + i * ldc + fN, ldc);
    }
}

// 8-row variant of the above.
void SGEBP_externalPackA_tiny_scale_8x8( int M, int N, int L, float *a, int lda, float *b, int ldb, float *c, int ldc, float* packA, float* packB){
    //Align L to achieve better performance for better cache line alignment.
    int eL = L + (4 - L % 4) % 4;
    int remN = N % 8;
    int fN = N - remN;
    for(int i=0; i<M; i+=8 ){
        for(int j=0; j<fN; j+=8 ){
            // pack each 8-column panel of B once, on the first row-block
            if(i == 0)
                internalPackB8(L, packB + j * eL, b + j, ldb);
            sgemm_8x8_pack(L, a + i * L, lda, packB + j * eL, 8, c + i * ldc + j, ldc);
        }
        if(remN)
            sgemm_tiny_scale(L, a + i * L, lda, b + fN, ldb, c + i * ldc + fN, ldc);
    }
}

// GEBP that packs A itself; the M % 4 remainder rows go through the
// internalPackA function pointer chosen by block_sgemm().
void SGEBP_internalPack_tiny_scale( int M, int N, int L, float *a, int lda, float *b, int ldb, float *c, int ldc, float* packA, float* packB){
    //Align L for better cache line alignment.
    int eL = L + (4 - L % 4) % 4;
    int remM = M % 4;
    int remN = N % 8;
    int fM = M - remM;
    int fN = N - remN;
    // NOTE(review): the row loop advances by 8 but internalPackA4 packs only
    // 4 rows per panel while sgemm_8x8_pack consumes 8 -- this looks
    // inconsistent; verify against the callers before relying on this path.
    for(int i=0; i<fM; i+=8 ){
        internalPackA4(L, packA + i * eL, a + i * lda, lda);
        for(int j=0; j<fN; j+=8 ){
            if(i == 0)
                internalPackB8(L, packB + j * eL, b + j, ldb);
            sgemm_8x8_pack(L, packA + i * eL, lda, packB + j * eL, 8, c + i * ldc + j, ldc);
        }
        if(remN)
            sgemm_tiny_scale(L, packA + i * eL, lda, b + fN, ldb, c + i * ldc + fN, ldc);
    }
    //Compute last row in A
    if(remM){
        internalPackA(L, packA + fM * eL, a + fM * lda, lda);
        for(int j=0; j<fN; j+=8 ){
            sgemm_8x8_pack(L, packA + fM * eL, lda, packB + j * eL, 8, c + fM * ldc + j, ldc);
        }
        if(remN)
            sgemm_tiny_scale(L, packA + fM * eL, lda, b + fN, ldb, c + fM * ldc + fN, ldc);
    }
}

// Top-level blocked GEMM over a pre-packed A (4-row panels):
// zeroes C, then walks the nc x mc x kc cache blocks.
void block_sgemm_pack(int M, int N, int L, float *a, int lda, float *b, int ldb, float *c, int ldc){
    /* ldc N*/
    for(int i = 0; i < M; ++i){
        memset(c + ldc * i, 0, sizeof(float) * N);
    }
    float* packB = (float *)_mm_malloc(sizeof(float) * kc * N, 16);
    if (NULL == packB) {
        // allocation failed: C stays zeroed, nothing is computed
        return;
    }
    /* const int mc = 1024; const int kc = 256; const int nc = 256; */
    for(int l = 0; l < N; l += nc){
        int lb = min(N - l, nc);
        float* packAptr = a;   // packed A is consumed sequentially
        for(int i = 0; i < M; i += mc){
            int ib = min(M - i, mc);
            for(int p = 0; p < L; p += kc){
                int pb = min(L - p, kc);
                SGEBP_externalPackA_tiny_scale(ib, lb, pb, packAptr, lda, b + p * ldb + l, ldb, c + i * ldc + l, ldc, NULL, packB);
                packAptr += ib * pb;
            }
        }
    }
    _mm_free(packB);
}

//double check here
void
block_sgemm_pack_8x8_thread( int M, int N, int L, float *a, int lda, float *b, int ldb, float *c, int ldc, int num_threads){
    // 8-row-panel version used as the fallback when column strips are not
    // split across threads (num_threads is currently unused here).
    for(int i = 0; i < M; ++i){
        memset(c + ldc * i, 0, sizeof(float) * N);
    }
    float* packB = (float *)_mm_malloc(sizeof(float) * kc * N, 16);
    if (NULL == packB) {
        return;
    }
    //#pragma omp parallel for num_threads(num_threads)
    for(int l = 0; l < N; l += nc){
        float* packAptr = a;
        for(int i = 0; i < M; i += mc){
            for(int p = 0; p < L; p += kc){
                int lb = min(N - l, nc);
                int ib = min(M - i, mc);
                int pb = min(L - p, kc);
                SGEBP_externalPackA_tiny_scale_8x8(ib, lb, pb, packAptr, lda, b + p * ldb + l, ldb, c + i * ldc + l, ldc, NULL, packB);
                packAptr += ib * pb;
            }
        }
    }
    _mm_free(packB);
}

// Per-strip worker for the threaded 8x8 path; identical blocking to the
// function above.
void block_sgemm_pack_8x8( int M, int N, int L, float *a, int lda, float *b, int ldb, float *c, int ldc){
    for(int i = 0; i < M; ++i){
        memset(c + ldc * i, 0, sizeof(float) * N);
    }
    float* packB = (float *)_mm_malloc(sizeof(float) * kc * N, 16);
    if (NULL == packB) {
        return;
    }
    for(int l = 0; l < N; l += nc){
        float* packAptr = a;
        for(int i = 0; i < M; i += mc){
            //float* packAptr2 = packAptr;
            for(int p = 0; p < L; p += kc){
                int lb = min(N - l, nc);
                int ib = min(M - i, mc);
                int pb = min(L - p, kc);
                SGEBP_externalPackA_tiny_scale_8x8(ib, lb, pb, packAptr, lda, b + p * ldb + l, ldb, c + i * ldc + l, ldc, NULL, packB);
                packAptr += ib * pb;
            }
        }
    }
    _mm_free(packB);
}

// Blocked GEMM that packs A inside the GEBP (no pre-packed A required).
void block_sgemm_internal_pack( int M, int N, int L, float *a, int lda, float *b, int ldb, float *c, int ldc){
    int eM = M + (4 - M % 4) % 4;
    for(int i = 0; i < M; ++i){
        memset(c + ldc * i, 0, sizeof(float) * N);
    }
    float* packA = (float *)_mm_malloc(sizeof(float) * kc * eM, 16);
    if (NULL == packA) {
        return;
    }
    float* packB = (float *)_mm_malloc(sizeof(float) * kc * N, 16);
    if (NULL == packB) {
        _mm_free(packA);
        return;
    }
    for(int l = 0; l < N; l += nc){
        int lb = min(N - l, nc);
        for(int i = 0; i < M; i += mc){
            int ib = min(M - i, mc);
            for(int p = 0; p < L; p += kc){
                int pb = min(L - p, kc);
                SGEBP_internalPack_tiny_scale(ib, lb,
pb, a + i * lda + p, lda, b + p * ldb + l, ldb, c + i * ldc + l, ldc, packA, packB);
            }
        }
    }
    _mm_free(packA);
    _mm_free(packB);
}

// GEBP over packed 4-row A panels and packed 8-column B panels
// (no remainder handling: M % 4 == 0 and N % 8 == 0 assumed by the loops).
void SGEBP_internal_pack( int M, int N, int L, float *a, int lda, float *b, int ldb, float *c, int ldc, float* packA, float* packB)
{
    for(int i=0; i<M; i+=4 ){
        internalPackA4(L, packA + i * L, a + i * lda, lda);
        for(int j=0; j<N; j+=8 ){
            if(i == 0)
                internalPackB8(L, packB + j * L, b + j, ldb);
            sgemm_4x8_pack(L, packA + i * L, lda, packB + j * L, 8, c + i * ldc + j, ldc);
        }
    }
}

/*
 * Computes a 4x4 block.
 */
// C[4x4] += A[4xL] * B[Lx4] with A packed column-major (4 floats per depth
// step) and B packed 4 floats per depth step; accumulates into C.
void sgemm_4x4_pack( int L, float *a, int lda, float *b, int ldb, float *c, int ldc )
{
    float *aptr = a;
    float *bptr = b;
    float *cptr = c;
    float32x4_t vb;
    float32x4_t va0, va1, va2, va3;
    // load the 4 output rows as accumulators
    float32x4_t vc0 = vld1q_f32(cptr);
    cptr += ldc;
    float32x4_t vc1 = vld1q_f32(cptr);
    cptr += ldc;
    float32x4_t vc2 = vld1q_f32(cptr);
    cptr += ldc;
    float32x4_t vc3 = vld1q_f32(cptr);
    for(int p = 0; p < L; ++p){
        vb = vld1q_f32(bptr);
        // broadcast one A element per output row
        va0 = vld1q_dup_f32(aptr);
        va1 = vld1q_dup_f32(aptr + 1);
        va2 = vld1q_dup_f32(aptr + 2);
        va3 = vld1q_dup_f32(aptr + 3);
#if __aarch64__
        vc0 = vfmaq_f32(vc0, va0, vb);
        vc1 = vfmaq_f32(vc1, va1, vb);
        vc2 = vfmaq_f32(vc2, va2, vb);
        vc3 = vfmaq_f32(vc3, va3, vb);
#else
        vc0 = vmlaq_f32(vc0, va0, vb);
        vc1 = vmlaq_f32(vc1, va1, vb);
        vc2 = vmlaq_f32(vc2, va2, vb);
        vc3 = vmlaq_f32(vc3, va3, vb);
#endif // __aarch64__
        bptr += 4;
        aptr += 4;
    }
    // write the accumulators back
    cptr = c;
    vst1q_f32(cptr, vc0);
    cptr+=ldc;
    vst1q_f32(cptr, vc1);
    cptr+=ldc;
    vst1q_f32(cptr, vc2);
    cptr+=ldc;
    vst1q_f32(cptr, vc3);
}

// C[8x8] += A[8xL] * B[Lx8]; 16 q-register accumulators (vc0..vc7 = left
// halves of the 8 rows, vc8..vcF = right halves).
inline void sgemm_8x8_pack( int L, float *a, int lda, float *b, int ldb, float *c, int ldc )
{
    float *aptr = a;
    float *bptr = b;
    float *cptr = c;
    float32x4_t vb0, vb1;
    float32x4_t va0, va1;
    float32x4_t vc0 = vld1q_f32(cptr);
    float32x4_t vc8 = vld1q_f32(cptr + 4);
    cptr += ldc;
    float32x4_t vc1 = vld1q_f32(cptr);
    float32x4_t vc9 = vld1q_f32(cptr + 4);
    cptr += ldc;
    float32x4_t vc2 = vld1q_f32(cptr);
    float32x4_t vcA = vld1q_f32(cptr + 4);
    cptr += ldc;
    float32x4_t vc3 = vld1q_f32(cptr);
    float32x4_t vcB = vld1q_f32(cptr + 4);
    cptr += ldc;
    float32x4_t vc4 = vld1q_f32(cptr);
    float32x4_t vcC = vld1q_f32(cptr + 4);
    cptr += ldc;
    float32x4_t vc5 = vld1q_f32(cptr);
    float32x4_t vcD = vld1q_f32(cptr + 4);
    cptr += ldc;
    float32x4_t vc6 = vld1q_f32(cptr);
    float32x4_t vcE = vld1q_f32(cptr + 4);
    cptr += ldc;
    float32x4_t vc7 = vld1q_f32(cptr);
    float32x4_t vcF = vld1q_f32(cptr + 4);
    for(int p = 0; p < L; ++p){
        vb0 = vld1q_f32(bptr);
        vb1 = vld1q_f32(bptr + 4);
        va0 = vld1q_f32(aptr);
        va1 = vld1q_f32(aptr + 4);
#if __aarch64__
        // AArch64: fused multiply-add with a lane broadcast from va0/va1
        vc0 = vfmaq_laneq_f32(vc0, vb0, va0, 0);
        vc1 = vfmaq_laneq_f32(vc1, vb0, va0, 1);
        vc2 = vfmaq_laneq_f32(vc2, vb0, va0, 2);
        vc3 = vfmaq_laneq_f32(vc3, vb0, va0, 3);
        vc4 = vfmaq_laneq_f32(vc4, vb0, va1, 0);
        vc5 = vfmaq_laneq_f32(vc5, vb0, va1, 1);
        vc6 = vfmaq_laneq_f32(vc6, vb0, va1, 2);
        vc7 = vfmaq_laneq_f32(vc7, vb0, va1, 3);
        vc8 = vfmaq_laneq_f32(vc8, vb1, va0, 0);
        vc9 = vfmaq_laneq_f32(vc9, vb1, va0, 1);
        vcA = vfmaq_laneq_f32(vcA, vb1, va0, 2);
        vcB = vfmaq_laneq_f32(vcB, vb1, va0, 3);
        vcC = vfmaq_laneq_f32(vcC, vb1, va1, 0);
        vcD = vfmaq_laneq_f32(vcD, vb1, va1, 1);
        vcE = vfmaq_laneq_f32(vcE, vb1, va1, 2);
        vcF = vfmaq_laneq_f32(vcF, vb1, va1, 3);
#else
        // ARMv7: no lane FMA, re-load each A element as a broadcast
        vc0 = vmlaq_f32(vc0, vb0, vld1q_dup_f32(aptr + 0));
        vc1 = vmlaq_f32(vc1, vb0, vld1q_dup_f32(aptr + 1));
        vc2 = vmlaq_f32(vc2, vb0, vld1q_dup_f32(aptr + 2));
        vc3 = vmlaq_f32(vc3, vb0, vld1q_dup_f32(aptr + 3));
        vc4 = vmlaq_f32(vc4, vb0, vld1q_dup_f32(aptr + 4));
        vc5 = vmlaq_f32(vc5, vb0, vld1q_dup_f32(aptr + 5));
        vc6 = vmlaq_f32(vc6, vb0, vld1q_dup_f32(aptr + 6));
        vc7 = vmlaq_f32(vc7, vb0, vld1q_dup_f32(aptr + 7));
        vc8 = vmlaq_f32(vc8, vb1, vld1q_dup_f32(aptr + 0));
        vc9 = vmlaq_f32(vc9, vb1, vld1q_dup_f32(aptr + 1));
        vcA = vmlaq_f32(vcA, vb1, vld1q_dup_f32(aptr + 2));
        vcB = vmlaq_f32(vcB, vb1, vld1q_dup_f32(aptr + 3));
        vcC = vmlaq_f32(vcC, vb1, vld1q_dup_f32(aptr + 4));
        vcD = vmlaq_f32(vcD, vb1, vld1q_dup_f32(aptr + 5));
        vcE = vmlaq_f32(vcE, vb1, vld1q_dup_f32(aptr + 6));
        vcF = vmlaq_f32(vcF, vb1, vld1q_dup_f32(aptr + 7));
#endif // __aarch64__
        bptr += 8;
        aptr += 8;
    }
    cptr = c;
    vst1q_f32(cptr, vc0);
    vst1q_f32(cptr + 4, vc8);
    cptr+=ldc;
    vst1q_f32(cptr, vc1);
    vst1q_f32(cptr + 4, vc9);
    cptr+=ldc;
    vst1q_f32(cptr, vc2);
    vst1q_f32(cptr + 4, vcA);
    cptr+=ldc;
    vst1q_f32(cptr, vc3);
    vst1q_f32(cptr + 4, vcB);
    cptr+=ldc;
    vst1q_f32(cptr, vc4);
    vst1q_f32(cptr + 4, vcC);
    cptr+=ldc;
    vst1q_f32(cptr, vc5);
    vst1q_f32(cptr + 4, vcD);
    cptr+=ldc;
    vst1q_f32(cptr, vc6);
    vst1q_f32(cptr + 4, vcE);
    cptr+=ldc;
    vst1q_f32(cptr, vc7);
    vst1q_f32(cptr + 4, vcF);
}

// C[4x8] += A[4xL] * B[Lx8]; accumulators vc0..vc3 hold the left 4 columns,
// vc4..vc7 the right 4, of the 4 output rows.
void sgemm_4x8_pack( int L, float *a, int lda, float *b, int ldb, float *c, int ldc )
{
    float *aptr = a;
    float *bptr = b;
    float *cptr = c;
    float32x4_t vb1, vb2;
    float32x4_t va0, va1, va2, va3;
    float32x4_t vc0 = vld1q_f32(cptr);
    float32x4_t vc4 = vld1q_f32(cptr + 4);
    cptr += ldc;
    float32x4_t vc1 = vld1q_f32(cptr);
    float32x4_t vc5 = vld1q_f32(cptr + 4);
    cptr += ldc;
    float32x4_t vc2 = vld1q_f32(cptr);
    float32x4_t vc6 = vld1q_f32(cptr + 4);
    cptr += ldc;
    float32x4_t vc3 = vld1q_f32(cptr);
    float32x4_t vc7 = vld1q_f32(cptr + 4);
    for(int p = 0; p < L; ++p){
        vb1 = vld1q_f32(bptr);
        vb2 = vld1q_f32(bptr + 4);
        va0 = vld1q_dup_f32(aptr);
        va1 = vld1q_dup_f32(aptr + 1);
        va2 = vld1q_dup_f32(aptr + 2);
        va3 = vld1q_dup_f32(aptr + 3);
#if __aarch64__
        vc0 = vfmaq_f32(vc0, va0, vb1);
        vc1 = vfmaq_f32(vc1, va1, vb1);
        vc2 = vfmaq_f32(vc2, va2, vb1);
        vc3 = vfmaq_f32(vc3, va3, vb1);
        vc4 = vfmaq_f32(vc4, va0, vb2);
        vc5 = vfmaq_f32(vc5, va1, vb2);
        vc6 = vfmaq_f32(vc6, va2, vb2);
        vc7 = vfmaq_f32(vc7, va3, vb2);
#else
        vc0 = vmlaq_f32(vc0, va0, vb1);
        vc1 = vmlaq_f32(vc1, va1, vb1);
        vc2 = vmlaq_f32(vc2, va2, vb1);
        vc3 = vmlaq_f32(vc3, va3, vb1);
        vc4 = vmlaq_f32(vc4, va0, vb2);
        vc5 = vmlaq_f32(vc5, va1, vb2);
        vc6 = vmlaq_f32(vc6, va2, vb2);
        vc7 = vmlaq_f32(vc7, va3, vb2);
#endif // __aarch64__
        bptr += 8;
        aptr += 4;
    }
    cptr = c;
    vst1q_f32(cptr, vc0);
    vst1q_f32(cptr + 4, vc4);
    cptr+=ldc;
    vst1q_f32(cptr, vc1);
    vst1q_f32(cptr + 4, vc5);
    cptr+=ldc;
vst1q_f32(cptr, vc2); vst1q_f32(cptr + 4, vc6); cptr+=ldc; vst1q_f32(cptr, vc3); vst1q_f32(cptr + 4, vc7); } void externalPackA(int M, int L, float* packA, float* a, int lda){ float* packAptr = packA; int remM = M % 4; int eM = M + (4 - M % 4) % 4;//Ceil void (*remPack)(int, float*, float*, int) = NULL; switch(remM){ case 0: remPack = internalPackA4; break; case 1: remPack = internalPackA1; break; case 2: remPack = internalPackA2; break; case 3: remPack = internalPackA3; break; } for(int i = 0; i < eM; i += mc){ const int ib = min(eM - i, mc); for(int p = 0; p < L; p += kc){ const int pb = min(L - p, kc); // printf("p %d pb %d\n", p, pb); for(int k = 0; k < ib -4; k += 4){ internalPackA4(pb, packAptr, a + i * lda + p + k * lda, lda); packAptr += 4 * pb; } remPack(pb, packAptr, a + i * lda + p + (ib - 4) * lda, lda); packAptr += 4 * pb; } } } void internalPackA8(int L, float* packA, float* a, int lda){ float *packAptr = packA; float *a_p0_ptr, *a_p1_ptr, *a_p2_ptr, *a_p3_ptr; float *a_p4_ptr, *a_p5_ptr, *a_p6_ptr, *a_p7_ptr; a_p0_ptr = a; a_p1_ptr = a + lda; a_p2_ptr = a + lda * 2; a_p3_ptr = a + lda * 3; a_p4_ptr = a + lda * 4; a_p5_ptr = a + lda * 5; a_p6_ptr = a + lda * 6; a_p7_ptr = a + lda * 7; for(int i = 0; i < L; ++i){ *packAptr++ = *a_p0_ptr++; *packAptr++ = *a_p1_ptr++; *packAptr++ = *a_p2_ptr++; *packAptr++ = *a_p3_ptr++; *packAptr++ = *a_p4_ptr++; *packAptr++ = *a_p5_ptr++; *packAptr++ = *a_p6_ptr++; *packAptr++ = *a_p7_ptr++; } } void internalPackA4(int L, float* packA, float* a, int lda){ float *packAptr = packA; float *a_p0_ptr, *a_p1_ptr, *a_p2_ptr, *a_p3_ptr; a_p0_ptr = a; a_p1_ptr = a + lda; a_p2_ptr = a + lda * 2; a_p3_ptr = a + lda * 3; for(int i = 0; i < L; ++i){ *packAptr++ = *a_p0_ptr++; *packAptr++ = *a_p1_ptr++; *packAptr++ = *a_p2_ptr++; *packAptr++ = *a_p3_ptr++; } } void internalPackA3(int L, float* packA, float* a, int lda){ float *packAptr = packA; float *a_p0_ptr, *a_p1_ptr, *a_p2_ptr; a_p0_ptr = a; a_p1_ptr = a + lda; a_p2_ptr = a + 
lda * 2; for(int i = 0; i < L; ++i){ *packAptr++ = *a_p0_ptr++; *packAptr++ = *a_p1_ptr++; *packAptr++ = *a_p2_ptr++; *packAptr++ = 0.0f; } } void internalPackA2(int L, float* packA, float* a, int lda){ float *packAptr = packA; float *a_p0_ptr, *a_p1_ptr; a_p0_ptr = a; a_p1_ptr = a + lda; for(int i = 0; i < L; ++i){ *packAptr++ = *a_p0_ptr++; *packAptr++ = *a_p1_ptr++; *packAptr++ = 0.0f; *packAptr++ = 0.0f; } } void internalPackA1(int L, float* packA, float* a, int lda){ float *packAptr = packA; float *a_p0_ptr; a_p0_ptr = a; for(int i = 0; i < L; ++i){ *packAptr++ = *a_p0_ptr++; *packAptr++ = +0.0f; *packAptr++ = +0.0f; *packAptr++ = +0.0f; } } /* * Helpers that packs A and B in their continguous accessing pattern. */ void internalPackB4(int L, float* packB, float* B, int ldb){ float *bp = B; float *packBptr = packB; for(int i = 0; i < L; ++i){ vst1q_f32(packBptr, vld1q_f32(bp)); packBptr += 4; bp += ldb; } } void internalPackB8(int L, float* packB, float* B, int ldb){ float *bp = B; float *packBptr = packB; for(int i = 0; i < L; ++i){ vst1q_f32(packBptr, vld1q_f32(bp)); vst1q_f32(packBptr + 4, vld1q_f32(bp + 4)); packBptr += 8; bp += ldb; } } void sgemm_4x1(int L, float *a, int lda, float* b, int ldb, float *c, int ldc){ float barr[1]; float *cptr = c; float32x4_t va; float32x4_t vc[1]; vc[0] = vld1q_lane_f32(cptr, vc[0], 0); cptr += ldc; vc[0] = vld1q_lane_f32(cptr, vc[0], 1); cptr += ldc; vc[0] = vld1q_lane_f32(cptr, vc[0], 2); cptr += ldc; vc[0] = vld1q_lane_f32(cptr, vc[0], 3); float *aptr = a; float *bptr = b; for(int p = 0; p < L; ++p){ va = vld1q_f32(aptr); barr[0] = *(bptr+0); #if __aarch64__ vc[0] = vfmaq_n_f32(vc[0], va, barr[0]); #else vc[0] = vmlaq_n_f32(vc[0], va, barr[0]); #endif // __aarch64__ aptr += 4; bptr += ldb; } cptr = c; vst1q_lane_f32(cptr, vc[0], 0); cptr += ldc; vst1q_lane_f32(cptr, vc[0], 1); cptr += ldc; vst1q_lane_f32(cptr, vc[0], 2); cptr += ldc; vst1q_lane_f32(cptr, vc[0], 3); } void sgemm_4x2(int L, float *a, int lda, float* b, 
               int ldb, float *c, int ldc){
    /* 4x2 micro-kernel: C(4x2) += A_packed(4xL) * B(Lx2).
     * vc[j] accumulates column j of the tile, one lane per row. */
    float barr[2];
    float *cptr = c;
    float32x4_t va;
    float32x4_t vc[2];
    /* Load the current C tile, lane i <- row i of each column. */
    vc[0] = vld1q_lane_f32(cptr, vc[0], 0); vc[1] = vld1q_lane_f32(cptr + 1, vc[1], 0); cptr += ldc;
    vc[0] = vld1q_lane_f32(cptr, vc[0], 1); vc[1] = vld1q_lane_f32(cptr + 1, vc[1], 1); cptr += ldc;
    vc[0] = vld1q_lane_f32(cptr, vc[0], 2); vc[1] = vld1q_lane_f32(cptr + 1, vc[1], 2); cptr += ldc;
    vc[0] = vld1q_lane_f32(cptr, vc[0], 3); vc[1] = vld1q_lane_f32(cptr + 1, vc[1], 3);
    float *aptr = a;
    float *bptr = b;
    for(int p = 0; p < L; ++p){
        va = vld1q_f32(aptr);
        barr[0] = *(bptr+0);
        barr[1] = *(bptr+1);
#if __aarch64__
        vc[0] = vfmaq_n_f32(vc[0], va, barr[0]);
        vc[1] = vfmaq_n_f32(vc[1], va, barr[1]);
#else
        vc[0] = vmlaq_n_f32(vc[0], va, barr[0]);
        vc[1] = vmlaq_n_f32(vc[1], va, barr[1]);
#endif // __aarch64__
        aptr += 4;
        bptr += ldb;
    }
    /* Store the updated tile back, lane by lane. */
    cptr = c;
    vst1q_lane_f32(cptr, vc[0], 0); vst1q_lane_f32(cptr + 1, vc[1], 0); cptr += ldc;
    vst1q_lane_f32(cptr, vc[0], 1); vst1q_lane_f32(cptr + 1, vc[1], 1); cptr += ldc;
    vst1q_lane_f32(cptr, vc[0], 2); vst1q_lane_f32(cptr + 1, vc[1], 2); cptr += ldc;
    vst1q_lane_f32(cptr, vc[0], 3); vst1q_lane_f32(cptr + 1, vc[1], 3);
}

/* 4x3 micro-kernel: C(4x3) += A_packed(4xL) * B(Lx3); same scheme as
 * sgemm_4x2 with a third column accumulator. */
void sgemm_4x3(int L, float *a, int lda, float* b, int ldb, float *c, int ldc){
    float barr[3];
    float *cptr = c;
    float32x4_t va;
    float32x4_t vc[3];
    vc[0] = vld1q_lane_f32(cptr, vc[0], 0); vc[1] = vld1q_lane_f32(cptr + 1, vc[1], 0); vc[2] = vld1q_lane_f32(cptr + 2, vc[2], 0); cptr += ldc;
    vc[0] = vld1q_lane_f32(cptr, vc[0], 1); vc[1] = vld1q_lane_f32(cptr + 1, vc[1], 1); vc[2] = vld1q_lane_f32(cptr + 2, vc[2], 1); cptr += ldc;
    vc[0] = vld1q_lane_f32(cptr, vc[0], 2); vc[1] = vld1q_lane_f32(cptr + 1, vc[1], 2); vc[2] = vld1q_lane_f32(cptr + 2, vc[2], 2); cptr += ldc;
    vc[0] = vld1q_lane_f32(cptr, vc[0], 3); vc[1] = vld1q_lane_f32(cptr + 1, vc[1], 3); vc[2] = vld1q_lane_f32(cptr + 2, vc[2], 3);
    float *aptr = a;
    float *bptr = b;
    for(int p = 0; p < L; ++p){
        va = vld1q_f32(aptr);
        barr[0] = *(bptr+0);
        barr[1] = *(bptr+1);
        barr[2] = *(bptr+2);
#if __aarch64__
        vc[0] = vfmaq_n_f32(vc[0], va, barr[0]);
        vc[1] = vfmaq_n_f32(vc[1], va, barr[1]);
        vc[2] = vfmaq_n_f32(vc[2], va, barr[2]);
#else
        vc[0] = vmlaq_n_f32(vc[0], va, barr[0]);
        vc[1] = vmlaq_n_f32(vc[1], va, barr[1]);
        vc[2] = vmlaq_n_f32(vc[2], va, barr[2]);
#endif // __aarch64__
        aptr += 4;
        bptr += ldb;
    }
    cptr = c;
    vst1q_lane_f32(cptr, vc[0], 0); vst1q_lane_f32(cptr + 1, vc[1], 0); vst1q_lane_f32(cptr + 2, vc[2], 0); cptr += ldc;
    vst1q_lane_f32(cptr, vc[0], 1); vst1q_lane_f32(cptr + 1, vc[1], 1); vst1q_lane_f32(cptr + 2, vc[2], 1); cptr += ldc;
    vst1q_lane_f32(cptr, vc[0], 2); vst1q_lane_f32(cptr + 1, vc[1], 2); vst1q_lane_f32(cptr + 2, vc[2], 2); cptr += ldc;
    vst1q_lane_f32(cptr, vc[0], 3); vst1q_lane_f32(cptr + 1, vc[1], 3); vst1q_lane_f32(cptr + 2, vc[2], 3);
}

/* 4x4 micro-kernel: C(4x4) += A_packed(4xL) * B(Lx4).  Each vcI holds one
 * full row of the tile; per k step, row i of C accumulates a[i] * B-row. */
inline void sgemm_4x4(int L, float *a, int lda, float *b, int ldb, float *c, int ldc)
{
    float *aptr = a;
    float *bptr = b;
    float *cptr = c;
    float32x4_t vb;
    float32x4_t va0, va1, va2, va3;
    float32x4_t vc0 = vld1q_f32(cptr); cptr += ldc;
    float32x4_t vc1 = vld1q_f32(cptr); cptr += ldc;
    float32x4_t vc2 = vld1q_f32(cptr); cptr += ldc;
    float32x4_t vc3 = vld1q_f32(cptr);
    for(int p = 0; p < L; ++p){
        vb = vld1q_f32(bptr);
        /* Broadcast each of the 4 packed A values for this k step. */
        va0 = vld1q_dup_f32(aptr);
        va1 = vld1q_dup_f32(aptr + 1);
        va2 = vld1q_dup_f32(aptr + 2);
        va3 = vld1q_dup_f32(aptr + 3);
#if __aarch64__
        vc0 = vfmaq_f32(vc0, va0, vb);
        vc1 = vfmaq_f32(vc1, va1, vb);
        vc2 = vfmaq_f32(vc2, va2, vb);
        vc3 = vfmaq_f32(vc3, va3, vb);
#else
        vc0 = vmlaq_f32(vc0, va0, vb);
        vc1 = vmlaq_f32(vc1, va1, vb);
        vc2 = vmlaq_f32(vc2, va2, vb);
        vc3 = vmlaq_f32(vc3, va3, vb);
#endif // __aarch64__
        bptr += ldb;
        aptr += 4;
    }
    cptr = c;
    vst1q_f32(cptr, vc0); cptr+=ldc;
    vst1q_f32(cptr, vc1); cptr+=ldc;
    vst1q_f32(cptr, vc2); cptr+=ldc;
    vst1q_f32(cptr, vc3);
}

/* 4x5 micro-kernel: 4 full row vectors plus one extra column accumulator
 * (vc4, one lane per row) for the fifth column (body continues). */
inline void sgemm_4x5(int L, float *a, int lda, float *b, int ldb, float *c, int ldc)
{
    float *aptr = a;
    float *bptr = b;
    float *cptr = c;
    float b4;
    float32x4_t vb;
    float32x4_t va0, va1, va2, va3, va;
    float32x4_t vc0, vc1, vc2,
                vc3, vc4, vzero;
    vzero = vdupq_n_f32(0.0f);
    vc4 = vzero;
    /* Rows 0..3 of columns 0..3 load as vectors; column 4 loads lane-wise. */
    vc0 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 0); cptr += ldc;
    vc1 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 1); cptr += ldc;
    vc2 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 2); cptr += ldc;
    vc3 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 3);
    for(int p = 0; p < L; ++p){
        vb = vld1q_f32(bptr);
        b4 = *(bptr + 4);
        va0 = vld1q_dup_f32(aptr);
        va1 = vld1q_dup_f32(aptr + 1);
        va2 = vld1q_dup_f32(aptr + 2);
        va3 = vld1q_dup_f32(aptr + 3);
        va = vld1q_f32(aptr);
#if __aarch64__
        vc0 = vfmaq_f32(vc0, va0, vb);
        vc1 = vfmaq_f32(vc1, va1, vb);
        vc2 = vfmaq_f32(vc2, va2, vb);
        vc3 = vfmaq_f32(vc3, va3, vb);
        vc4 = vfmaq_n_f32(vc4, va, b4);
#else
        vc0 = vmlaq_f32(vc0, va0, vb);
        vc1 = vmlaq_f32(vc1, va1, vb);
        vc2 = vmlaq_f32(vc2, va2, vb);
        vc3 = vmlaq_f32(vc3, va3, vb);
        vc4 = vmlaq_n_f32(vc4, va, b4);
#endif // __aarch64__
        bptr += ldb;
        aptr += 4;
    }
    cptr = c;
    vst1q_f32(cptr, vc0); vst1q_lane_f32(cptr + 4, vc4, 0); cptr+=ldc;
    vst1q_f32(cptr, vc1); vst1q_lane_f32(cptr + 4, vc4, 1); cptr+=ldc;
    vst1q_f32(cptr, vc2); vst1q_lane_f32(cptr + 4, vc4, 2); cptr+=ldc;
    vst1q_f32(cptr, vc3); vst1q_lane_f32(cptr + 4, vc4, 3);
}

/* 4x6 micro-kernel: like sgemm_4x5 with two extra column accumulators
 * (vc4, vc5) for columns 4 and 5. */
inline void sgemm_4x6(int L, float *a, int lda, float *b, int ldb, float *c, int ldc)
{
    float *aptr = a;
    float *bptr = b;
    float *cptr = c;
    float b4, b5;
    float32x4_t vb;
    float32x4_t va0, va1, va2, va3, va;
    float32x4_t vc0, vc1, vc2, vc3, vc4, vc5, vzero;
    vzero = vdupq_n_f32(0.0f);
    vc4 = vzero;
    vc5 = vzero;
    vc0 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 0); vc5 = vld1q_lane_f32(cptr + 5, vc5, 0); cptr += ldc;
    vc1 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 1); vc5 = vld1q_lane_f32(cptr + 5, vc5, 1); cptr += ldc;
    vc2 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 2); vc5 = vld1q_lane_f32(cptr + 5, vc5, 2); cptr += ldc;
    vc3 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 3); vc5 = vld1q_lane_f32(cptr + 5, vc5, 3);
    for(int p = 0; p < L; ++p){
        vb = vld1q_f32(bptr);
        b4 = *(bptr + 4);
        b5 = *(bptr + 5);
        va0 = vld1q_dup_f32(aptr);
        va1 = vld1q_dup_f32(aptr + 1);
        va2 = vld1q_dup_f32(aptr + 2);
        va3 = vld1q_dup_f32(aptr + 3);
        va = vld1q_f32(aptr);
#if __aarch64__
        vc0 = vfmaq_f32(vc0, va0, vb);
        vc1 = vfmaq_f32(vc1, va1, vb);
        vc2 = vfmaq_f32(vc2, va2, vb);
        vc3 = vfmaq_f32(vc3, va3, vb);
        vc4 = vfmaq_n_f32(vc4, va, b4);
        vc5 = vfmaq_n_f32(vc5, va, b5);
#else
        vc0 = vmlaq_f32(vc0, va0, vb);
        vc1 = vmlaq_f32(vc1, va1, vb);
        vc2 = vmlaq_f32(vc2, va2, vb);
        vc3 = vmlaq_f32(vc3, va3, vb);
        vc4 = vmlaq_n_f32(vc4, va, b4);
        vc5 = vmlaq_n_f32(vc5, va, b5);
#endif // __aarch64__
        bptr += ldb;
        aptr += 4;
    }
    cptr = c;
    vst1q_f32(cptr, vc0); vst1q_lane_f32(cptr + 4, vc4, 0); vst1q_lane_f32(cptr + 5, vc5, 0); cptr+=ldc;
    vst1q_f32(cptr, vc1); vst1q_lane_f32(cptr + 4, vc4, 1); vst1q_lane_f32(cptr + 5, vc5, 1); cptr+=ldc;
    vst1q_f32(cptr, vc2); vst1q_lane_f32(cptr + 4, vc4, 2); vst1q_lane_f32(cptr + 5, vc5, 2); cptr+=ldc;
    vst1q_f32(cptr, vc3); vst1q_lane_f32(cptr + 4, vc4, 3); vst1q_lane_f32(cptr + 5, vc5, 3);
}

/* 4x7 micro-kernel: like sgemm_4x6 with a third extra column accumulator
 * (vc6) for column 6. */
inline void sgemm_4x7(int L, float *a, int lda, float *b, int ldb, float *c, int ldc)
{
    float *aptr = a;
    float *bptr = b;
    float *cptr = c;
    float b4, b5, b6;
    float32x4_t vb;
    float32x4_t va0, va1, va2, va3, va;
    float32x4_t vc0, vc1, vc2, vc3, vc4, vc5, vc6, vzero;
    vzero = vdupq_n_f32(0.0f);
    vc4 = vc5 = vc6 = vzero;
    vc0 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 0); vc5 = vld1q_lane_f32(cptr + 5, vc5, 0); vc6 = vld1q_lane_f32(cptr + 6, vc6, 0); cptr += ldc;
    vc1 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 1); vc5 = vld1q_lane_f32(cptr + 5, vc5, 1); vc6 = vld1q_lane_f32(cptr + 6, vc6, 1); cptr += ldc;
    vc2 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 2); vc5 = vld1q_lane_f32(cptr + 5, vc5, 2); vc6 = vld1q_lane_f32(cptr + 6, vc6, 2); cptr += ldc;
    vc3 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 3); vc5 = vld1q_lane_f32(cptr + 5, vc5, 3); vc6 = vld1q_lane_f32(cptr + 6, vc6, 3);
    for(int p = 0; p < L; ++p){
        vb = vld1q_f32(bptr);
        b4 = *(bptr + 4);
        b5 = *(bptr + 5);
        b6 = *(bptr + 6);
        va0 = vld1q_dup_f32(aptr);
        va1 = vld1q_dup_f32(aptr + 1);
        va2 = vld1q_dup_f32(aptr + 2);
        va3 = vld1q_dup_f32(aptr + 3);
        va = vld1q_f32(aptr);
#if __aarch64__
        vc0 = vfmaq_f32(vc0, va0, vb);
        vc1 = vfmaq_f32(vc1, va1, vb);
        vc2 = vfmaq_f32(vc2, va2, vb);
        vc3 = vfmaq_f32(vc3, va3, vb);
        vc4 = vfmaq_n_f32(vc4, va, b4);
        vc5 = vfmaq_n_f32(vc5, va, b5);
        vc6 = vfmaq_n_f32(vc6, va, b6);
#else
        vc0 = vmlaq_f32(vc0, va0, vb);
        vc1 = vmlaq_f32(vc1, va1, vb);
        vc2 = vmlaq_f32(vc2, va2, vb);
        vc3 = vmlaq_f32(vc3, va3, vb);
        vc4 = vmlaq_n_f32(vc4, va, b4);
        vc5 = vmlaq_n_f32(vc5, va, b5);
        vc6 = vmlaq_n_f32(vc6, va, b6);
#endif // __aarch64__
        bptr += ldb;
        aptr += 4;
    }
    cptr = c;
    vst1q_f32(cptr, vc0); vst1q_lane_f32(cptr + 4, vc4, 0); vst1q_lane_f32(cptr + 5, vc5, 0); vst1q_lane_f32(cptr + 6, vc6, 0); cptr+=ldc;
    vst1q_f32(cptr, vc1); vst1q_lane_f32(cptr + 4, vc4, 1); vst1q_lane_f32(cptr + 5, vc5, 1); vst1q_lane_f32(cptr + 6, vc6, 1); cptr+=ldc;
    vst1q_f32(cptr, vc2); vst1q_lane_f32(cptr + 4, vc4, 2); vst1q_lane_f32(cptr + 5, vc5, 2); vst1q_lane_f32(cptr + 6, vc6, 2); cptr+=ldc;
    vst1q_f32(cptr, vc3); vst1q_lane_f32(cptr + 4, vc4, 3); vst1q_lane_f32(cptr + 5, vc5, 3); vst1q_lane_f32(cptr + 6, vc6, 3);
}

/* 8x1 micro-kernel: C(8x1) += A_packed(8xL) * B(Lx1).  vc4 holds rows 0..3
 * of the single C column, vcE rows 4..7, one lane per row (declarations
 * continue past this chunk boundary). */
void sgemm_8x1(int L, float *a, int lda, float *b, int ldb, float *c, int ldc){
    float *aptr = a;
    float *bptr = b;
    float *cptr = c;
    float b4;
    float32x4_t vzero = vdupq_n_f32(0.0f);
    float32x4_t vb;
    float32x4_t va0, va1;
    float32x4_t vc4; //next 4 rows
    float32x4_t vcE;
    //vc 4 5 6 and E F G hold column values.
vc4 = vcE = vzero; vc4 = vld1q_lane_f32(cptr, vc4, 0); cptr += ldc; vc4 = vld1q_lane_f32(cptr, vc4, 1); cptr += ldc; vc4 = vld1q_lane_f32(cptr, vc4, 2); cptr += ldc; vc4 = vld1q_lane_f32(cptr, vc4, 3); cptr += ldc; vcE = vld1q_lane_f32(cptr, vcE, 0); cptr += ldc; vcE = vld1q_lane_f32(cptr, vcE, 1); cptr += ldc; vcE = vld1q_lane_f32(cptr, vcE, 2); cptr += ldc; vcE = vld1q_lane_f32(cptr, vcE, 3); for(int p = 0; p < L; ++p){ vb = vld1q_f32(bptr); b4 = *(bptr); va0 = vld1q_f32(aptr); va1 = vld1q_f32(aptr + 4); //A row in A multiplies a single value in B by column #if __aarch64__ vc4 = vfmaq_n_f32(vc4, va0, b4); vcE = vfmaq_n_f32(vcE, va1, b4); #else vc4 = vmlaq_n_f32(vc4, va0, b4); vcE = vmlaq_n_f32(vcE, va1, b4); #endif // __aarch64__ bptr += ldb; aptr += 8; } cptr = c; vst1q_lane_f32(cptr, vc4, 0); cptr+=ldc; vst1q_lane_f32(cptr, vc4, 1); cptr+=ldc; vst1q_lane_f32(cptr, vc4, 2); cptr+=ldc; vst1q_lane_f32(cptr, vc4, 3); cptr+=ldc; vst1q_lane_f32(cptr, vcE, 0); cptr+=ldc; vst1q_lane_f32(cptr, vcE, 1); cptr+=ldc; vst1q_lane_f32(cptr, vcE, 2); cptr+=ldc; vst1q_lane_f32(cptr, vcE, 3); } void sgemm_8x2(int L, float *a, int lda, float *b, int ldb, float *c, int ldc){ float *aptr = a; float *bptr = b; float *cptr = c; float b4, b5; float32x4_t vzero = vdupq_n_f32(0.0f); float32x4_t vb; float32x4_t va0, va1; float32x4_t vc4, vc5; //next 4 rows float32x4_t vcE, vcF; vc4 = vc5 = vcE = vcF = vzero; //vc 4 5 6 and E F G hold column values. 
vc4 = vld1q_lane_f32(cptr + 0, vc4, 0); vc5 = vld1q_lane_f32(cptr + 1, vc5, 0); cptr += ldc; vc4 = vld1q_lane_f32(cptr + 0, vc4, 1); vc5 = vld1q_lane_f32(cptr + 1, vc5, 1); cptr += ldc; vc4 = vld1q_lane_f32(cptr + 0, vc4, 2); vc5 = vld1q_lane_f32(cptr + 1, vc5, 2); cptr += ldc; vc4 = vld1q_lane_f32(cptr + 0, vc4, 3); vc5 = vld1q_lane_f32(cptr + 1, vc5, 3); cptr += ldc; vcE = vld1q_lane_f32(cptr + 0, vcE, 0); vcF = vld1q_lane_f32(cptr + 1, vcF, 0); cptr += ldc; vcE = vld1q_lane_f32(cptr + 0, vcE, 1); vcF = vld1q_lane_f32(cptr + 1, vcF, 1); cptr += ldc; vcE = vld1q_lane_f32(cptr + 0, vcE, 2); vcF = vld1q_lane_f32(cptr + 1, vcF, 2); cptr += ldc; vcE = vld1q_lane_f32(cptr + 0, vcE, 3); vcF = vld1q_lane_f32(cptr + 1, vcF, 3); for(int p = 0; p < L; ++p){ vb = vld1q_f32(bptr); b4 = *(bptr ); b5 = *(bptr + 1); va0 = vld1q_f32(aptr); va1 = vld1q_f32(aptr + 4); //A row in A multiplies a single value in B by column #if __aarch64__ vc4 = vfmaq_n_f32(vc4, va0, b4); vc5 = vfmaq_n_f32(vc5, va0, b5); vcE = vfmaq_n_f32(vcE, va1, b4); vcF = vfmaq_n_f32(vcF, va1, b5); #else vc4 = vmlaq_n_f32(vc4, va0, b4); vc5 = vmlaq_n_f32(vc5, va0, b5); vcE = vmlaq_n_f32(vcE, va1, b4); vcF = vmlaq_n_f32(vcF, va1, b5); #endif // __aarch64__ bptr += ldb; aptr += 8; } cptr = c; vst1q_lane_f32(cptr + 0, vc4, 0); vst1q_lane_f32(cptr + 1, vc5, 0); cptr+=ldc; vst1q_lane_f32(cptr + 0, vc4, 1); vst1q_lane_f32(cptr + 1, vc5, 1); cptr+=ldc; vst1q_lane_f32(cptr + 0, vc4, 2); vst1q_lane_f32(cptr + 1, vc5, 2); cptr+=ldc; vst1q_lane_f32(cptr + 0, vc4, 3); vst1q_lane_f32(cptr + 1, vc5, 3); cptr+=ldc; vst1q_lane_f32(cptr + 0, vcE, 0); vst1q_lane_f32(cptr + 1, vcF, 0); cptr+=ldc; vst1q_lane_f32(cptr + 0, vcE, 1); vst1q_lane_f32(cptr + 1, vcF, 1); cptr+=ldc; vst1q_lane_f32(cptr + 0, vcE, 2); vst1q_lane_f32(cptr + 1, vcF, 2); cptr+=ldc; vst1q_lane_f32(cptr + 0, vcE, 3); vst1q_lane_f32(cptr + 1, vcF, 3); } void sgemm_8x3(int L, float *a, int lda, float *b, int ldb, float *c, int ldc){ float *aptr = a; float *bptr = b; 
float *cptr = c; float b4, b5, b6; float32x4_t vb; float32x4_t va0, va1; float32x4_t vc4, vc5, vc6; //next 4 rows float32x4_t vcE, vcF, vcG; float32x4_t vzero = vdupq_n_f32(0.0f); vc4 = vc5 = vc6 = vcE = vcF = vcG = vzero; //vc 4 5 6 and E F G hold column values. vc4 = vld1q_lane_f32(cptr + 0, vc4, 0); vc5 = vld1q_lane_f32(cptr + 1, vc5, 0); vc6 = vld1q_lane_f32(cptr + 2, vc6, 0); cptr += ldc; vc4 = vld1q_lane_f32(cptr + 0, vc4, 1); vc5 = vld1q_lane_f32(cptr + 1, vc5, 1); vc6 = vld1q_lane_f32(cptr + 2, vc6, 1); cptr += ldc; vc4 = vld1q_lane_f32(cptr + 0, vc4, 2); vc5 = vld1q_lane_f32(cptr + 1, vc5, 2); vc6 = vld1q_lane_f32(cptr + 2, vc6, 2); cptr += ldc; vc4 = vld1q_lane_f32(cptr + 0, vc4, 3); vc5 = vld1q_lane_f32(cptr + 1, vc5, 3); vc6 = vld1q_lane_f32(cptr + 2, vc6, 3); cptr += ldc; vcE = vld1q_lane_f32(cptr + 0, vcE, 0); vcF = vld1q_lane_f32(cptr + 1, vcF, 0); vcG = vld1q_lane_f32(cptr + 2, vcG, 0); cptr += ldc; vcE = vld1q_lane_f32(cptr + 0, vcE, 1); vcF = vld1q_lane_f32(cptr + 1, vcF, 1); vcG = vld1q_lane_f32(cptr + 2, vcG, 1); cptr += ldc; vcE = vld1q_lane_f32(cptr + 0, vcE, 2); vcF = vld1q_lane_f32(cptr + 1, vcF, 2); vcG = vld1q_lane_f32(cptr + 2, vcG, 2); cptr += ldc; vcE = vld1q_lane_f32(cptr + 0, vcE, 3); vcF = vld1q_lane_f32(cptr + 1, vcF, 3); vcG = vld1q_lane_f32(cptr + 2, vcG, 3); for(int p = 0; p < L; ++p){ vb = vld1q_f32(bptr); b4 = *(bptr ); b5 = *(bptr + 1); b6 = *(bptr + 2); va0 = vld1q_f32(aptr); va1 = vld1q_f32(aptr + 4); #if __aarch64__ //A row in A multiplies a single value in B by column vc4 = vfmaq_n_f32(vc4, va0, b4); vc5 = vfmaq_n_f32(vc5, va0, b5); vc6 = vfmaq_n_f32(vc6, va0, b6); vcE = vfmaq_n_f32(vcE, va1, b4); vcF = vfmaq_n_f32(vcF, va1, b5); vcG = vfmaq_n_f32(vcG, va1, b6); #else vc4 = vmlaq_n_f32(vc4, va0, b4); vc5 = vmlaq_n_f32(vc5, va0, b5); vc6 = vmlaq_n_f32(vc6, va0, b6); vcE = vmlaq_n_f32(vcE, va1, b4); vcF = vmlaq_n_f32(vcF, va1, b5); vcG = vmlaq_n_f32(vcG, va1, b6); #endif // __aarch64__ bptr += ldb; aptr += 8; } cptr = c; 
vst1q_lane_f32(cptr + 0, vc4, 0); vst1q_lane_f32(cptr + 1, vc5, 0); vst1q_lane_f32(cptr + 2, vc6, 0); cptr+=ldc; vst1q_lane_f32(cptr + 0, vc4, 1); vst1q_lane_f32(cptr + 1, vc5, 1); vst1q_lane_f32(cptr + 2, vc6, 1); cptr+=ldc; vst1q_lane_f32(cptr + 0, vc4, 2); vst1q_lane_f32(cptr + 1, vc5, 2); vst1q_lane_f32(cptr + 2, vc6, 2); cptr+=ldc; vst1q_lane_f32(cptr + 0, vc4, 3); vst1q_lane_f32(cptr + 1, vc5, 3); vst1q_lane_f32(cptr + 2, vc6, 3); cptr+=ldc; vst1q_lane_f32(cptr + 0, vcE, 0); vst1q_lane_f32(cptr + 1, vcF, 0); vst1q_lane_f32(cptr + 2, vcG, 0); cptr+=ldc; vst1q_lane_f32(cptr + 0, vcE, 1); vst1q_lane_f32(cptr + 1, vcF, 1); vst1q_lane_f32(cptr + 2, vcG, 1); cptr+=ldc; vst1q_lane_f32(cptr + 0, vcE, 2); vst1q_lane_f32(cptr + 1, vcF, 2); vst1q_lane_f32(cptr + 2, vcG, 2); cptr+=ldc; vst1q_lane_f32(cptr + 0, vcE, 3); vst1q_lane_f32(cptr + 1, vcF, 3); vst1q_lane_f32(cptr + 2, vcG, 3); } void sgemm_8x4(int L, float *a, int lda, float *b, int ldb, float *c, int ldc){ float *aptr = a; float *bptr = b; float *cptr = c; float32x4_t vb; float32x4_t va0, va1; float32x4_t vc0, vc1, vc2, vc3; //next 4 rows float32x4_t vcA, vcB, vcC, vcD; //vc0 1 2 3 and A B C D hold row values. 
vc0 = vld1q_f32(cptr); cptr += ldc; vc1 = vld1q_f32(cptr); cptr += ldc; vc2 = vld1q_f32(cptr); cptr += ldc; vc3 = vld1q_f32(cptr); cptr += ldc; vcA = vld1q_f32(cptr); cptr += ldc; vcB = vld1q_f32(cptr); cptr += ldc; vcC = vld1q_f32(cptr); cptr += ldc; vcD = vld1q_f32(cptr); for(int p = 0; p < L; ++p){ vb = vld1q_f32(bptr); va0 = vld1q_f32(aptr); va1 = vld1q_f32(aptr + 4); #if __aarch64__ vc0 = vfmaq_laneq_f32(vc0, vb, va0, 0); vc1 = vfmaq_laneq_f32(vc1, vb, va0, 1); vc2 = vfmaq_laneq_f32(vc2, vb, va0, 2); vc3 = vfmaq_laneq_f32(vc3, vb, va0, 3); vcA = vfmaq_laneq_f32(vcA, vb, va1, 0); vcB = vfmaq_laneq_f32(vcB, vb, va1, 1); vcC = vfmaq_laneq_f32(vcC, vb, va1, 2); vcD = vfmaq_laneq_f32(vcD, vb, va1, 3); #else vc0 = vmlaq_f32(vc0, vb, vld1q_dup_f32(aptr + 0)); vc1 = vmlaq_f32(vc1, vb, vld1q_dup_f32(aptr + 1)); vc2 = vmlaq_f32(vc2, vb, vld1q_dup_f32(aptr + 2)); vc3 = vmlaq_f32(vc3, vb, vld1q_dup_f32(aptr + 3)); vcA = vmlaq_f32(vcA, vb, vld1q_dup_f32(aptr + 4)); vcB = vmlaq_f32(vcB, vb, vld1q_dup_f32(aptr + 5)); vcC = vmlaq_f32(vcC, vb, vld1q_dup_f32(aptr + 6)); vcD = vmlaq_f32(vcD, vb, vld1q_dup_f32(aptr + 7)); #endif // __aarch64__ bptr += ldb; aptr += 8; } cptr = c; vst1q_f32(cptr, vc0); cptr+=ldc; vst1q_f32(cptr, vc1); cptr+=ldc; vst1q_f32(cptr, vc2); cptr+=ldc; vst1q_f32(cptr, vc3); cptr+=ldc; vst1q_f32(cptr, vcA); cptr+=ldc; vst1q_f32(cptr, vcB); cptr+=ldc; vst1q_f32(cptr, vcC); cptr+=ldc; vst1q_f32(cptr, vcD); } void sgemm_8x5(int L, float *a, int lda, float *b, int ldb, float *c, int ldc){ float *aptr = a; float *bptr = b; float *cptr = c; float b4; float32x4_t vb; float32x4_t va0, va1; float32x4_t vc0, vc1, vc2, vc3, vc4; //next 4 rows float32x4_t vcA, vcB, vcC, vcD, vcE; float32x4_t vzero = vdupq_n_f32(0.0f); vc4 = vcE = vzero; //vc0 1 2 3 and A B C D hold row values. vc0 = vld1q_f32(cptr); //vc 4 5 6 and E F G hold column values. 
vc4 = vld1q_lane_f32(cptr + 4, vc4, 0); cptr += ldc; vc1 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 1); cptr += ldc; vc2 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 2); cptr += ldc; vc3 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 3); cptr += ldc; vcA = vld1q_f32(cptr); vcE = vld1q_lane_f32(cptr + 4, vcE, 0); cptr += ldc; vcB = vld1q_f32(cptr); vcE = vld1q_lane_f32(cptr + 4, vcE, 1); cptr += ldc; vcC = vld1q_f32(cptr); vcE = vld1q_lane_f32(cptr + 4, vcE, 2); cptr += ldc; vcD = vld1q_f32(cptr); vcE = vld1q_lane_f32(cptr + 4, vcE, 3); for(int p = 0; p < L; ++p){ vb = vld1q_f32(bptr); b4 = *(bptr + 4); va0 = vld1q_f32(aptr); va1 = vld1q_f32(aptr + 4); #if __aarch64__ vc0 = vfmaq_laneq_f32(vc0, vb, va0, 0); vc1 = vfmaq_laneq_f32(vc1, vb, va0, 1); vc2 = vfmaq_laneq_f32(vc2, vb, va0, 2); vc3 = vfmaq_laneq_f32(vc3, vb, va0, 3); vcA = vfmaq_laneq_f32(vcA, vb, va1, 0); vcB = vfmaq_laneq_f32(vcB, vb, va1, 1); vcC = vfmaq_laneq_f32(vcC, vb, va1, 2); vcD = vfmaq_laneq_f32(vcD, vb, va1, 3); //A row in A multiplies a single value in B by column vc4 = vfmaq_n_f32(vc4, va0, b4); vcE = vfmaq_n_f32(vcE, va1, b4); #else vc0 = vmlaq_f32(vc0, vb, vld1q_dup_f32(aptr + 0)); vc1 = vmlaq_f32(vc1, vb, vld1q_dup_f32(aptr + 1)); vc2 = vmlaq_f32(vc2, vb, vld1q_dup_f32(aptr + 2)); vc3 = vmlaq_f32(vc3, vb, vld1q_dup_f32(aptr + 3)); vcA = vmlaq_f32(vcA, vb, vld1q_dup_f32(aptr + 4)); vcB = vmlaq_f32(vcB, vb, vld1q_dup_f32(aptr + 5)); vcC = vmlaq_f32(vcC, vb, vld1q_dup_f32(aptr + 6)); vcD = vmlaq_f32(vcD, vb, vld1q_dup_f32(aptr + 7)); //A row in A multiplies a single value in B by column vc4 = vmlaq_n_f32(vc4, va0, b4); vcE = vmlaq_n_f32(vcE, va1, b4); #endif // __aarch64__ bptr += ldb; aptr += 8; } cptr = c; vst1q_f32(cptr, vc0); vst1q_lane_f32(cptr + 4, vc4, 0); cptr+=ldc; vst1q_f32(cptr, vc1); vst1q_lane_f32(cptr + 4, vc4, 1); cptr+=ldc; vst1q_f32(cptr, vc2); vst1q_lane_f32(cptr + 4, vc4, 2); cptr+=ldc; vst1q_f32(cptr, vc3); vst1q_lane_f32(cptr + 4, vc4, 3); 
cptr+=ldc; vst1q_f32(cptr, vcA); vst1q_lane_f32(cptr + 4, vcE, 0); cptr+=ldc; vst1q_f32(cptr, vcB); vst1q_lane_f32(cptr + 4, vcE, 1); cptr+=ldc; vst1q_f32(cptr, vcC); vst1q_lane_f32(cptr + 4, vcE, 2); cptr+=ldc; vst1q_f32(cptr, vcD); vst1q_lane_f32(cptr + 4, vcE, 3); } void sgemm_8x6(int L, float *a, int lda, float *b, int ldb, float *c, int ldc){ float *aptr = a; float *bptr = b; float *cptr = c; float b4, b5; float32x4_t vb; float32x4_t va0, va1; float32x4_t vc0, vc1, vc2, vc3, vc4, vc5; //next 4 rows float32x4_t vcA, vcB, vcC, vcD, vcE, vcF; float32x4_t vzero = vdupq_n_f32(0.0f); vc4 = vc5 = vcE = vcF = vzero; //vc0 1 2 3 and A B C D hold row values. vc0 = vld1q_f32(cptr); //vc 4 5 6 and E F G hold column values. vc4 = vld1q_lane_f32(cptr + 4, vc4, 0); vc5 = vld1q_lane_f32(cptr + 5, vc5, 0); cptr += ldc; vc1 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 1); vc5 = vld1q_lane_f32(cptr + 5, vc5, 1); cptr += ldc; vc2 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 2); vc5 = vld1q_lane_f32(cptr + 5, vc5, 2); cptr += ldc; vc3 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 3); vc5 = vld1q_lane_f32(cptr + 5, vc5, 3); cptr += ldc; vcA = vld1q_f32(cptr); vcE = vld1q_lane_f32(cptr + 4, vcE, 0); vcF = vld1q_lane_f32(cptr + 5, vcF, 0); cptr += ldc; vcB = vld1q_f32(cptr); vcE = vld1q_lane_f32(cptr + 4, vcE, 1); vcF = vld1q_lane_f32(cptr + 5, vcF, 1); cptr += ldc; vcC = vld1q_f32(cptr); vcE = vld1q_lane_f32(cptr + 4, vcE, 2); vcF = vld1q_lane_f32(cptr + 5, vcF, 2); cptr += ldc; vcD = vld1q_f32(cptr); vcE = vld1q_lane_f32(cptr + 4, vcE, 3); vcF = vld1q_lane_f32(cptr + 5, vcF, 3); for(int p = 0; p < L; ++p){ vb = vld1q_f32(bptr); b4 = *(bptr + 4); b5 = *(bptr + 5); va0 = vld1q_f32(aptr); va1 = vld1q_f32(aptr + 4); #if __aarch64__ vc0 = vfmaq_laneq_f32(vc0, vb, va0, 0); vc1 = vfmaq_laneq_f32(vc1, vb, va0, 1); vc2 = vfmaq_laneq_f32(vc2, vb, va0, 2); vc3 = vfmaq_laneq_f32(vc3, vb, va0, 3); vcA = vfmaq_laneq_f32(vcA, vb, va1, 0); vcB = vfmaq_laneq_f32(vcB, 
vb, va1, 1); vcC = vfmaq_laneq_f32(vcC, vb, va1, 2); vcD = vfmaq_laneq_f32(vcD, vb, va1, 3); //A row in A multiplies a single value in B by column vc4 = vfmaq_n_f32(vc4, va0, b4); vc5 = vfmaq_n_f32(vc5, va0, b5); vcE = vfmaq_n_f32(vcE, va1, b4); vcF = vfmaq_n_f32(vcF, va1, b5); #else vc0 = vmlaq_f32(vc0, vb, vld1q_dup_f32(aptr + 0)); vc1 = vmlaq_f32(vc1, vb, vld1q_dup_f32(aptr + 0)); vc2 = vmlaq_f32(vc2, vb, vld1q_dup_f32(aptr + 0)); vc3 = vmlaq_f32(vc3, vb, vld1q_dup_f32(aptr + 0)); vcA = vmlaq_f32(vcA, vb, vld1q_dup_f32(aptr + 4)); vcB = vmlaq_f32(vcB, vb, vld1q_dup_f32(aptr + 5)); vcC = vmlaq_f32(vcC, vb, vld1q_dup_f32(aptr + 6)); vcD = vmlaq_f32(vcD, vb, vld1q_dup_f32(aptr + 7)); //A row in A multiplies a single value in B by column vc4 = vmlaq_n_f32(vc4, va0, b4); vc5 = vmlaq_n_f32(vc5, va0, b5); vcE = vmlaq_n_f32(vcE, va1, b4); vcF = vmlaq_n_f32(vcF, va1, b5); #endif // __aarch64__ bptr += ldb; aptr += 8; } cptr = c; vst1q_f32(cptr, vc0); vst1q_lane_f32(cptr + 4, vc4, 0); vst1q_lane_f32(cptr + 5, vc5, 0); cptr+=ldc; vst1q_f32(cptr, vc1); vst1q_lane_f32(cptr + 4, vc4, 1); vst1q_lane_f32(cptr + 5, vc5, 1); cptr+=ldc; vst1q_f32(cptr, vc2); vst1q_lane_f32(cptr + 4, vc4, 2); vst1q_lane_f32(cptr + 5, vc5, 2); cptr+=ldc; vst1q_f32(cptr, vc3); vst1q_lane_f32(cptr + 4, vc4, 3); vst1q_lane_f32(cptr + 5, vc5, 3); cptr+=ldc; vst1q_f32(cptr, vcA); vst1q_lane_f32(cptr + 4, vcE, 0); vst1q_lane_f32(cptr + 5, vcF, 0); cptr+=ldc; vst1q_f32(cptr, vcB); vst1q_lane_f32(cptr + 4, vcE, 1); vst1q_lane_f32(cptr + 5, vcF, 1); cptr+=ldc; vst1q_f32(cptr, vcC); vst1q_lane_f32(cptr + 4, vcE, 2); vst1q_lane_f32(cptr + 5, vcF, 2); cptr+=ldc; vst1q_f32(cptr, vcD); vst1q_lane_f32(cptr + 4, vcE, 3); vst1q_lane_f32(cptr + 5, vcF, 3); } void sgemm_8x7(int L, float *a, int lda, float *b, int ldb, float *c, int ldc){ float *aptr = a; float *bptr = b; float *cptr = c; float b4, b5, b6; float32x4_t vb; float32x4_t va0, va1; float32x4_t vc0, vc1, vc2, vc3, vc4, vc5, vc6; //next 4 rows float32x4_t 
                vcA, vcB, vcC, vcD, vcE, vcF, vcG;
    float32x4_t vzero = vdupq_n_f32(0.0f);
    vc4 = vc5 = vc6 = vcE = vcF = vcG = vzero;
    //vc0 1 2 3 and A B C D hold row values.
    vc0 = vld1q_f32(cptr);
    //vc 4 5 6 and E F G hold column values.
    vc4 = vld1q_lane_f32(cptr + 4, vc4, 0); vc5 = vld1q_lane_f32(cptr + 5, vc5, 0); vc6 = vld1q_lane_f32(cptr + 6, vc6, 0); cptr += ldc;
    vc1 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 1); vc5 = vld1q_lane_f32(cptr + 5, vc5, 1); vc6 = vld1q_lane_f32(cptr + 6, vc6, 1); cptr += ldc;
    vc2 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 2); vc5 = vld1q_lane_f32(cptr + 5, vc5, 2); vc6 = vld1q_lane_f32(cptr + 6, vc6, 2); cptr += ldc;
    vc3 = vld1q_f32(cptr); vc4 = vld1q_lane_f32(cptr + 4, vc4, 3); vc5 = vld1q_lane_f32(cptr + 5, vc5, 3); vc6 = vld1q_lane_f32(cptr + 6, vc6, 3); cptr += ldc;
    vcA = vld1q_f32(cptr); vcE = vld1q_lane_f32(cptr + 4, vcE, 0); vcF = vld1q_lane_f32(cptr + 5, vcF, 0); vcG = vld1q_lane_f32(cptr + 6, vcG, 0); cptr += ldc;
    vcB = vld1q_f32(cptr); vcE = vld1q_lane_f32(cptr + 4, vcE, 1); vcF = vld1q_lane_f32(cptr + 5, vcF, 1); vcG = vld1q_lane_f32(cptr + 6, vcG, 1); cptr += ldc;
    vcC = vld1q_f32(cptr); vcE = vld1q_lane_f32(cptr + 4, vcE, 2); vcF = vld1q_lane_f32(cptr + 5, vcF, 2); vcG = vld1q_lane_f32(cptr + 6, vcG, 2); cptr += ldc;
    vcD = vld1q_f32(cptr); vcE = vld1q_lane_f32(cptr + 4, vcE, 3); vcF = vld1q_lane_f32(cptr + 5, vcF, 3); vcG = vld1q_lane_f32(cptr + 6, vcG, 3);
    for(int p = 0; p < L; ++p){
        vb = vld1q_f32(bptr);
        b4 = *(bptr + 4);
        b5 = *(bptr + 5);
        b6 = *(bptr + 6);
        va0 = vld1q_f32(aptr);
        va1 = vld1q_f32(aptr + 4);
#if __aarch64__
        vc0 = vfmaq_laneq_f32(vc0, vb, va0, 0);
        vc1 = vfmaq_laneq_f32(vc1, vb, va0, 1);
        vc2 = vfmaq_laneq_f32(vc2, vb, va0, 2);
        vc3 = vfmaq_laneq_f32(vc3, vb, va0, 3);
        vcA = vfmaq_laneq_f32(vcA, vb, va1, 0);
        vcB = vfmaq_laneq_f32(vcB, vb, va1, 1);
        vcC = vfmaq_laneq_f32(vcC, vb, va1, 2);
        vcD = vfmaq_laneq_f32(vcD, vb, va1, 3);
        //A row in A multiplies a single value in B by column
        vc4 = vfmaq_n_f32(vc4, va0, b4);
        vc5 = vfmaq_n_f32(vc5, va0, b5);
        vc6 = vfmaq_n_f32(vc6, va0, b6);
        vcE = vfmaq_n_f32(vcE, va1, b4);
        vcF = vfmaq_n_f32(vcF, va1, b5);
        vcG = vfmaq_n_f32(vcG, va1, b6);
#else
        vc0 = vmlaq_f32(vc0, vb, vld1q_dup_f32(aptr + 0));
        vc1 = vmlaq_f32(vc1, vb, vld1q_dup_f32(aptr + 1));
        vc2 = vmlaq_f32(vc2, vb, vld1q_dup_f32(aptr + 2));
        vc3 = vmlaq_f32(vc3, vb, vld1q_dup_f32(aptr + 3));
        vcA = vmlaq_f32(vcA, vb, vld1q_dup_f32(aptr + 4));
        vcB = vmlaq_f32(vcB, vb, vld1q_dup_f32(aptr + 5));
        vcC = vmlaq_f32(vcC, vb, vld1q_dup_f32(aptr + 6));
        vcD = vmlaq_f32(vcD, vb, vld1q_dup_f32(aptr + 7));
        //A row in A multiplies a single value in B by column
        vc4 = vmlaq_n_f32(vc4, va0, b4);
        vc5 = vmlaq_n_f32(vc5, va0, b5);
        vc6 = vmlaq_n_f32(vc6, va0, b6);
        vcE = vmlaq_n_f32(vcE, va1, b4);
        vcF = vmlaq_n_f32(vcF, va1, b5);
        vcG = vmlaq_n_f32(vcG, va1, b6);
#endif // __aarch64__
        bptr += ldb;
        aptr += 8;
    }
    /* Store the 8x7 tile: vector store for columns 0..3, lane stores for
     * columns 4..6. */
    cptr = c;
    vst1q_f32(cptr, vc0); vst1q_lane_f32(cptr + 4, vc4, 0); vst1q_lane_f32(cptr + 5, vc5, 0); vst1q_lane_f32(cptr + 6, vc6, 0); cptr+=ldc;
    vst1q_f32(cptr, vc1); vst1q_lane_f32(cptr + 4, vc4, 1); vst1q_lane_f32(cptr + 5, vc5, 1); vst1q_lane_f32(cptr + 6, vc6, 1); cptr+=ldc;
    vst1q_f32(cptr, vc2); vst1q_lane_f32(cptr + 4, vc4, 2); vst1q_lane_f32(cptr + 5, vc5, 2); vst1q_lane_f32(cptr + 6, vc6, 2); cptr+=ldc;
    vst1q_f32(cptr, vc3); vst1q_lane_f32(cptr + 4, vc4, 3); vst1q_lane_f32(cptr + 5, vc5, 3); vst1q_lane_f32(cptr + 6, vc6, 3); cptr+=ldc;
    vst1q_f32(cptr, vcA); vst1q_lane_f32(cptr + 4, vcE, 0); vst1q_lane_f32(cptr + 5, vcF, 0); vst1q_lane_f32(cptr + 6, vcG, 0); cptr+=ldc;
    vst1q_f32(cptr, vcB); vst1q_lane_f32(cptr + 4, vcE, 1); vst1q_lane_f32(cptr + 5, vcF, 1); vst1q_lane_f32(cptr + 6, vcG, 1); cptr+=ldc;
    vst1q_f32(cptr, vcC); vst1q_lane_f32(cptr + 4, vcE, 2); vst1q_lane_f32(cptr + 5, vcF, 2); vst1q_lane_f32(cptr + 6, vcG, 2); cptr+=ldc;
    vst1q_f32(cptr, vcD); vst1q_lane_f32(cptr + 4, vcE, 3); vst1q_lane_f32(cptr + 5, vcF, 3); vst1q_lane_f32(cptr + 6, vcG, 3);
}
ba_sparse_matrix.h
/*
 * Copyright (C) 2015, Simon Fuhrmann, Fabian Langguth
 * TU Darmstadt - Graphics, Capture and Massively Parallel Computing
 * All rights reserved.
 *
 * This software may be modified and distributed under the terms
 * of the BSD 3-Clause license. See the LICENSE.txt file for details.
 */

#ifndef SFM_SPARSE_MATRIX_HEADER
#define SFM_SPARSE_MATRIX_HEADER

#include <thread>
#include <stdexcept>
#include <vector>
#include <algorithm>

#include "sfm/ba_dense_vector.h"
#include "sfm/defines.h"

SFM_NAMESPACE_BEGIN
SFM_BA_NAMESPACE_BEGIN

/**
 * Sparse matrix class in Yale format for column-major matrices.
 * Stored as compressed sparse column (CSC): `values`/`inner` hold the
 * non-zeros and their row indices; `outer[c]..outer[c+1]` delimits column c.
 */
template <typename T>
class SparseMatrix
{
public:
    /** Triplet with row/col index, and the actual value. */
    struct Triplet
    {
        Triplet (void) = default;
        Triplet (std::size_t row, std::size_t col, T const& value);

        std::size_t row;
        std::size_t col;
        T value;
    };

    /** List of triplets. */
    typedef std::vector<Triplet> Triplets;

public:
    SparseMatrix (void);
    SparseMatrix (std::size_t rows, std::size_t cols);
    /* Resets the matrix to an empty rows x cols matrix. */
    void allocate (std::size_t rows, std::size_t cols);
    /* Reserves storage for the given number of non-zeros. */
    void reserve (std::size_t num_elements);
    /* Builds the CSC structure from (row, col, value) triplets. */
    void set_from_triplets (Triplets const& triplets);
    /* Scales the diagonal entries by the given factor. */
    void mult_diagonal (T const& factor);
    /* Replaces each stored value by its reciprocal. */
    void cwise_invert (void);
    /* Copies the non-zeros of one column into the given dense vector. */
    void column_nonzeros (std::size_t col, DenseVector<T>* vector) const;

    SparseMatrix transpose (void) const;
    SparseMatrix subtract (SparseMatrix const& rhs) const;
    /* Dispatches to parallel_multiply when OpenMP is enabled. */
    SparseMatrix multiply (SparseMatrix const& rhs) const;
    SparseMatrix sequential_multiply (SparseMatrix const& rhs) const;
    SparseMatrix parallel_multiply (SparseMatrix const& rhs) const;
    DenseVector<T> multiply (DenseVector<T> const& rhs) const;
    /* Returns a matrix containing only this matrix' diagonal. */
    SparseMatrix diagonal_matrix (void) const;

    std::size_t num_non_zero (void) const;
    std::size_t num_rows (void) const;
    std::size_t num_cols (void) const;
    T* begin (void);
    T* end (void);

    void debug (void) const;

private:
    std::size_t rows;   // number of matrix rows
    std::size_t cols;   // number of matrix columns
    std::vector<T> values;              // non-zero values, column-major order
    std::vector<std::size_t> outer;     // per-column start offsets (size cols + 1)
    std::vector<std::size_t> inner;     // row index of each stored value
};

SFM_BA_NAMESPACE_END
SFM_NAMESPACE_END
/* ------------------------ Implementation ------------------------ */

#include <iostream>

SFM_NAMESPACE_BEGIN
SFM_BA_NAMESPACE_BEGIN

template <typename T>
SparseMatrix<T>::Triplet::Triplet (std::size_t row, std::size_t col, T const& value)
    : row(row), col(col), value(value)
{
}

/* --------------------------------------------------------------- */

template <typename T>
SparseMatrix<T>::SparseMatrix (void)
    : rows(0)
    , cols(0)
{
}

template <typename T>
SparseMatrix<T>::SparseMatrix (std::size_t rows, std::size_t cols)
{
    this->allocate(rows, cols);
}

template <typename T>
void
SparseMatrix<T>::allocate (std::size_t rows, std::size_t cols)
{
    this->rows = rows;
    this->cols = cols;
    this->values.clear();
    this->outer.clear();
    this->inner.clear();
    /* cols+1 outer entries so outer[c+1] is valid for the last column. */
    this->outer.resize(cols + 1, 0);
}

template <typename T>
void
SparseMatrix<T>::reserve (std::size_t num_elements)
{
    this->inner.reserve(num_elements);
    this->values.reserve(num_elements);
}

/* Builds CSC storage from unsorted triplets. The triplets are first
 * bucketed into a row-major (i.e. transposed) layout via counting +
 * prefix sum, then transpose() restores the requested orientation and
 * implicitly sorts the inner (row) indices of every column. */
template <typename T>
void
SparseMatrix<T>::set_from_triplets (Triplets const& triplets)
{
    /* Create a temporary transposed matrix */
    SparseMatrix<T> transposed(this->cols, this->rows);
    transposed.values.resize(triplets.size());
    transposed.inner.resize(triplets.size());

    /* Initialize outer indices with amount of inner values. */
    for (std::size_t i = 0; i < triplets.size(); ++i)
        transposed.outer[triplets[i].row]++;

    /* Convert amounts to indices with prefix sum. */
    std::size_t sum = 0;
    std::vector<std::size_t> scratch(transposed.outer.size());
    for (std::size_t i = 0; i < transposed.outer.size(); ++i)
    {
        std::size_t const temp = transposed.outer[i];
        transposed.outer[i] = sum;
        scratch[i] = sum;   /* scratch is a mutable copy of the offsets */
        sum += temp;
    }

    /* Add triplets, inner indices are unsorted. */
    for (std::size_t i = 0; i < triplets.size(); ++i)
    {
        Triplet const& t = triplets[i];
        std::size_t pos = scratch[t.row]++;
        transposed.values[pos] = t.value;
        transposed.inner[pos] = t.col;
    }

    /* Transpose matrix, implicit sorting of inner indices. */
    *this = transposed.transpose();
}

/* Standard CSC transpose: count entries per target column, prefix-sum
 * into offsets, then scatter. Output inner indices come out sorted. */
template <typename T>
SparseMatrix<T>
SparseMatrix<T>::transpose (void) const
{
    SparseMatrix ret(this->cols, this->rows);
    ret.values.resize(this->num_non_zero());
    ret.inner.resize(this->num_non_zero());

    /* Compute inner sizes of transposed matrix. */
    for (std::size_t i = 0; i < this->inner.size(); ++i)
        ret.outer[this->inner[i]] += 1;

    /* Compute outer sizes of transposed matrix with prefix sum. */
    std::size_t sum = 0;
    std::vector<std::size_t> scratch(ret.outer.size());
    for (std::size_t i = 0; i < ret.outer.size(); ++i)
    {
        std::size_t const temp = ret.outer[i];
        ret.outer[i] = sum;
        scratch[i] = sum;
        sum += temp;
    }

    /* Write inner indices and values of transposed matrix. */
    for (std::size_t i = 0; i < this->outer.size() - 1; ++i)
        for (std::size_t j = this->outer[i]; j < this->outer[i + 1]; ++j)
        {
            std::size_t pos = scratch[this->inner[j]]++;
            ret.inner[pos] = i;
            ret.values[pos] = this->values[j];
        }

    return ret;
}

/* Computes this - rhs by merging the sorted inner indices of each
 * column (two-pointer merge). Requires identical dimensions. */
template <typename T>
SparseMatrix<T>
SparseMatrix<T>::subtract (SparseMatrix const& rhs) const
{
    if (this->rows != rhs.rows || this->cols != rhs.cols)
        throw std::invalid_argument("Incompatible matrix dimensions");

    SparseMatrix ret(this->rows, this->cols);
    ret.reserve(this->num_non_zero() + rhs.num_non_zero());

    std::size_t num_outer = this->outer.size() - 1;
    for (std::size_t outer = 0; outer < num_outer; ++outer)
    {
        ret.outer[outer] = ret.values.size();
        std::size_t i1 = this->outer[outer];
        std::size_t i2 = rhs.outer[outer];
        std::size_t const i1_end = this->outer[outer + 1];
        std::size_t const i2_end = rhs.outer[outer + 1];
        while (i1 < i1_end || i2 < i2_end)
        {
            /* One side exhausted: copy (or negate) the remainder. */
            if (i1 >= i1_end)
            {
                ret.values.push_back(-rhs.values[i2]);
                ret.inner.push_back(rhs.inner[i2]);
                i2 += 1;
                continue;
            }
            if (i2 >= i2_end)
            {
                ret.values.push_back(this->values[i1]);
                ret.inner.push_back(this->inner[i1]);
                i1 += 1;
                continue;
            }
            std::size_t id1 = this->inner[i1];
            std::size_t id2 = rhs.inner[i2];
            if (id1 < id2)
                ret.values.push_back(this->values[i1]);
            else if (id2 < id1)
                ret.values.push_back(-rhs.values[i2]);
            else
                ret.values.push_back(this->values[i1] - rhs.values[i2]);
            /* Advance whichever side(s) supplied the smaller row id. */
            i1 += static_cast<std::size_t>(id1 <= id2);
            i2 += static_cast<std::size_t>(id2 <= id1);
            ret.inner.push_back(std::min(id1, id2));
        }
    }
    ret.outer.back() = ret.values.size();

    return ret;
}

template <typename T>
SparseMatrix<T>
SparseMatrix<T>::multiply (SparseMatrix const& rhs) const
{
#ifdef _OPENMP
    return this->parallel_multiply(rhs);
#else
    return this->sequential_multiply(rhs);
#endif
}

/* Gustavson-style sparse product: for each result column, accumulate
 * the relevant lhs columns into a dense scratch column, then compress
 * the non-zeros back into CSC storage. */
template <typename T>
SparseMatrix<T>
SparseMatrix<T>::sequential_multiply (SparseMatrix const& rhs) const
{
    if (this->cols != rhs.rows)
        throw std::invalid_argument("Incompatible matrix dimensions");

    SparseMatrix ret(this->rows, rhs.cols);
    ret.reserve(this->num_non_zero() + rhs.num_non_zero());

    /* Matrix-matrix multiplication. */
    std::vector<T> ret_col(ret.rows, T(0));
    std::vector<bool> ret_nonzero(ret.rows, false);
    for (std::size_t col = 0; col < ret.cols; ++col)
    {
        ret.outer[col] = ret.values.size();
        std::fill(ret_col.begin(), ret_col.end(), T(0));
        std::fill(ret_nonzero.begin(), ret_nonzero.end(), false);
        std::size_t rhs_col_begin = rhs.outer[col];
        std::size_t rhs_col_end = rhs.outer[col + 1];
        for (std::size_t i = rhs_col_begin; i < rhs_col_end; ++i)
        {
            T const& rhs_col_value = rhs.values[i];
            std::size_t const lhs_col = rhs.inner[i];
            std::size_t const lhs_col_begin = this->outer[lhs_col];
            std::size_t const lhs_col_end = this->outer[lhs_col + 1];
            for (std::size_t j = lhs_col_begin; j < lhs_col_end; ++j)
            {
                std::size_t const id = this->inner[j];
                ret_col[id] += this->values[j] * rhs_col_value;
                ret_nonzero[id] = true;
            }
        }

        /* Compress the dense scratch column into the result. */
        for (std::size_t i = 0; i < ret.rows; ++i)
            if (ret_nonzero[i])
            {
                ret.inner.push_back(i);
                ret.values.push_back(ret_col[i]);
            }
    }
    ret.outer[ret.cols] = ret.values.size();

    return ret;
}

/* OpenMP variant of sequential_multiply(). Columns are processed in
 * chunks; each thread accumulates its chunk into thread-local buffers,
 * and the 'ordered' region appends them in chunk order so the global
 * 'inner'/'values' arrays stay column-sorted. 'outer' is filled with
 * per-column counts first and prefix-summed at the end. */
template <typename T>
SparseMatrix<T>
SparseMatrix<T>::parallel_multiply (SparseMatrix const& rhs) const
{
    if (this->cols != rhs.rows)
        throw std::invalid_argument("Incompatible matrix dimensions");

    std::size_t nnz = this->num_non_zero() + rhs.num_non_zero();
    SparseMatrix ret(this->rows, rhs.cols);
    ret.reserve(nnz);
    std::fill(ret.outer.begin(), ret.outer.end(), 0);

    std::size_t const chunk_size = 64;
    std::size_t const num_chunks = ret.cols / chunk_size
        + (ret.cols % chunk_size != 0);
    /* hardware_concurrency() may return 0; clamp to at least one. */
    std::size_t const max_threads = std::max(1u,
        std::thread::hardware_concurrency());
    std::size_t const num_threads = std::min(num_chunks, max_threads);

#pragma omp parallel num_threads(num_threads)
    {
        /* Matrix-matrix multiplication. */
        std::vector<T> ret_col(ret.rows, T(0));
        std::vector<bool> ret_nonzero(ret.rows, false);
        std::vector<T> thread_values;
        thread_values.reserve(nnz / num_chunks);
        std::vector<std::size_t> thread_inner;
        thread_inner.reserve(nnz / num_chunks);

        /* schedule(static, 1) round-robins chunks over threads; the
         * ordered clause serializes only the append below. */
#pragma omp for ordered schedule(static, 1)
#if !defined(_MSC_VER)
        for (std::size_t chunk = 0; chunk < num_chunks; ++chunk)
#else
        /* MSVC's OpenMP requires a signed loop variable. */
        for (int64_t chunk = 0; chunk < num_chunks; ++chunk)
#endif
        {
            thread_inner.clear();
            thread_values.clear();
            std::size_t const begin = chunk * chunk_size;
            std::size_t const end = std::min(begin + chunk_size, ret.cols);
            for (std::size_t col = begin; col < end; ++col)
            {
                std::fill(ret_col.begin(), ret_col.end(), T(0));
                std::fill(ret_nonzero.begin(), ret_nonzero.end(), false);
                std::size_t const rhs_col_begin = rhs.outer[col];
                std::size_t const rhs_col_end = rhs.outer[col + 1];
                for (std::size_t i = rhs_col_begin; i < rhs_col_end; ++i)
                {
                    T const& rhs_col_value = rhs.values[i];
                    std::size_t const lhs_col = rhs.inner[i];
                    std::size_t const lhs_col_begin = this->outer[lhs_col];
                    std::size_t const lhs_col_end = this->outer[lhs_col + 1];
                    for (std::size_t j = lhs_col_begin; j < lhs_col_end; ++j)
                    {
                        std::size_t const id = this->inner[j];
                        ret_col[id] += this->values[j] * rhs_col_value;
                        ret_nonzero[id] = true;
                    }
                }
                /* outer[col + 1] temporarily holds the column count;
                 * converted to offsets by the prefix sum below. */
                for (std::size_t i = 0; i < ret.rows; ++i)
                    if (ret_nonzero[i])
                    {
                        ret.outer[col + 1] += 1;
                        thread_inner.push_back(i);
                        thread_values.push_back(ret_col[i]);
                    }
            }

            /* Executed in chunk order, one thread at a time. */
#pragma omp ordered
            {
                ret.inner.insert(ret.inner.end(),
                    thread_inner.begin(), thread_inner.end());
                ret.values.insert(ret.values.end(),
                    thread_values.begin(), thread_values.end());
            }
        }
    }

    /* Convert per-column counts into CSC offsets. */
    for (std::size_t col = 0; col < ret.cols; ++col)
        ret.outer[col + 1] += ret.outer[col];

    return ret;
}

template<typename T>
DenseVector<T>
SparseMatrix<T>::multiply (DenseVector<T> const& rhs) const
{
    if (rhs.size() != this->cols)
        throw std::invalid_argument("Incompatible dimensions");

    DenseVector<T> ret(this->rows, T(0));
    for (std::size_t i = 0; i < this->cols; ++i)
        for (std::size_t id = this->outer[i]; id < this->outer[i + 1]; ++id)
            ret[this->inner[id]] += this->values[id] * rhs[i];

    return ret;
}

/* Extracts the diagonal into a diag_size x diag_size matrix. Relies on
 * sorted inner indices to break out of each column early. */
template<typename T>
SparseMatrix<T>
SparseMatrix<T>::diagonal_matrix (void) const
{
    std::size_t const diag_size = std::min(this->rows, this->cols);
    SparseMatrix ret(diag_size, diag_size);
    ret.reserve(diag_size);
    for (std::size_t i = 0; i < diag_size; ++i)
    {
        ret.outer[i] = ret.values.size();
        for (std::size_t j = this->outer[i]; j < this->outer[i + 1]; ++j)
            if (this->inner[j] == i)
            {
                ret.inner.push_back(i);
                ret.values.push_back(this->values[j]);
            }
            else if (this->inner[j] > i)
                break;
    }
    ret.outer[diag_size] = ret.values.size();
    return ret;
}

/* Scales existing diagonal entries in place (missing diagonal entries
 * are not created). Early-exits once past row i in each column. */
template<typename T>
void
SparseMatrix<T>::mult_diagonal (T const& factor)
{
    for (std::size_t i = 0; i < this->outer.size() - 1; ++i)
        for (std::size_t j = this->outer[i]; j < this->outer[i + 1]; ++j)
        {
            if (this->inner[j] == i)
                this->values[j] *= factor;
            if (this->inner[j] >= i)
                break;
        }
}

template<typename T>
void
SparseMatrix<T>::cwise_invert (void)
{
    for (std::size_t i = 0; i < this->values.size(); ++i)
        this->values[i] = T(1) / this->values[i];
}

template<typename T>
void
SparseMatrix<T>::column_nonzeros (std::size_t col, DenseVector<T>* vector) const
{
    std::size_t const start = this->outer[col];
    std::size_t const end = this->outer[col + 1];
    vector->resize(end - start);
    for (std::size_t row = start, i = 0; row < end; ++row, ++i)
        vector->at(i) = this->values[row];
}
template<typename T> inline std::size_t SparseMatrix<T>::num_non_zero (void) const { return this->values.size(); } template<typename T> inline std::size_t SparseMatrix<T>::num_rows (void) const { return this->rows; } template<typename T> inline std::size_t SparseMatrix<T>::num_cols (void) const { return this->cols; } template<typename T> inline T* SparseMatrix<T>::begin (void) { return this->values.data(); } template<typename T> inline T* SparseMatrix<T>::end (void) { return this->values.data() + this->values.size(); } template<typename T> void SparseMatrix<T>::debug (void) const { std::cout << "SparseMatrix (" << this->rows << " rows, " << this->cols << " cols, " << this->num_non_zero() << " values)" << std::endl; std::cout << " Values:"; for (std::size_t i = 0; i < this->values.size(); ++i) std::cout << " " << this->values[i]; std::cout << std::endl << " Inner:"; for (std::size_t i = 0; i < this->inner.size(); ++i) std::cout << " " << this->inner[i]; std::cout << std::endl << " Outer:"; for (std::size_t i = 0; i < this->outer.size(); ++i) std::cout << " " << this->outer[i]; std::cout << std::endl; } SFM_BA_NAMESPACE_END SFM_NAMESPACE_END #endif // SFM_SPARSE_MATRIX_HEADER
dependences.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#include "callback.h"
#include <omp.h>
#include <math.h>
#include <unistd.h>

// Verifies that the OMPT runtime reports task-dependence events:
// two explicit tasks with an out->in dependence on 'x' must trigger
// ompt_callback_task_dependences for each task and a
// task_dependence pair between them.
int main() {
  int x = 0;
#pragma omp parallel num_threads(2) {
#pragma omp master
    {
      print_ids(0);
      // First task: writer. The delay keeps it alive so the dependence
      // with the second task is actually enforced by the runtime.
#pragma omp task depend(out : x) {
        x++;
        delay(100);
      }
      print_fuzzy_address(1);
      print_ids(0);
      // Second task: reader/writer, depends on the first via 'x'.
#pragma omp task depend(in : x) { x = -1; }
      print_ids(0);
    }
  }

  x++;

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
  // CHECK-NOT: {{^}}0: Could not register callback
  // 'ompt_callback_task_dependences'
  // CHECK-NOT: {{^}}0: Could not register callback
  // 'ompt_callback_task_dependence'

  // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]

  // make sure initial data pointers are null
  // CHECK-NOT: 0: new_task_data initially not null

  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_implicit_task_begin:
  // parallel_id=[[PARALLEL_ID:[0-9]+]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
  // task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT:0x[0-f]+]],
  // reenter_frame=[[NULL]]

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
  // parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
  // parent_task_frame.reenter={{0x[0-f]+}},
  // new_task_id=[[FIRST_TASK:[0-f]+]],
  // codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}},
  // task_type=ompt_task_explicit=4, has_dependences=yes
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_dependences:
  // task_id=[[FIRST_TASK]], deps={{0x[0-f]+}}, ndeps=1
  // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
  // task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]],
  // reenter_frame=[[NULL]]

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
  // parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
  // parent_task_frame.reenter={{0x[0-f]+}},
  // new_task_id=[[SECOND_TASK:[0-f]+]],
  // codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit=4,
  // has_dependences=yes
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_dependences:
  // task_id=[[SECOND_TASK]], deps={{0x[0-f]+}}, ndeps=1
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_dependence_pair:
  // first_task_id=[[FIRST_TASK]], second_task_id=[[SECOND_TASK]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
  // task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]],
  // reenter_frame=[[NULL]]

  return 0;
}
taiko_ranking_map.c
/*
 * Copyright (©) 2015-2016 Lucas Maugère, Thomas Mijieux
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "osux.h"

#include "bpm.h"
#include "taiko_ranking_object.h"
#include "taiko_ranking_map.h"
#include "print.h"
#include "tr_db.h"
#include "tr_mods.h"
#include "compute_stars.h"
#include "treatment.h"
#include "config.h"
#include "check_osu_file.h"

#define BASIC_SV 1.4

#define TYPE(type) (type & (~HITOBJECT_NEWCOMBO) & 0x0F)
// this get rid of the 'new_combo' flag to get the hit object's type
// more easily

static int get_tro_type_from_osux_ho(osux_hitobject *ho);
static double get_bpm_app_from_osux_tp(osux_timingpoint *tp, double sv);
static void trm_add_to_ps(struct tr_map *map, enum played_state ps, int i);
static void trm_recompute_acc(struct tr_map *map);
static struct tr_map *trm_from_osux_map(osux_beatmap *map);
static struct tr_map *trm_from_file(const char *filename);

//--------------------------------------------------

/*
 * Full ranking pipeline for one map: copy, apply mods, compute stars,
 * print, and optionally store in the database. Works on a copy so the
 * caller's map is untouched; printing is serialized across OpenMP
 * threads via the critical section.
 */
void trm_main(const struct tr_map *map)
{
    struct tr_map *map_copy = trm_copy(map);
    trm_set_mods(map_copy, map->conf->mods);
    trm_add_modifier(map_copy);

    trm_apply_mods(map_copy);
    trm_compute_stars(map_copy);

#pragma omp critical
    trm_print(map_copy);

    if (GLOBAL_CONFIG->db_enable)
        trm_db_insert(map_copy);

    trm_free(map_copy);
}

//--------------------------------------------------

/* Re-points each object's back-reference at this map's object array;
 * must be called after the array is (re)allocated or copied. */
void trm_set_read_only_objects(struct tr_map *map)
{
    for (int i = 0; i < map->nb_object; i++)
        map->object[i].objs = map->object;
}

//--------------------------------------------------

/* Applies config-driven map transformations before mod application. */
void trm_add_modifier(struct tr_map *map)
{
    if (map->conf->flat)
        trm_flat_big(map);
    if (map->conf->no_bonus)
        trm_remove_bonus(map);
}

//--------------------------------------------------

void trm_set_mods(struct tr_map *map, int mods)
{
    map->mods = mods;
}

//--------------------------------------------------

/*
 * Deep copy of a map: the object array and all owned strings are
 * duplicated so the copy can be freed independently.
 * NOTE(review): calloc/strdup results are not checked for NULL —
 * consistent with the rest of this file, but worth confirming the
 * project's OOM policy.
 */
struct tr_map *trm_copy(const struct tr_map *map)
{
    struct tr_map *copy = calloc(sizeof(*copy), 1);
    memcpy(copy, map, sizeof(*map));

    copy->object = tro_copy(map->object, map->nb_object);
    copy->title = strdup(map->title);
    copy->artist = strdup(map->artist);
    copy->source = strdup(map->source);
    copy->creator = strdup(map->creator);
    copy->diff = strdup(map->diff);
    copy->title_uni = strdup(map->title_uni);
    copy->artist_uni = strdup(map->artist_uni);
    copy->hash = strdup(map->hash);

    trm_set_read_only_objects(copy);
    return copy;
}

//-----------------------------------------------------

/* Frees a map and everything it owns; NULL-safe. */
void trm_free(struct tr_map *map)
{
    if (map == NULL)
        return;

    free(map->title);
    free(map->artist);
    free(map->source);
    free(map->creator);
    free(map->diff);
    free(map->title_uni);
    free(map->artist_uni);
    free(map->hash);

    free(map->object);
    free(map);
}

//--------------------------------------------------

/*
 * Loads a map either from an .osu file path or from a beatmap hash
 * (resolved through the beatmap database). Returns NULL on failure.
 */
struct tr_map *trm_new(const char *filename)
{
    struct tr_map *res = NULL;
    char *path = NULL;

    switch ( tr_check_file(filename) ) {
    case TR_FILENAME_OSU_FILE:
        res = trm_from_file(filename);
        break;
    case TR_FILENAME_HASH:
        if (!GLOBAL_CONFIG->beatmap_db_enable) {
            tr_error("database lookup disabled");
            break;
        }
        path = osux_beatmap_db_get_path_by_hash(&GLOBAL_CONFIG->beatmap_db,
                                                filename);
        if (path == NULL) {
            tr_error("could not find beatmap for hash '%s'", filename);
            break;
        }
        res = trm_from_file(path);
        break;
    case TR_FILENAME_ERROR:
    default:
        tr_error("Could not load: '%s'", filename);
        break;
    }
    /* path comes from a glib-based API, hence g_free; NULL-safe. */
    g_free(path);
    return res;
}

//---------------------------------------------------------------
//--------------------------------------------------------------- //--------------------------------------------------------------- static int get_tro_type_from_osux_ho(osux_hitobject *ho) { int bits = 0; int sample = ho->hitsound.sample; if (HIT_OBJECT_IS_SLIDER(ho)) bits |= TRO_R; else if (HIT_OBJECT_IS_SPINNER(ho)) bits |= TRO_S; else if (HIT_OBJECT_IS_CIRCLE(ho)) { if ((sample & (SAMPLE_WHISTLE | SAMPLE_CLAP)) != 0) bits |= TRO_K; else bits |= TRO_D; } if ((sample & SAMPLE_FINISH) != 0) return bits | TRO_BIG; else return bits; } //--------------------------------------------------------------- static double get_bpm_app_from_osux_tp(osux_timingpoint *tp, double sv) { double sv_multiplication; if (!tp->inherited) sv_multiplication = 1.; else sv_multiplication = -100. / tp->slider_velocity_multiplier; return (mpb_to_bpm(tp->millisecond_per_beat) * sv_multiplication * (sv / BASIC_SV)); } //--------------------------------------------------------------- //--------------------------------------------------------------- //--------------------------------------------------------------- static struct tr_map *trm_from_file(const char *filename) { osux_beatmap map; if (osux_beatmap_init(&map, filename) < 0) { osux_error("Cannot open beatmap %s\n", filename); return NULL; } struct tr_map *res = trm_from_osux_map(&map); osux_beatmap_free(&map); return res; } //--------------------------------------------------------------- static int osux_map_check_mode(osux_beatmap *map) { switch (map->game_mode) { case GAME_MODE_STD: if (GLOBAL_CONFIG->autoconvert_enable) { if (osux_beatmap_taiko_autoconvert(map) == 0) return 0; tr_error("autoconversion error."); } else tr_error("autoconverting map from std game mode is disabled..."); return -1; case GAME_MODE_TAIKO: return 0; case GAME_MODE_CTB: tr_error("Catch the beat?!"); return -1; case GAME_MODE_MANIA: tr_error("Taiko is not 2k."); // lol return -1; default: break; } return -1; } 
//--------------------------------------------------------------- static void trm_from_osux_map_objects(struct tr_map *tr_map, const osux_beatmap *map) { tr_map->nb_object = map->hitobject_count; tr_map->object = calloc(sizeof(struct tr_object), map->hitobject_count); unsigned current_tp = 0; tr_map->max_combo = 0; for (unsigned i = 0; i < map->hitobject_count; i++) { while (current_tp < (map->timingpoint_count - 1) && map->timingpoints[current_tp + 1].offset <= map->hitobjects[i].offset) current_tp++; struct tr_object *o = &tr_map->object[i]; osux_hitobject *ho = &map->hitobjects[i]; osux_timingpoint *tp = &map->timingpoints[current_tp]; o->offset = (int) ho->offset; o->bf = get_tro_type_from_osux_ho(ho); o->bpm_app = get_bpm_app_from_osux_tp(tp, map->SliderMultiplier); o->end_offset = ho->end_offset; if (tro_is_bonus(o)) { o->ps = BONUS; } else { tr_map->max_combo++; o->ps = GREAT; } o->objs = NULL; } } static void trm_from_osux_map_meta(struct tr_map *tr_map, const osux_beatmap *map) { tr_map->hash = strdup(map->md5_hash); tr_map->od = map->OverallDifficulty; tr_map->title = strdup(map->Title); tr_map->artist = strdup(map->Artist); tr_map->source = strdup(map->Source); tr_map->creator = strdup(map->Creator); tr_map->diff = strdup(map->Version); tr_map->mapset_osu_ID = map->BeatmapSetID; tr_map->diff_osu_ID = map->BeatmapID; if (map->TitleUnicode == NULL) tr_map->title_uni = strdup(tr_map->title); else tr_map->title_uni = strdup(map->TitleUnicode); if (map->ArtistUnicode == NULL) tr_map->artist_uni = strdup(tr_map->artist); else tr_map->artist_uni = strdup(map->ArtistUnicode); } static void trm_set_ggm_and_acc(struct tr_map *tr_map) { tr_map->great = tr_map->max_combo; tr_map->good = 0; tr_map->miss = 0; tr_map->bonus = tr_map->nb_object - tr_map->max_combo; if (tr_map->max_combo != 0) tr_map->acc = MAX_ACC; else tr_map->acc = 0; } static struct tr_map *trm_from_osux_map(osux_beatmap *map) { if (osux_map_check_mode(map) < 0) return NULL; struct tr_map *tr_map = 
calloc(sizeof(struct tr_map), 1); trm_from_osux_map_objects(tr_map, map); trm_from_osux_map_meta(tr_map, map); trm_set_ggm_and_acc(tr_map); return tr_map; } //--------------------------------------------------------------- //--------------------------------------------------------------- //--------------------------------------------------------------- static void trm_print_out_tro_header(int filter) { if ((filter & FILTER_BASIC) != 0) fprintf(OUTPUT_INFO, "offset\trest\ttype\tbpm app\tstate\t"); if ((filter & FILTER_BASIC_PLUS) != 0) fprintf(OUTPUT_INFO, "offset\tend\trest\ttype\tbpm app\tstate\t"); if ((filter & FILTER_ADDITIONNAL) != 0) fprintf(OUTPUT_INFO, "l hand\tr hand\tobj app\tobj dis\t"); if ((filter & FILTER_DENSITY) != 0) fprintf(OUTPUT_INFO, "dnst rw\tdnst cl\tdnst*\t"); if ((filter & FILTER_READING) != 0) fprintf(OUTPUT_INFO, "app\tdis\tseen\tread*\t"); if ((filter & FILTER_READING_PLUS) != 0) fprintf(OUTPUT_INFO, "app\tend app\tdis\tend dis\tenddis2\tline_a\tb\t" "b_end\tseen\tread*\t"); if ((filter & FILTER_ACCURACY) != 0) fprintf(OUTPUT_INFO, "slow\thitwin\tspc\tacc*\t"); if ((filter & FILTER_PATTERN) != 0) fprintf(OUTPUT_INFO, "proba\tpattrn\tpttrn*\t"); if ((filter & FILTER_STAR) != 0) fprintf(OUTPUT_INFO, "dst*\tread*\tptrn*\tacc*\tfin*\t"); fprintf(OUTPUT_INFO, "\n"); } void trm_print_out_tro(const struct tr_map *map, int filter) { trm_print_out_tro_header(filter); for (int i = 0; i < map->nb_object; ++i) tro_print(&map->object[i], filter); } //------------------------------------------------- #define CASE_PRINT(C, STAR) \ case C: \ fprintf(OUTPUT, "%.4g\t", STAR); \ break static void trm_print_out_results(const struct tr_map *map) { char *order = GLOBAL_CONFIG->print_order; int i = 0; while (order[i]) { switch (order[i]) { CASE_PRINT('F', map->final_star); CASE_PRINT('D', map->density_star); CASE_PRINT('R', map->reading_star); CASE_PRINT('P', map->pattern_star); CASE_PRINT('A', map->accuracy_star); default: break; } i++; } fprintf(OUTPUT, 
"(%.4g%%)\t", map->acc); trm_print_out_mods(map); print_string_size(map->diff, 24, OUTPUT); print_string_size(map->title, 32, OUTPUT); print_string_size(map->creator, 16, OUTPUT); fprintf(OUTPUT, "\n"); } //-------------------------------------------------- static char *yaml_prefix = "maps: ["; void tr_print_yaml_exit(void) { if (GLOBAL_CONFIG->print_yaml) { if (yaml_prefix[0] != 'm') fprintf(OUTPUT, "]\n"); else fprintf(OUTPUT, "%s]\n", yaml_prefix); } } static void fprintf_escape_char(FILE *out, const char *s, char c, const char *escaped) { if (s == NULL || s[0] == '\0') return; char *str = strdup(s); char ch[2] = { c, '\0'}; char *token = strtok(str, ch); while (1) { fprintf(out, "%s", token); token = strtok(NULL, ch); if (token != NULL) fprintf(out, "%s", escaped); else break; } free(str); } #define fprintf_dquote_escape(out, str) \ fprintf_escape_char(out, str, '"', "\\\"") void trm_print_yaml(const struct tr_map *map) { char *mods = trm_mods_to_str(map); fprintf(OUTPUT, "%s{", yaml_prefix); fprintf(OUTPUT, "title: \""); fprintf_dquote_escape(OUTPUT, map->title); fprintf(OUTPUT, "\", title_uni: \""); fprintf_dquote_escape(OUTPUT, map->title_uni); fprintf(OUTPUT, "\", artist: \""); fprintf_dquote_escape(OUTPUT, map->artist); fprintf(OUTPUT, "\", artist_uni: \""); fprintf_dquote_escape(OUTPUT, map->artist_uni); fprintf(OUTPUT, "\", source: \""); fprintf_dquote_escape(OUTPUT, map->source); fprintf(OUTPUT, "\", creator: \""); fprintf_dquote_escape(OUTPUT, map->creator); fprintf(OUTPUT, "\", difficulty: \""); fprintf_dquote_escape(OUTPUT, map->diff); fprintf(OUTPUT, "\", "); fprintf(OUTPUT, "accuracy: %g, ", map->acc); fprintf(OUTPUT, "great: %d, ", map->great); fprintf(OUTPUT, "good: %d, ", map->good); fprintf(OUTPUT, "miss: %d, ", map->miss); fprintf(OUTPUT, "bonus: %d, ", map->bonus); fprintf(OUTPUT, "max_combo: %d, ", map->max_combo); fprintf(OUTPUT, "combo: %d, ", map->combo); fprintf(OUTPUT, "mods: \"%s\", ", mods); fprintf(OUTPUT, "stars: {"); 
fprintf(OUTPUT, "density_star: %g, ", map->density_star); fprintf(OUTPUT, "pattern_star: %g, ", map->pattern_star); fprintf(OUTPUT, "reading_star: %g, ", map->reading_star); fprintf(OUTPUT, "accuracy_star: %g, ", map->accuracy_star); fprintf(OUTPUT, "final_star: %g", map->final_star); fprintf(OUTPUT, "}"); if (GLOBAL_CONFIG->print_tro) { fprintf(OUTPUT, ", objects: ["); for (int i = 0; i < map->nb_object; i++) { tro_print_yaml(&map->object[i]); if (i != map->nb_object - 1) fprintf(OUTPUT, ", "); } fprintf(OUTPUT, "]"); } fprintf(OUTPUT, "}"); free(mods); yaml_prefix = ", "; } //-------------------------------------------------- static void trm_print_out(const struct tr_map *map) { if (GLOBAL_CONFIG->print_tro) trm_print_out_tro(map, GLOBAL_CONFIG->print_filter); trm_print_out_results(map); } //-------------------------------------------------- void trm_print(const struct tr_map *map) { if (GLOBAL_CONFIG->print_yaml) trm_print_yaml(map); else trm_print_out(map); } //-------------------------------------------------- //-------------------------------------------------- //-------------------------------------------------- int trm_get_hardest_tro(const struct tr_map *map) { int best = 0; for (int i = 0; i < map->nb_object; i++) if (map->object[i].final_star >= map->object[best].final_star && map->object[i].ps == GREAT) best = i; return best; } //-------------------------------------------------- int trm_get_best_influence_tro(const struct tr_map *map) { int best = -1; double star = map->final_star; for (int i = 0; i < map->nb_object; i++) { if (map->object[i].ps != GREAT) continue; struct tr_map *map_copy = trm_copy(map); trm_set_tro_ps(map_copy, i, MISS); trm_compute_stars(map_copy); if (star > map_copy->final_star) { best = i; star = map_copy->final_star; } trm_free(map_copy); } return best; } //-------------------------------------------------- static void trm_add_to_ps(struct tr_map *map, enum played_state ps, int i) { switch (ps) { case GREAT: map->great += i; 
break; case GOOD: map->good += i; break; case MISS: map->miss += i; break; case BONUS: tr_error("Cannot change bonus!"); break; } } //-------------------------------------------------- void trm_set_tro_ps(struct tr_map *map, int x, enum played_state ps) { if (map->object[x].ps == ps) tr_error("Object is already with the played state wanted."); trm_add_to_ps(map, map->object[x].ps, -1); trm_add_to_ps(map, ps, 1); map->object[x].ps = ps; trm_recompute_acc(map); if (ps == GOOD) { map->object[x].density_star = 0; map->object[x].reading_star = 0; map->object[x].pattern_star = 0; map->object[x].accuracy_star = 0; map->object[x].final_star = 0; } else { trm_set_combo(map); } } //-------------------------------------------------- double compute_acc(int great, int good, int miss) { return (great + good * 0.5) / (great + good + miss) * MAX_ACC; } static void trm_recompute_acc(struct tr_map *map) { map->acc = compute_acc(map->great, map->good, map->miss); } //-------------------------------------------------- void trm_flat_big(struct tr_map *map) { for (int i = 0; i < map->nb_object; i++) { map->object[i].bf &= ~TRO_BIG; // remove big field } } void trm_remove_tro(struct tr_map *map, int o) { for (int i = o; i < map->nb_object - 1; i++) map->object[i] = map->object[i+1]; map->nb_object--; } void trm_remove_bonus(struct tr_map *map) { for (int i = 0; i < map->nb_object; i++) if (tro_is_bonus(&map->object[i])) trm_remove_tro(map, i); }
mxnet_op.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 *  Copyright (c) 2017 by Contributors
 * \file mxnet_op.h
 * \brief
 * \author Junyuan Xie
 */
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_

#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include "./operator_tune.h"
#include "../engine/openmp.h"

#ifdef __CUDACC__
#include "../common/cuda_utils.h"
#endif  // __CUDACC__

namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;

/* PI lives in constant memory on device, as a plain global on host. */
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif

/* Recommended number of worker threads for a problem of size N;
 * specialized for cpu and gpu below. */
template<typename xpu>
int get_num_threads(const int N);

#ifdef __CUDACC__
/* Grid-stride loop: each CUDA thread handles indices
 * i, i + gridDim*blockDim, ... so any n is covered by a fixed grid. */
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
       i < (n); \
       i += blockDim.x * gridDim.x)

/* Properties of the currently selected CUDA device. */
inline cudaDeviceProp cuda_get_device_prop() {
  int device;
  CUDA_CALL(cudaGetDevice(&device));
  cudaDeviceProp deviceProp;
  CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
  return deviceProp;
}

/*!
* \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } /*! \brief operator request type switch */ #define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ { \ const OpReqType ReqType = kNullOp; \ {__VA_ARGS__} \ } \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } #define MXNET_NDIM_SWITCH(NDim, ndim, ...) \ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) 
\ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else if (NDim == 6) { \ const int ndim = 6; \ {__VA_ARGS__} \ } else if (NDim == 7) { \ const int ndim = 7; \ {__VA_ARGS__} \ } else if (NDim == 8) { \ const int ndim = 8; \ {__VA_ARGS__} \ } else if (NDim == 9) { \ const int ndim = 9; \ {__VA_ARGS__} \ } else if (NDim == 10) { \ const int ndim = 10; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) 
\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ LOG(FATAL) << "This operation does not " \ "support float16"; \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } template <typename T> struct AccType { using type = T; }; template <> struct AccType<mshadow::half::half_t> { using type = float; }; #define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not uint8"; \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not int8"; \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int32_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int32"; \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int64"; \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ typedef int64_t AType; \ LOG(FATAL) << 
"This operation only support " \ "floating point types, not bool"; \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT_TYPE_SWITCH(type, DType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ case 
mshadow::kBool: \ { \ typedef bool DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT32_INT64_TYPE_SWITCH(type, DType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } \ break; \ case mshadow::kUint8: \ { \ LOG(FATAL) << "This operation only support " \ "integer types, not uint8"; \ } \ break; \ case mshadow::kInt8: \ { \ LOG(FATAL) << "This operation only support " \ "integer types, not int8"; \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kBool: \ { \ LOG(FATAL) << "This operation only support " \ "integer types, not bool"; \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Invalid loading enum type " << type; \ } /*! 
* \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } #define MXNET_ADD_ALL_TYPES \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) #define MXNET_ADD_ALL_TYPES_WITH_BOOL \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) \ .add_enum("bool", mshadow::kBool) /* \brief Compute flattened index given coordinates and shape. 
*/
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t ret = 0;
  // Horner-style accumulation. The (shape[i] > coord[i]) factor zeroes the
  // contribution of any axis where coord[i] >= shape[i] (i.e. size-1 broadcast
  // axes), so a coordinate taken from a larger broadcast-compatible shape can
  // be raveled directly against this shape.
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
  }
  return ret;
}
/* Compute coordinates from flattened index given shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
  // Peel one coordinate per axis, innermost first, using a single division
  // per axis (the remainder is recovered as j - tmp*shape[i]).
  // NOTE(review): termination of `i >= 0` assumes index_t is signed --
  // confirm against mshadow's index_t definition.
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret[i] = j - tmp*shape[i];
    j = tmp;
  }
  return ret;
}
/* Compute dot product of two vector */
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  // Linear offset of `coord` under `stride`: sum_i coord[i]*stride[i].
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret += coord[i] * stride[i];
  }
  return ret;
}
/* Combining unravel and dot */
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
                                    const Shape<ndim>& stride) {
  // Equivalent to dot(unravel(idx, shape), stride) without materialising the
  // intermediate coordinate vector.
  // NOTE(review): same signed-index_t termination assumption as unravel().
  index_t ret = 0;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret += (j - tmp*shape[i])*stride[i];
    j = tmp;
  }
  return ret;
}
/* Calculate stride of each dim from shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  // Row-major strides; axes of extent <= 1 get stride 0 so that indexing
  // through them broadcasts instead of advancing.
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}
/* Increment coordinates */
template<int ndim>
MSHADOW_XINLINE bool inc(Shape<ndim>* coord, const Shape<ndim>& shape) {
  // Odometer-style increment of `coord` within `shape`; carries propagate
  // from the innermost axis outward. Returns false once the coordinate has
  // wrapped past the end of the outermost axis.
  ++(*coord)[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
  }
  return (*coord)[0] < shape[0];
}
/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx, const Shape<ndim>& stride) {
  // Same odometer increment, but also keeps the linear offset *idx (under
  // `stride`) in sync, so callers avoid re-raveling on every step. Each
  // carry rewinds a full inner axis (- shape[i]*stride[i]) and advances the
  // next outer axis (+ stride[i-1]).
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}
/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx1, const Shape<ndim>& stride1,
                         index_t* idx2, const Shape<ndim>& stride2) {
  // Variant of the above that tracks two linear offsets simultaneously
  // (e.g. one per operand of a binary broadcast kernel).
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}
/*!
 * \brief Simple copy data from one blob to another
 * \param to Destination blob
 * \param from Source blob
 *
 * Requires equal element counts and matching device; when the dtypes differ
 * the source is cast element-wise via mshadow::expr::tcast, otherwise a
 * straight mshadow::Copy is issued on stream `s`.
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH_WITH_BOOL(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
    } else {
      // Mixed-dtype copy: cast each element of `from` to DType on the fly.
      MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) =
          mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}
/*!
\brief Binary op backward gradient OP wrapper */ template<typename GRAD_OP> struct backward_grad { /* \brief Backward calc with grad * \param a - output grad * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies) * \return input grad */ template<typename DType, typename ...Args> MSHADOW_XINLINE static DType Map(DType a, Args... args) { return DType(a * GRAD_OP::Map(args...)); } }; /*! \brief Binary op backward gradient OP wrapper (tuned) */ template<typename GRAD_OP> struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable { using backward_grad<GRAD_OP>::Map; }; /*! \brief Select assignment operation based upon the req value * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */ template<typename OP, int req> struct op_with_req { typedef OP Operation; /*! \brief input is one tensor */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } /*! \brief input is tensor and two scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value_1, const DType value_2) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2)); } /*! \brief No inputs (ie fill to constant value) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { KERNEL_ASSIGN(out[i], req, OP::Map()); } /*! 
\brief input is single scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(value)); } /*! \brief inputs are two tensors and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value)); } /*! \brief inputs are three tensors (ie backward grad with binary grad function) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType *input_3) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i])); } /*! \brief input is a tensor and the output is a boolean tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors with a boolean output tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and two scalar value with a boolean output tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } #ifndef _WIN32 /*! 
\brief inputs are two tensors with a half_t output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, mshadow::half::half_t *out, const DType *lhs, const mshadow::half::half_t *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief inputs are two tensors with a float output tensor */ template<typename DType, typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value || std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const float *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief inputs are two tensors with a double output tensor */ template<typename DType, typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value || std::is_same<DType, float>::value || std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief inputs are two tensors with a half_t output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, mshadow::half::half_t *out, const DType *lhs, const mshadow::half::half_t value) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value)); } /*! \brief inputs are two tensors with a float output tensor */ template<typename DType, typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value || std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const float value) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value)); } /*! 
\brief inputs are two tensors with a double output tensor */ template<typename DType, typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value || std::is_same<DType, float>::value || std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double value) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value)); } #endif /*! \brief inputs are two tensors with a float output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is a tensor and a scalar value with a float output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } }; template<typename OP, typename xpu> struct Kernel; /*! * \brief CPU Kernel launcher * \tparam OP Operator to launch */ template<typename OP> struct Kernel<OP, cpu> { /*! * \brief Launch a generic CPU kernel. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... 
args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); ++i) { OP::Map(i, args...); } } #else for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif return true; } /*! * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended * for irregular workloads such as spmv. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false); if (omp_threads < 2) { for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) schedule(dynamic) for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } } #else for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif return true; } /*! * \brief Launch CPU kernel which has OMP tuning data available. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam PRIMITIVE_OP The primitive operation to use for tuning * \tparam DType Data type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param dest Destination pointer (used to infer DType) * \param args Varargs to eventually pass to the OP::Map() function */ template<typename PRIMITIVE_OP, typename DType, typename ...Args> static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... 
args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP( N, static_cast<size_t>(omp_threads))) { for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); ++i) { OP::Map(i, args...); } } #else for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif } /*! * \brief Launch custom-tuned kernel where each thread is set to * operate on a contiguous partition * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions */ template<typename ...Args> inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { OP::Map(0, N, args...); } else { const auto length = (N + omp_threads - 1) / omp_threads; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); i += length) { OP::Map(i, i + length > N ? N - i : length, args...); } } #else OP::Map(0, N, args...); #endif } /*! * \brief Launch a tunable OP with implicitly-supplied data type * \tparam DType Data type * \tparam T OP type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<T, DType>(s, N, dest, args...); return true; } /*! 
* \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req) * \tparam DType Data type * \tparam T Wrapper type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<typename T::Operation, DType>(s, N, dest, args...); return true; } }; #ifdef __CUDACC__ template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, args...); } } template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel_ex(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, 1, args...); } } template<typename OP> struct Kernel<OP, gpu> { /*! \brief Launch GPU kernel */ template<typename ...Args> inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) { if (0 == N) return; using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel); } template<typename ...Args> inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... 
args) { if (0 == N) return; using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel_ex<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex); } }; #endif // __CUDACC__ /*! * \brief Set to immediate scalar value kernel * \tparam val Scalar immediate */ template<int val> struct set_to_int : public tunable { // mxnet_op version (when used directly with Kernel<>::Launch()) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { out[i] = DType(val); } // mshadow_op version (when used with op_with_req<>) MSHADOW_XINLINE static int Map() { return val; } }; /*! * \brief Special-case kernel shortcut for setting to zero and one */ using set_zero = set_to_int<0>; using set_one = set_to_int<1>; } // namespace mxnet_op } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_MXNET_OP_H_
.body.c
#define S1(zT0,zT1,zT2,zT3,i,j) B[i][j]=A[i][j]+u1[i]*v1[j]+u2[i]*v2[j]; #define S2(zT0,zT1,zT2,zT3,i,j) x[i]=x[i]+beta*B[j][i]*y[j]; #define S3(zT0,zT1,zT2,zT3,i) x[i]=x[i]+z[i]; #define S4(zT0,zT1,zT2,zT3,i,j) w[i]=w[i]+alpha*B[i][j]*x[j]; int t0, t1, t2, t3, t4, t5, t6, t7; register int lb, ub, lb1, ub1, lb2, ub2; register int lbv, ubv; /* Generated from PLUTO-produced CLooG file by CLooG v0.14.1 64 bits in 0.03s. */ lb1=0; ub1=floord(N-1,8000); #pragma omp parallel for shared(t0,lb1,ub1) private(t1,t2,t3,t4,t5,t6,t7) for (t1=lb1; t1<=ub1; t1++) { for (t2=0;t2<=floord(N-1,256);t2++) { for (t3=max(20*t1,0);t3<=min(20*t1+19,floord(N-1,400));t3++) { for (t4=max(0,16*t2);t4<=min(16*t2+15,floord(N-1,16));t4++) { for (t5=max(0,16*t4);t5<=min(N-1,16*t4+15);t5++) { { lbv=max(0,400*t3); ubv=min(N-1,400*t3+399); #pragma ivdep #pragma vector always for (t6=lbv; t6<=ubv; t6++) { S1(t1,t2,t3,t4,t5,t6) ; S2(t1,t2,t3,t4,t6,t5) ; } } } } } } } lb1=0; ub1=floord(N-1,8000); #pragma omp parallel for shared(t0,lb1,ub1) private(t1,t2,t3,t4,t5,t6,t7) for (t1=lb1; t1<=ub1; t1++) { for (t3=max(20*t1,0);t3<=min(20*t1+19,floord(N-1,400));t3++) { for (t4=max(0,0);t4<=min(0,15);t4++) { { lbv=max(0,400*t3); ubv=min(N-1,400*t3+399); #pragma ivdep #pragma vector always for (t6=lbv; t6<=ubv; t6++) { S3(t1,0,t3,t4,t6) ; } } } } } lb1=0; ub1=floord(N-1,8000); #pragma omp parallel for shared(t0,lb1,ub1) private(t1,t2,t3,t4,t5,t6,t7) for (t1=lb1; t1<=ub1; t1++) { for (t2=0;t2<=floord(N-1,256);t2++) { for (t3=max(20*t1,0);t3<=min(20*t1+19,floord(N-1,400));t3++) { for (t4=max(16*t2,0);t4<=min(floord(N-1,16),16*t2+15);t4++) { for (t5=max(0,16*t4);t5<=min(N-1,16*t4+15);t5++) { { lbv=max(0,400*t3); ubv=min(N-1,400*t3+399); #pragma ivdep #pragma vector always for (t6=lbv; t6<=ubv; t6++) { S4(t1,t2,t3,t4,t6,t5) ; } } } } } } } /* End of CLooG code */
residual_based_adjoint_bossak_scheme.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: // #if !defined(KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED) #define KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED // System includes #include <vector> #include <string> #include <unordered_set> #include <functional> // External includes // Project includes #include "includes/define.h" #include "includes/checks.h" #include "includes/kratos_parameters.h" #include "solving_strategies/schemes/scheme.h" #include "response_functions/adjoint_response_function.h" #include "utilities/variable_utils.h" #include "utilities/indirect_scalar.h" #include "utilities/adjoint_extensions.h" #include "utilities/parallel_utilities.h" namespace Kratos { ///@name Kratos Classes ///@{ /// A scheme for dynamic adjoint equations, using Bossak time integration. /** * It can be used for either first- or second-order time derivatives. Elements * and conditions must provide a specialization of AdjointExtensions via their * data value container, which allows the scheme to operate independently of * the variable arrangements in the element or condition. */ template <class TSparseSpace, class TDenseSpace> class ResidualBasedAdjointBossakScheme : public Scheme<TSparseSpace, TDenseSpace> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedAdjointBossakScheme); typedef Scheme<TSparseSpace, TDenseSpace> BaseType; typedef typename BaseType::TSystemMatrixType SystemMatrixType; typedef typename BaseType::TSystemVectorType SystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::DofsArrayType DofsArrayType; ///@} ///@name Life Cycle ///@{ /// Constructor. 
ResidualBasedAdjointBossakScheme( Parameters Settings, AdjointResponseFunction::Pointer pResponseFunction ) : mpResponseFunction(pResponseFunction) { Parameters default_parameters(R"({ "name" : "adjoint_bossak", "scheme_type" : "bossak", "alpha_bossak" : -0.3 })"); Settings.ValidateAndAssignDefaults(default_parameters); mBossak.Alpha = Settings["alpha_bossak"].GetDouble(); } /// Destructor. ~ResidualBasedAdjointBossakScheme() override { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ int Check(const ModelPart& rModelPart) const override { KRATOS_TRY std::vector<const VariableData*> lambda2_vars = GatherVariables( rModelPart.Elements(), [](const AdjointExtensions& rExtensions, std::vector<const VariableData*>& rVec) { rExtensions.GetFirstDerivativesVariables(rVec); }); std::vector<const VariableData*> lambda3_vars = GatherVariables( rModelPart.Elements(), [](const AdjointExtensions& rExtensions, std::vector<const VariableData*>& rVec) { return rExtensions.GetSecondDerivativesVariables(rVec); }); std::vector<const VariableData*> auxiliary_vars = GatherVariables( rModelPart.Elements(), [](const AdjointExtensions& rExtensions, std::vector<const VariableData*>& rVec) { return rExtensions.GetAuxiliaryVariables(rVec); }); KRATOS_ERROR_IF(lambda2_vars.size() != lambda3_vars.size()) << "First derivatives variable list and second derivatives " "variables list size mismatch.\n"; KRATOS_ERROR_IF(lambda2_vars.size() != auxiliary_vars.size()) << "First derivatives variable list and auxiliary variables list " "size mismatch.\n"; for (unsigned int i_var = 0; i_var < lambda2_vars.size(); ++i_var) { const auto& r_lambda2_variable_name = lambda2_vars[i_var]->Name(); const auto& r_lambda3_variable_name = lambda3_vars[i_var]->Name(); const auto& r_auxiliary_variable_name = auxiliary_vars[i_var]->Name(); if (KratosComponents<Variable<array_1d<double, 3>>>::Has(r_lambda2_variable_name)) { CheckVariables<array_1d<double, 3>>(rModelPart, r_lambda2_variable_name, 
r_lambda3_variable_name, r_auxiliary_variable_name); } else if (KratosComponents<Variable<double>>::Has(r_lambda2_variable_name)) { CheckVariables<double>(rModelPart, r_lambda2_variable_name, r_lambda3_variable_name, r_auxiliary_variable_name); } else { KRATOS_ERROR << "Unsupported variable type " << r_lambda2_variable_name << "."; } } return BaseType::Check(rModelPart); KRATOS_CATCH(""); } void Initialize(ModelPart& rModelPart) override { KRATOS_TRY; BaseType::Initialize(rModelPart); // Allocate auxiliary memory. int num_threads = OpenMPUtils::GetNumThreads(); mLeftHandSide.resize(num_threads); mResponseGradient.resize(num_threads); mFirstDerivsLHS.resize(num_threads); mFirstDerivsResponseGradient.resize(num_threads); mSecondDerivsLHS.resize(num_threads); mSecondDerivsResponseGradient.resize(num_threads); mAdjointValuesVector.resize(num_threads); mAdjointIndirectVector2.resize(num_threads); mAdjointIndirectVector3.resize(num_threads); mAuxAdjointIndirectVector1.resize(num_threads); VariableUtils().SetNonHistoricalVariableToZero(NUMBER_OF_NEIGHBOUR_ELEMENTS, rModelPart.Nodes()); rModelPart.GetProcessInfo()[BOSSAK_ALPHA] = mBossak.Alpha; KRATOS_CATCH(""); } void InitializeSolutionStep( ModelPart& rModelPart, SystemMatrixType& rA, SystemVectorType& rDx, SystemVectorType& rb) override { KRATOS_TRY; BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb); const auto& r_current_process_info = rModelPart.GetProcessInfo(); mBossak = CalculateBossakConstants(mBossak.Alpha, GetTimeStep(r_current_process_info)); this->CalculateNodeNeighbourCount(rModelPart); KRATOS_CATCH(""); } void FinalizeSolutionStep( ModelPart& rModelPart, SystemMatrixType& rA, SystemVectorType& rDx, SystemVectorType& rb) override { KRATOS_TRY; BaseType::FinalizeSolutionStep(rModelPart, rA, rDx, rb); this->UpdateAuxiliaryVariable(rModelPart); KRATOS_CATCH(""); } void Update( ModelPart& rModelPart, DofsArrayType& rDofSet, SystemMatrixType& rA, SystemVectorType& rDx, SystemVectorType& rb) override { 
KRATOS_TRY; // Update degrees of freedom: adjoint variables associated to the // residual of the physical problem. this->mpDofUpdater->UpdateDofs(rDofSet, rDx); // Update adjoint variables associated to time integration. this->UpdateTimeSchemeAdjoints(rModelPart); KRATOS_CATCH(""); } void CalculateSystemContributions( Element& rCurrentElement, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, Element::EquationIdVectorType& rEquationId, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; const auto k = OpenMPUtils::ThisThread(); const auto& r_const_elem_ref = rCurrentElement; r_const_elem_ref.GetValuesVector(mAdjointValuesVector[k]); const auto local_size = mAdjointValuesVector[k].size(); if (rRHS_Contribution.size() != local_size) { rRHS_Contribution.resize(local_size, false); } if (rLHS_Contribution.size1() != local_size || rLHS_Contribution.size2() != local_size) { rLHS_Contribution.resize(local_size, local_size, false); } this->CheckAndResizeThreadStorage(local_size); this->CalculateGradientContributions(rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); this->CalculateFirstDerivativeContributions( rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); this->CalculateSecondDerivativeContributions( rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); this->CalculatePreviousTimeStepContributions( rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); this->CalculateResidualLocalContributions( rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); rCurrentElement.EquationIdVector(rEquationId, rCurrentProcessInfo); KRATOS_CATCH(""); } void CalculateLHSContribution( Element& rCurrentElement, LocalSystemMatrixType& rLHS_Contribution, Element::EquationIdVectorType& rEquationId, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; LocalSystemVectorType RHS_Contribution; 
CalculateSystemContributions(rCurrentElement, rLHS_Contribution, RHS_Contribution, rEquationId, rCurrentProcessInfo); KRATOS_CATCH(""); } void CalculateSystemContributions( Condition& rCurrentCondition, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, Condition::EquationIdVectorType& rEquationId, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; const auto k = OpenMPUtils::ThisThread(); const auto& r_const_cond_ref = rCurrentCondition; r_const_cond_ref.GetValuesVector(mAdjointValuesVector[k]); const auto local_size = mAdjointValuesVector[k].size(); if (rRHS_Contribution.size() != local_size) { rRHS_Contribution.resize(local_size, false); } if (rLHS_Contribution.size1() != local_size || rLHS_Contribution.size2() != local_size) { rLHS_Contribution.resize(local_size, local_size, false); } this->CheckAndResizeThreadStorage(local_size); this->CalculateGradientContributions(rCurrentCondition, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); this->CalculateFirstDerivativeContributions( rCurrentCondition, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); this->CalculateSecondDerivativeContributions( rCurrentCondition, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); // It is not required to call CalculatePreviousTimeStepContributions here again // since, the previous time step contributions from conditions are stored in variables // mentioned in AdjointExtensions, and they are added via CalculateSystemContributions<ElementType> // method. 
this->CalculateResidualLocalContributions( rCurrentCondition, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); rCurrentCondition.EquationIdVector(rEquationId, rCurrentProcessInfo); KRATOS_CATCH(""); } void CalculateLHSContribution( Condition& rCurrentCondition, LocalSystemMatrixType& rLHS_Contribution, Condition::EquationIdVectorType& rEquationId, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; LocalSystemVectorType RHS_Contribution; CalculateSystemContributions(rCurrentCondition, rLHS_Contribution, RHS_Contribution, rEquationId, rCurrentProcessInfo); KRATOS_CATCH(""); } void Clear() override { this->mpDofUpdater->Clear(); } ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualBasedAdjointBossakScheme"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected member Variables ///@{ struct BossakConstants { double Alpha; double Beta; double Gamma; double C0; double C1; double C2; double C3; double C4; double C5; double C6; double C7; }; ///@} ///@name Protected Operations ///@{ /** * @brief Calculates elemental residual * * \[ * \underline{F} = \underline{F} - \mathbf{\underline{K}}\underline{\lambda}_1 * \] * * @param rCurrentElement Current element * @param rLHS_Contribution Left hand side matrix (i.e. $\mathbf{\underline{K}}$) * @param rRHS_Contribution Right hand side vector (i.e. 
$\underline{F}$) * @param rCurrentProcessInfo Current process info */ virtual void CalculateResidualLocalContributions( Element& rCurrentElement, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, const ProcessInfo& rCurrentProcessInfo) { CalculateEntityResidualLocalContributions( rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); } /** * @brief Calculates condition residual * * \[ * \underline{F} = \underline{F} - \mathbf{\underline{K}}\underline{\lambda}_1 * \] * * @param rCurrentCondition Current condition * @param rLHS_Contribution Left hand side matrix (i.e. $\mathbf{\underline{K}}$) * @param rRHS_Contribution Right hand side vector (i.e. $\underline{F}$) * @param rCurrentProcessInfo Current process info */ virtual void CalculateResidualLocalContributions( Condition& rCurrentCondition, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, const ProcessInfo& rCurrentProcessInfo) { CalculateEntityResidualLocalContributions( rCurrentCondition, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); } /** * @brief Calculate time scheme contributions from elements * * @param rElement * @param rAdjointTimeSchemeValues2 * @param rAdjointTimeSchemeValues3 * @param rCurrentProcessInfo */ virtual void CalculateTimeSchemeContributions( Element& rElement, LocalSystemVectorType& rAdjointTimeSchemeValues2, LocalSystemVectorType& rAdjointTimeSchemeValues3, AdjointResponseFunction& rAdjointResponseFunction, const BossakConstants& rBossakConstants, const ProcessInfo& rCurrentProcessInfo) { CalculateEntityTimeSchemeContributions(rElement, rAdjointTimeSchemeValues2, rAdjointTimeSchemeValues3, rCurrentProcessInfo); } /** * @brief Calculates time scheme contributions from conditions * * @param rCondition * @param rAdjointTimeSchemeValues2 * @param rAdjointTimeSchemeValues3 * @param rCurrentProcessInfo */ virtual void CalculateTimeSchemeContributions( Condition& rCondition, 
LocalSystemVectorType& rAdjointTimeSchemeValues2, LocalSystemVectorType& rAdjointTimeSchemeValues3, AdjointResponseFunction& rAdjointResponseFunction, const BossakConstants& rBossakConstants, const ProcessInfo& rCurrentProcessInfo) { CalculateEntityTimeSchemeContributions(rCondition, rAdjointTimeSchemeValues2, rAdjointTimeSchemeValues3, rCurrentProcessInfo); } /** * @brief Calculates auxiliary variable contributions from elements * * @param rElement * @param rAdjointAuxiliaryValues * @param rCurrentProcessInfo */ virtual void CalculateAuxiliaryVariableContributions( Element& rElement, LocalSystemVectorType& rAdjointAuxiliaryValues, AdjointResponseFunction& rAdjointResponseFunction, const BossakConstants& rBossakConstants, const ProcessInfo& rCurrentProcessInfo) { CalculateEntityAuxiliaryVariableContributions( rElement, rAdjointAuxiliaryValues, rCurrentProcessInfo); } /** * @brief Calculates auxiliary contributions from conditions * * @param rCondition * @param rAdjointAuxiliaryValues * @param rCurrentProcessInfo */ virtual void CalculateAuxiliaryVariableContributions( Condition& rCondition, LocalSystemVectorType& rAdjointAuxiliaryValues, AdjointResponseFunction& rAdjointResponseFunction, const BossakConstants& rBossakConstants, const ProcessInfo& rCurrentProcessInfo) { CalculateEntityAuxiliaryVariableContributions( rCondition, rAdjointAuxiliaryValues, rCurrentProcessInfo); } ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ BossakConstants mBossak; typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater(); AdjointResponseFunction::Pointer mpResponseFunction; std::vector<LocalSystemMatrixType> mLeftHandSide; std::vector<LocalSystemVectorType> mResponseGradient; std::vector<LocalSystemMatrixType> mFirstDerivsLHS; std::vector<LocalSystemVectorType> mFirstDerivsResponseGradient; std::vector<LocalSystemMatrixType> mSecondDerivsLHS; std::vector<LocalSystemVectorType> 
mSecondDerivsResponseGradient; std::vector<LocalSystemVectorType> mAdjointValuesVector; std::vector<std::vector<IndirectScalar<double>>> mAdjointIndirectVector2; std::vector<std::vector<IndirectScalar<double>>> mAdjointIndirectVector3; std::vector<std::vector<IndirectScalar<double>>> mAuxAdjointIndirectVector1; ///@} ///@name Private Operations ///@{ /** * @brief Calculates entity residual * * \[ * \underline{F} = \underline{F} - \mathbf{\underline{K}}\underline{\lambda}_1 * \] * * @tparam TEntityType * @param rCurrentEntity Current Entity * @param rLHS_Contribution Left hand side matrix (i.e. $\mathbf{\underline{K}}$) * @param rRHS_Contribution Right hand side vector (i.e. $\underline{F}$) * @param rCurrentProcessInfo Current process info */ template<class TEntityType> void CalculateEntityResidualLocalContributions( TEntityType& rCurrentEntity, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, const ProcessInfo& rCurrentProcessInfo) { int k = OpenMPUtils::ThisThread(); auto& r_residual_adjoint = mAdjointValuesVector[k]; const auto& r_const_entity_ref = rCurrentEntity; r_const_entity_ref.GetValuesVector(r_residual_adjoint); noalias(rRHS_Contribution) -= prod(rLHS_Contribution, r_residual_adjoint); } /** * @brief Calculates entity first derivative contributions for adjoint system * * \[ * \mathbf{\underline{K}} = \mathbf{\underline{K}} + \frac{\partial \underline{R}^n}{\partial \underline{w}^n} \\ * \underline{F} = \underline{F} - \frac{\partial J^n}{\partial \underline{w}^n} * \] * * @tparam TEntityType * @param rCurrentEntity Current entity * @param rLHS_Contribution Left hand side matrix (i.e. $\mathbf{\underline{K}}$) * @param rRHS_Contribution Right hand side vector (i.e. 
$\underline{F}$) * @param rCurrentProcessInfo Current process info */ template<class TEntityType> void CalculateGradientContributions( TEntityType& rCurrentEntity, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, const ProcessInfo& rCurrentProcessInfo) { int k = OpenMPUtils::ThisThread(); rCurrentEntity.CalculateLeftHandSide(mLeftHandSide[k], rCurrentProcessInfo); this->mpResponseFunction->CalculateGradient( rCurrentEntity, mLeftHandSide[k], mResponseGradient[k], rCurrentProcessInfo); noalias(rLHS_Contribution) = mLeftHandSide[k]; noalias(rRHS_Contribution) = -1. * mResponseGradient[k]; } /** * @brief Calculates element first derivative contributions to adjoint system * * \[ * \mathbf{\underline{K}} = \mathbf{\underline{K}} + \frac{\gamma}{\beta \Delta t} \frac{\partial \underline{R}^n}{\partial \underline{\dot{w}}^n} \\ * \underline{F} = \underline{F} - \frac{\gamma}{\beta \Delta t} \frac{\partial J^n}{\partial \underline{\dot{w}}^n} * \] * * @tparam TEntityType * @param rCurrentEntity Current entity * @param rLHS_Contribution Left hand side matrix (i.e. $\mathbf{\underline{K}}$) * @param rRHS_Contribution Right hand side vector (i.e. 
$\underline{F}$)
 * @param rCurrentProcessInfo Current process info
 */
template<class TEntityType>
void CalculateFirstDerivativeContributions(
    TEntityType& rCurrentEntity,
    LocalSystemMatrixType& rLHS_Contribution,
    LocalSystemVectorType& rRHS_Contribution,
    const ProcessInfo& rCurrentProcessInfo)
{
    // Thread-local scratch index: one set of work matrices/vectors per thread.
    int k = OpenMPUtils::ThisThread();

    // dR/d(w_dot): residual derivative w.r.t. the first time derivatives.
    rCurrentEntity.CalculateFirstDerivativesLHS(mFirstDerivsLHS[k], rCurrentProcessInfo);
    // dJ/d(w_dot): response gradient w.r.t. the first time derivatives.
    mpResponseFunction->CalculateFirstDerivativesGradient(
        rCurrentEntity, mFirstDerivsLHS[k], mFirstDerivsResponseGradient[k], rCurrentProcessInfo);
    // C6 = gamma / (beta * dt) scales both first-derivative contributions
    // (see CalculateBossakConstants).
    noalias(rLHS_Contribution) += mBossak.C6 * mFirstDerivsLHS[k];
    noalias(rRHS_Contribution) -= mBossak.C6 * mFirstDerivsResponseGradient[k];
}

/**
 * @brief Calculates element second derivative contributions for adjoint system
 *
 * \[
 * \mathbf{\underline{K}} = \mathbf{\underline{K}} + \frac{1 - \alpha}{\beta\Delta t^2}\frac{\partial \underline{R}^n}{\partial \underline{\ddot{w}}^n} \\
 * \underline{F} = \underline{F} - \frac{1}{\beta\Delta t^2}\frac{\partial J^n}{\partial \underline{\ddot{w}}^n}
 * \]
 *
 * @tparam TEntityType
 * @param rCurrentEntity Current entity
 * @param rLHS_Contribution Left hand side matrix (i.e. $\mathbf{\underline{K}}$)
 * @param rRHS_Contribution Right hand side vector (i.e.
$\underline{F}$) * @param rCurrentProcessInfo Current process info */ template<class TEntityType> void CalculateSecondDerivativeContributions( TEntityType& rCurrentEntity, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, const ProcessInfo& rCurrentProcessInfo) { int k = OpenMPUtils::ThisThread(); rCurrentEntity.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], rCurrentProcessInfo); mSecondDerivsLHS[k] *= (1.0 - mBossak.Alpha); this->mpResponseFunction->CalculateSecondDerivativesGradient( rCurrentEntity, mSecondDerivsLHS[k], mSecondDerivsResponseGradient[k], rCurrentProcessInfo); noalias(rLHS_Contribution) += mBossak.C7 * mSecondDerivsLHS[k]; noalias(rRHS_Contribution) -= mBossak.C7 * mSecondDerivsResponseGradient[k]; } /** * @brief Calculates previous time step contributions from elements to adjoint system * * No need to use again conditions version of this since elements includes condition nodes as well. * Therefore, this will add automatically condition contributions as well. * * \underline{F} = * \underline{F} * - \frac{1}{\beta\Delta t^2}\left[\frac{\partial \underline{R}^{n+1}}{\underline{\ddot{w}}^n}\right]^T\underline{\lambda}_1^{n+1} * - \frac{1}{\beta\Delta t^2}\frac{\partial J^{n+1}}{\underline{\ddot{w}}^n} * + \frac{\beta - \gamma\left(\gamma + \frac{1}{2}\right)}{\beta^2\Delta t}\underline{\lambda}_2^{n+1} * - \frac{\gamma + \frac{1}{2}}{\beta^2\Delta t^2}\underline{\lambda}_3^{n+1} * * @param rCurrentElement Current element * @param rLHS_Contribution Left hand side matrix (i.e. $\mathbf{\underline{K}}$) * @param rRHS_Contribution Right hand side vector (i.e. 
$\underline{F}$) * @param rCurrentProcessInfo Current process info */ void CalculatePreviousTimeStepContributions( Element& rCurrentElement, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, const ProcessInfo& rCurrentProcessInfo) { const auto& r_geometry = rCurrentElement.GetGeometry(); const auto k = OpenMPUtils::ThisThread(); auto& r_extensions = *rCurrentElement.GetValue(ADJOINT_EXTENSIONS); unsigned local_index = 0; for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node) { auto& r_node = r_geometry[i_node]; r_extensions.GetFirstDerivativesVector(i_node, mAdjointIndirectVector2[k], 1); r_extensions.GetSecondDerivativesVector(i_node, mAdjointIndirectVector3[k], 1); r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 1); const double weight = 1.0 / r_node.GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS); for (unsigned d = 0; d < mAdjointIndirectVector2[k].size(); ++d) { rRHS_Contribution[local_index] += weight * (mBossak.C7 * mAuxAdjointIndirectVector1[k][d] + mBossak.C4 * mAdjointIndirectVector2[k][d] + mBossak.C5 * mAdjointIndirectVector3[k][d]); ++local_index; } } } /** * @brief Calculates entity time scheme contributions as depicted. 
* * \[ * rAdjointTimeSchemeValues2 = * - \frac{\partial J^{n}}{\partial \underline{\dot{w}}^n} * - \left[\frac{\partial \underline{R}^{n}}{\partial \underline{\dot{w}}}\right]^T\underline{\lambda}_1^{n+1} * \] * \[ * rAdjointTimeSchemeValues3 = * - \frac{\partial J^{n}}{\partial \underline{\ddot{w}}^n} * - \left(1-\alpha\right)\left[\frac{\partial \underline{R}^{n}}{\partial \underline{\ddot{w}}^n}\right]^T\underline{\lambda}_1^{n+1} * \] * * @tparam TEntityType * @param rCurrentEntity * @param rAdjointTimeSchemeValues2 * @param rAdjointTimeSchemeValues3 * @param rProcessInfo */ template<class TEntityType> void CalculateEntityTimeSchemeContributions( TEntityType& rCurrentEntity, LocalSystemVectorType& rAdjointTimeSchemeValues2, LocalSystemVectorType& rAdjointTimeSchemeValues3, const ProcessInfo& rProcessInfo) { KRATOS_TRY const int k = OpenMPUtils::ThisThread(); const auto& r_const_entity_ref = rCurrentEntity; r_const_entity_ref.GetValuesVector(mAdjointValuesVector[k]); this->CheckAndResizeThreadStorage(mAdjointValuesVector[k].size()); /// starting to build residual for next time step calculations rCurrentEntity.CalculateFirstDerivativesLHS(mFirstDerivsLHS[k], rProcessInfo); this->mpResponseFunction->CalculateFirstDerivativesGradient( rCurrentEntity, mFirstDerivsLHS[k], mFirstDerivsResponseGradient[k], rProcessInfo); rCurrentEntity.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], rProcessInfo); mSecondDerivsLHS[k] *= (1.0 - mBossak.Alpha); this->mpResponseFunction->CalculateSecondDerivativesGradient( rCurrentEntity, mSecondDerivsLHS[k], mSecondDerivsResponseGradient[k], rProcessInfo); if (rAdjointTimeSchemeValues2.size() != mFirstDerivsResponseGradient[k].size()) rAdjointTimeSchemeValues2.resize(mFirstDerivsResponseGradient[k].size(), false); noalias(rAdjointTimeSchemeValues2) = -mFirstDerivsResponseGradient[k] - prod(mFirstDerivsLHS[k], mAdjointValuesVector[k]); if (rAdjointTimeSchemeValues3.size() != mSecondDerivsResponseGradient[k].size()) 
rAdjointTimeSchemeValues3.resize(mSecondDerivsResponseGradient[k].size(), false);
// lambda_3 part: -dJ/d(w_ddot) - (1 - alpha) * [dR/d(w_ddot)]^T * lambda_1
// (mSecondDerivsLHS was pre-scaled by (1 - alpha) above).
noalias(rAdjointTimeSchemeValues3) =
    -mSecondDerivsResponseGradient[k] -
    prod(mSecondDerivsLHS[k], mAdjointValuesVector[k]);

KRATOS_CATCH("");
}

/**
 * @brief Calculates contributions from each entity for auxiliary variable as depicted
 *
 * rAdjointAuxiliaryValues =
 *     - \frac{\partial J^{n+1}}{\partial \underline{\ddot{w}}^n}
 *     - \alpha \left[\frac{\partial \underline{R}^{n+1}}{\partial \underline{\ddot{w}}^n}\right]^T\underline{\lambda}_1^{n+1}
 *
 * @tparam TEntityType
 * @param rCurrentEntity           Entity (element or condition) being processed
 * @param rAdjointAuxiliaryValues  Output vector; resized to the entity system size if needed
 * @param rProcessInfo             Current process info
 */
template <class TEntityType>
void CalculateEntityAuxiliaryVariableContributions(
    TEntityType& rCurrentEntity,
    LocalSystemVectorType& rAdjointAuxiliaryValues,
    const ProcessInfo& rProcessInfo)
{
    KRATOS_TRY

    // Thread-local scratch index.
    const int k = OpenMPUtils::ThisThread();

    // Current adjoint (lambda_1) values of the entity.
    const auto& r_const_entity_ref = rCurrentEntity;
    r_const_entity_ref.GetValuesVector(mAdjointValuesVector[k]);
    this->CheckAndResizeThreadStorage(mAdjointValuesVector[k].size());

    // alpha * dR/d(w_ddot): alpha-weighted second-derivative LHS.
    rCurrentEntity.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], rProcessInfo);
    mSecondDerivsLHS[k] *= mBossak.Alpha;
    this->mpResponseFunction->CalculateSecondDerivativesGradient(
        rCurrentEntity, mSecondDerivsLHS[k], mSecondDerivsResponseGradient[k], rProcessInfo);

    if (rAdjointAuxiliaryValues.size() != mSecondDerivsLHS[k].size1())
        rAdjointAuxiliaryValues.resize(mSecondDerivsLHS[k].size1(), false);
    // Sign convention: the caller (UpdateEntityAuxiliaryVariableContributions)
    // subtracts this vector during nodal assembly, producing the minus signs
    // shown in the formula above.
    noalias(rAdjointAuxiliaryValues) =
        prod(mSecondDerivsLHS[k], mAdjointValuesVector[k]) + mSecondDerivsResponseGradient[k];

    KRATOS_CATCH("");
}

void CalculateNodeNeighbourCount(ModelPart& rModelPart)
{
    // Calculate number of neighbour elements for each node.
VariableUtils().SetNonHistoricalVariableToZero(NUMBER_OF_NEIGHBOUR_ELEMENTS, rModelPart.Nodes()); block_for_each(rModelPart.Elements(), [&](ModelPart::ElementType& rElement) { auto& r_geometry = rElement.GetGeometry(); for (unsigned j = 0; j < r_geometry.PointsNumber(); ++j) { double& r_num_neighbour = r_geometry[j].GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS); #pragma omp atomic r_num_neighbour += 1.0; } }); rModelPart.GetCommunicator().AssembleNonHistoricalData(NUMBER_OF_NEIGHBOUR_ELEMENTS); } void UpdateTimeSchemeAdjoints(ModelPart& rModelPart) { KRATOS_TRY; std::vector<const VariableData*> lambda2_vars = GatherVariables( rModelPart.Elements(), [](const AdjointExtensions& rExtensions, std::vector<const VariableData*>& rVec) { rExtensions.GetFirstDerivativesVariables(rVec); }); std::vector<const VariableData*> lambda3_vars = GatherVariables( rModelPart.Elements(), [](const AdjointExtensions& rExtensions, std::vector<const VariableData*>& rVec) { return rExtensions.GetSecondDerivativesVariables(rVec); }); std::vector<const VariableData*> auxiliary_vars = GatherVariables( rModelPart.Elements(), [](const AdjointExtensions& rExtensions, std::vector<const VariableData*>& rVec) { return rExtensions.GetAuxiliaryVariables(rVec); }); SetToZero_AdjointVars(lambda2_vars, rModelPart.Nodes()); SetToZero_AdjointVars(lambda3_vars, rModelPart.Nodes()); const auto& r_process_info = rModelPart.GetProcessInfo(); UpdateEntityTimeSchemeContributions(rModelPart.Elements(), r_process_info); UpdateEntityTimeSchemeContributions(rModelPart.Conditions(), r_process_info); // Finalize global assembly Assemble_AdjointVars(lambda2_vars, rModelPart.GetCommunicator()); Assemble_AdjointVars(lambda3_vars, rModelPart.GetCommunicator()); for (unsigned int i_var = 0; i_var < lambda2_vars.size(); ++i_var) { const auto& r_lambda2_variable_name = lambda2_vars[i_var]->Name(); const auto& r_lambda3_variable_name = lambda3_vars[i_var]->Name(); const auto& r_auxiliary_variable_name = 
auxiliary_vars[i_var]->Name(); if (KratosComponents<Variable<array_1d<double, 3>>>::Has(r_lambda2_variable_name)) { UpdateTimeSchemeVariablesFromOldContributions<array_1d<double, 3>>( rModelPart.Nodes(), r_lambda2_variable_name, r_lambda3_variable_name, r_auxiliary_variable_name); } else if (KratosComponents<Variable<double>>::Has(r_lambda2_variable_name)) { UpdateTimeSchemeVariablesFromOldContributions<double>( rModelPart.Nodes(), r_lambda2_variable_name, r_lambda3_variable_name, r_auxiliary_variable_name); } else { KRATOS_ERROR << "Unsupported variable type " << r_lambda2_variable_name << "."; } } KRATOS_CATCH(""); } /** * @brief Updates time scheme variables in nodes of model part * * @tparam TEntityContainerType * @param rEntityContainer * @param rProcessInfo */ template <class TEntityContainerType> void UpdateEntityTimeSchemeContributions( TEntityContainerType& rEntityContainer, const ProcessInfo& rProcessInfo) { KRATOS_TRY const int number_of_elements = rEntityContainer.size(); Vector adjoint2_aux, adjoint3_aux; #pragma omp parallel for private(adjoint2_aux, adjoint3_aux) for (int i = 0; i < number_of_elements; ++i) { auto& r_entity = *(rEntityContainer.begin() + i); const int k = OpenMPUtils::ThisThread(); this->CalculateTimeSchemeContributions( r_entity, adjoint2_aux, adjoint3_aux, *this->mpResponseFunction, mBossak, rProcessInfo); auto& r_extensions = *r_entity.GetValue(ADJOINT_EXTENSIONS); // Assemble the contributions to the corresponding nodal unknowns. 
unsigned local_index = 0; auto& r_geometry = r_entity.GetGeometry(); for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node) { r_extensions.GetFirstDerivativesVector(i_node, mAdjointIndirectVector2[k], 0); r_extensions.GetSecondDerivativesVector(i_node, mAdjointIndirectVector3[k], 0); auto& r_node = r_geometry[i_node]; r_node.SetLock(); for (unsigned d = 0; d < mAdjointIndirectVector2[k].size(); ++d) { mAdjointIndirectVector2[k][d] += adjoint2_aux[local_index]; mAdjointIndirectVector3[k][d] += adjoint3_aux[local_index]; ++local_index; } r_node.UnSetLock(); } } KRATOS_CATCH(""); } /** * @brief Update nodal variables with contributions from previous time step adjoint variables * * @tparam TDataType * @param rNodes * @param rLambda2VariableName * @param rLambda3VariableName * @param rAuxiliaryVariableName */ template<class TDataType> void UpdateTimeSchemeVariablesFromOldContributions( ModelPart::NodesContainerType& rNodes, const std::string& rLambda2VariableName, const std::string& rLambda3VariableName, const std::string& rAuxiliaryVariableName) { KRATOS_TRY const auto& r_lambda2_variable = KratosComponents<Variable<TDataType>>::Get(rLambda2VariableName); const auto& r_lambda3_variable = KratosComponents<Variable<TDataType>>::Get(rLambda3VariableName); const auto& r_auxiliary_variable = KratosComponents<Variable<TDataType>>::Get(rAuxiliaryVariableName); block_for_each(rNodes, [&](ModelPart::NodeType& rNode) { const TDataType& r_old_lambda2_value = rNode.FastGetSolutionStepValue(r_lambda2_variable, 1); const TDataType& r_old_lambda3_value = rNode.FastGetSolutionStepValue(r_lambda3_variable, 1); TDataType& r_lambda2_value = rNode.FastGetSolutionStepValue(r_lambda2_variable); r_lambda2_value += r_old_lambda2_value * mBossak.C0; r_lambda2_value += r_old_lambda3_value * mBossak.C1; TDataType& r_lambda3_value = rNode.FastGetSolutionStepValue(r_lambda3_variable); r_lambda3_value += r_old_lambda2_value * mBossak.C2; r_lambda3_value += r_old_lambda3_value * 
mBossak.C3; r_lambda3_value += rNode.FastGetSolutionStepValue(r_auxiliary_variable, 1); }); KRATOS_CATCH(""); } /** * @brief Update auxiliary variable to be used in next time step * * @param rModelPart */ void UpdateAuxiliaryVariable(ModelPart& rModelPart) { KRATOS_TRY; std::vector<const VariableData*> aux_vars = GatherVariables( rModelPart.Elements(), [](const AdjointExtensions& rExtensions, std::vector<const VariableData*>& rOut) { return rExtensions.GetAuxiliaryVariables(rOut); }); SetToZero_AdjointVars(aux_vars, rModelPart.Nodes()); const auto& r_process_info = rModelPart.GetProcessInfo(); // Loop over elements to assemble the remaining terms UpdateEntityAuxiliaryVariableContributions(rModelPart.Elements(), r_process_info); // Loop over conditions to assemble the remaining terms UpdateEntityAuxiliaryVariableContributions(rModelPart.Conditions(), r_process_info); // Finalize global assembly Assemble_AdjointVars(aux_vars, rModelPart.GetCommunicator()); KRATOS_CATCH(""); } /** * @brief Updates auxiliary variables in the model part * * @tparam TEntityContainerType * @param rEntityContainer * @param rProcessInfo */ template <class TEntityContainerType> void UpdateEntityAuxiliaryVariableContributions( TEntityContainerType& rEntityContainer, const ProcessInfo& rProcessInfo) { KRATOS_TRY const int number_of_entities = rEntityContainer.size(); Vector aux_adjoint_vector; #pragma omp parallel for private(aux_adjoint_vector) for (int i = 0; i < number_of_entities; ++i) { auto& r_entity = *(rEntityContainer.begin() + i); const int k = OpenMPUtils::ThisThread(); this->CalculateAuxiliaryVariableContributions( r_entity, aux_adjoint_vector, *this->mpResponseFunction, mBossak, rProcessInfo); auto& r_extensions = *r_entity.GetValue(ADJOINT_EXTENSIONS); // Assemble the contributions to the corresponding nodal unknowns. 
unsigned local_index = 0; auto& r_geometry = r_entity.GetGeometry(); for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node) { auto& r_node = r_geometry[i_node]; r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 0); r_node.SetLock(); for (unsigned d = 0; d < mAuxAdjointIndirectVector1[k].size(); ++d) { mAuxAdjointIndirectVector1[k][d] -= aux_adjoint_vector[local_index]; ++local_index; } r_node.UnSetLock(); } } KRATOS_CATCH(""); } /** * @brief Check for variable types * * @tparam TDataType * @param rModelPart * @param rLambda2VariableName * @param rLambda3VariableName * @param rAuxiliaryVariableName */ template<class TDataType> void CheckVariables( const ModelPart& rModelPart, const std::string& rLambda2VariableName, const std::string& rLambda3VariableName, const std::string& rAuxiliaryVariableName) const { KRATOS_TRY KRATOS_ERROR_IF(!KratosComponents<Variable<TDataType>>::Has(rLambda2VariableName)) << "Adjoint variable " << rLambda2VariableName << " is not found in variable list with required type.\n"; KRATOS_ERROR_IF(!KratosComponents<Variable<TDataType>>::Has(rLambda3VariableName)) << "Adjoint variable " << rLambda3VariableName << " is not found in variable list with required type.\n"; KRATOS_ERROR_IF(!KratosComponents<Variable<TDataType>>::Has(rAuxiliaryVariableName)) << "Adjoint variable " << rAuxiliaryVariableName << " is not found in variable list with required type.\n"; const auto& r_lambda2_variable = KratosComponents<Variable<TDataType>>::Get(rLambda2VariableName); const auto& r_lambda3_variable = KratosComponents<Variable<TDataType>>::Get(rLambda3VariableName); const auto& r_auxiliary_variable = KratosComponents<Variable<TDataType>>::Get(rAuxiliaryVariableName); KRATOS_ERROR_IF(!rModelPart.HasNodalSolutionStepVariable(r_lambda2_variable)) << "Lambda 2 Variable " << rLambda2VariableName << " not found in nodal solution step variables list of " << rModelPart.Name() << ".\n"; 
KRATOS_ERROR_IF(!rModelPart.HasNodalSolutionStepVariable(r_lambda3_variable))
    << "Lambda 3 Variable " << rLambda3VariableName
    << " not found in nodal solution step variables list of "
    << rModelPart.Name() << ".\n";
KRATOS_ERROR_IF(!rModelPart.HasNodalSolutionStepVariable(r_auxiliary_variable))
    << "Auxiliary Variable " << rAuxiliaryVariableName
    << " not found in nodal solution step variables list of "
    << rModelPart.Name() << ".\n";

KRATOS_CATCH("");
}

// Ensures the calling thread's scratch matrices/vectors match SystemSize.
// Called before every entity assembly since element/condition sizes may differ.
void CheckAndResizeThreadStorage(unsigned SystemSize)
{
    const int k = OpenMPUtils::ThisThread();

    if (mLeftHandSide[k].size1() != SystemSize || mLeftHandSide[k].size2() != SystemSize) {
        mLeftHandSide[k].resize(SystemSize, SystemSize, false);
    }

    if (mFirstDerivsLHS[k].size1() != SystemSize || mFirstDerivsLHS[k].size2() != SystemSize) {
        mFirstDerivsLHS[k].resize(SystemSize, SystemSize, false);
    }

    if (mSecondDerivsLHS[k].size1() != SystemSize || mSecondDerivsLHS[k].size2() != SystemSize) {
        mSecondDerivsLHS[k].resize(SystemSize, SystemSize, false);
    }

    if (mResponseGradient[k].size() != SystemSize) {
        mResponseGradient[k].resize(SystemSize, false);
    }

    if (mFirstDerivsResponseGradient[k].size() != SystemSize) {
        mFirstDerivsResponseGradient[k].resize(SystemSize, false);
    }

    if (mSecondDerivsResponseGradient[k].size() != SystemSize) {
        mSecondDerivsResponseGradient[k].resize(SystemSize, false);
    }
}

// Precomputes the Bossak time-integration constants for step size DeltaTime.
// Beta and Gamma are derived from Alpha; C0..C7 are the combinations used in
// the adjoint update formulas of this scheme.
static BossakConstants CalculateBossakConstants(double Alpha, double DeltaTime)
{
    BossakConstants bc;
    bc.Alpha = Alpha;
    bc.Beta = 0.25 * (1.0 - bc.Alpha) * (1.0 - bc.Alpha);
    bc.Gamma = 0.5 - bc.Alpha;
    bc.C0 = 1.0 - bc.Gamma / bc.Beta;
    bc.C1 = -1.0 / (bc.Beta * DeltaTime);
    bc.C2 = (1.0 - 0.5 * bc.Gamma / bc.Beta) * DeltaTime;
    bc.C3 = (1.0 - 0.5 / bc.Beta);
    bc.C4 = (bc.Beta - bc.Gamma * (bc.Gamma + 0.5)) / (DeltaTime * bc.Beta * bc.Beta);
    bc.C5 = -1.0 * (bc.Gamma + 0.5) / (DeltaTime * DeltaTime * bc.Beta * bc.Beta);
    bc.C6 = bc.Gamma / (bc.Beta * DeltaTime);
    bc.C7 = 1.0 / (DeltaTime * DeltaTime * bc.Beta);
    return bc;
}

// Returns the (positive) time step between the current step and the
// previously solved one; throws if it is not positive.
static double
GetTimeStep(const ProcessInfo& rCurrentProcessInfo)
{
    const ProcessInfo& r_last_process_info =
        rCurrentProcessInfo.GetPreviousSolutionStepInfo(1);

    // Note: solution is backwards in time, but we still want a positive
    // time step
    // (it is the time step in the "forward" Bossak scheme).
    double time_step =
        r_last_process_info.GetValue(TIME) - rCurrentProcessInfo.GetValue(TIME);
    KRATOS_ERROR_IF(time_step <= 0.0)
        << "Backwards in time solution is not decreasing time from last "
           "step."
        << std::endl;
    return time_step;
}

// Hash/equality functors so VariableData pointers are deduplicated by
// variable identity (Key()/operator==) rather than by pointer address.
struct Hash
{
    std::size_t operator()(const VariableData* const& p) const
    {
        return p->Key();
    }
};

struct Pred
{
    bool operator()(const VariableData* const l, const VariableData* const r) const
    {
        return *l == *r;
    }
};

// Gathers variables needed for assembly.
// Collects, over all elements, the unique set of adjoint variables reported
// by each element's ADJOINT_EXTENSIONS through the GetLocalVars callback.
static std::vector<const VariableData*> GatherVariables(
    const ModelPart::ElementsContainerType& rElements,
    std::function<void(const AdjointExtensions&, std::vector<const VariableData*>&)> GetLocalVars)
{
    KRATOS_TRY;

    const int num_threads = OpenMPUtils::GetNumThreads();
    std::vector<const VariableData*> local_vars;
    // One set per thread avoids locking during the parallel gather.
    std::vector<std::unordered_set<const VariableData*, Hash, Pred>> thread_vars(num_threads);
#pragma omp parallel for private(local_vars)
    for (int i = 0; i < static_cast<int>(rElements.size()); ++i)
    {
        auto& r_element = *(rElements.begin() + i);
        GetLocalVars(*r_element.GetValue(ADJOINT_EXTENSIONS), local_vars);
        const int k = OpenMPUtils::ThisThread();
        thread_vars[k].insert(local_vars.begin(), local_vars.end());
    }
    // Serial reduction of the per-thread sets into one deduplicated list.
    std::unordered_set<const VariableData*, Hash, Pred> all_vars;
    for (int i = 0; i < num_threads; ++i)
    {
        all_vars.insert(thread_vars[i].begin(), thread_vars[i].end());
    }
    return std::vector<const VariableData*>{all_vars.begin(), all_vars.end()};

    KRATOS_CATCH("");
}

// Zeroes the historical (solution-step) values of the given variables on
// rNodes. Supports scalar and 3-component array variables only.
static void SetToZero_AdjointVars(const std::vector<const VariableData*>& rVariables,
                                  ModelPart::NodesContainerType& rNodes)
{
    KRATOS_TRY;

    for (auto p_variable_data : rVariables)
    {
        if (KratosComponents<Variable<array_1d<double,
3>>>::Has(p_variable_data->Name()))
        {
            const auto& r_variable =
                KratosComponents<Variable<array_1d<double, 3>>>::Get(
                    p_variable_data->Name());
            VariableUtils().SetHistoricalVariableToZero(r_variable, rNodes);
        }
        else if (KratosComponents<Variable<double>>::Has(p_variable_data->Name()))
        {
            const auto& r_variable =
                KratosComponents<Variable<double>>::Get(p_variable_data->Name());
            VariableUtils().SetHistoricalVariableToZero(r_variable, rNodes);
        }
        else
        {
            // Only scalar and 3-component array variables are supported.
            KRATOS_ERROR << "Variable \"" << p_variable_data->Name()
                         << "\" not found!\n";
        }
    }

    KRATOS_CATCH("");
}

// Finalizes the global assembly of the given nodal variables through the
// communicator (see the "Finalize global assembly" call sites above).
static void Assemble_AdjointVars(const std::vector<const VariableData*>& rVariables,
                                 Communicator& rComm)
{
    KRATOS_TRY;

    for (auto p_variable_data : rVariables)
    {
        if (KratosComponents<Variable<array_1d<double, 3>>>::Has(
                p_variable_data->Name()))
        {
            const auto& r_variable =
                KratosComponents<Variable<array_1d<double, 3>>>::Get(
                    p_variable_data->Name());
            rComm.AssembleCurrentData(r_variable);
        }
        else if (KratosComponents<Variable<double>>::Has(p_variable_data->Name()))
        {
            const auto& r_variable =
                KratosComponents<Variable<double>>::Get(p_variable_data->Name());
            rComm.AssembleCurrentData(r_variable);
        }
        else
        {
            // Only scalar and 3-component array variables are supported.
            KRATOS_ERROR << "Variable \"" << p_variable_data->Name()
                         << "\" not found!\n";
        }
    }

    KRATOS_CATCH("");
}

///@}
///@name Private Access
///@{

///@}
///@name Private Inquiry
///@{

///@}
///@name Un accessible methods
///@{

///@}

}; /* Class ResidualBasedAdjointBossakScheme */

///@}

///@name Type Definitions
///@{

///@}

} /* namespace Kratos.*/

#endif /* KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED defined */
prec_ddd_s_h.h
//**************************************************************************************** // // Copyright (c) 2015-2020, Yoshifumi Nakamura <nakamura@riken.jp> // Copyright (c) 2015-2020, Yuta Mukai <mukai.yuta@fujitsu.com> // Copyright (c) 2018-2020, Ken-Ichi Ishikawa <ishikawa@theo.phys.sci.hirosima-u.ac.jp> // Copyright (c) 2019-2020, Issaku Kanamori <kanamori-i@riken.jp> // // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer listed // in this license in the documentation and/or other materials // provided with the distribution. // // * Neither the name of the copyright holders nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// //---------------------------------------------------------------------------------------- // ACKNOWLEDGMENT // // This software has been developed in a co-design working group for the lattice QCD // supported by MEXT's programs for the Development and Improvement for the Next // Generation Ultra High-Speed Computer System, under its Subsidies for Operating the // Specific Advanced Large Research Facilities, and Priority Issue 9 // (Elucidation of the Fundamental Laws and Evolution of the Universe) to be tackled by // using the Supercomputer Fugaku. // //**************************************************************************************** #ifndef _PREC_DDD_H_H #define _PREC_DDD_H_H void prec_s_(scs_t *out, const scs_t *in, const int *nsap, const int *nm); void ddd_in_s_(scs_t *out, const scs_t *in, const int *domain); void jinv_ddd_in_s_(scs_t *x, const scs_t *b, const int *domain, const int *maxiter); void ddd_out_pre_s_( const scs_t *in, const int *domain); void ddd_out_pos_s_(scs_t *out, const scs_t *in, const int *domain, const float factor); void debug_half(scs_t *xs, const scs_t *bs, const int *nsap, const int *nm); void assign_mult_mSAP_s_h_(scs_t *xs, const scs_t *bs, const int *nsap, const int *nm) // // Multiply Half precision SAP preconditioner // // input and output are converted to half/single precision from single/half precision, respectively // // xs = conv_SP( MSAP_HP * conv_HP(bs) ) // // MSAP_HP : half presision SAP preconditioner // // MSAP = Ksap(sum_{j=0}^{nsap} (1-DKsap)^j) \sim D^{-1} // // xs : output quark field in single precision // bs : input quark field in single precision // nsap : sap fixed iteration count // nm : Jacobbi fixed iteration count for the approximate inverse of Dee/Doo in a even/odd domain // // { static sch_t *x = nullptr; static sch_t *b = nullptr; static sch_t *s = nullptr; static sch_t *q = nullptr; if (nullptr == x) x = (sch_t *)malloc( sizeof(sch_t) * vols*2); if (nullptr == b) b = (sch_t *)malloc( 
sizeof(sch_t) * vols*2); if (nullptr == s) s = (sch_t *)malloc( sizeof(sch_t) * vols*2); if (nullptr == q) q = (sch_t *)malloc( sizeof(sch_t) * vols*2); sch_t * __restrict__ xe = &x[vols*domain_e]; sch_t * __restrict__ xo = &x[vols*domain_o]; const sch_t * __restrict__ be = &b[vols*domain_e]; const sch_t * __restrict__ bo = &b[vols*domain_o]; sch_t * __restrict__ se = &s[vols*domain_e]; sch_t * __restrict__ so = &s[vols*domain_o]; sch_t * __restrict__ qe = &q[vols*domain_e]; sch_t * __restrict__ qo = &q[vols*domain_o]; if (*nsap > 10) { printf("%s : %s nsap > 10\n",__FILE__,__func__); exit(1); } if (*nm > 10) { printf("%s : %s nn > 10\n",__FILE__,__func__); exit(1); } #if 0 // prec_s_(xs, bs, nsap, nm); #else // debug_half(xs,bs,nsap,nm); ////////////////////////////// // convert float to half // b <= bs ////////////////////////////// float bsnorm; assign_q_s2h( b, bs, &bsnorm); #pragma omp parallel for for(int i=0; i<vols*2; i++) for(int j=0; j<24; j++) for(int v=0; v<VLENS; v++) s[i].ccs[j].v[v] = b[i].ccs[j].v[v]; if (npe[1]==1 || npe[2]==1 || npe[3]==1){ #pragma omp parallel for for(int i=0; i<vols; i++) for(int j=0; j<24; j++) for(int v=0; v<VLENS; v++) xo[i].ccs[j].v[v] = 0; } /////////////////////////////////// // SAP iteration /////////////////////////////////// for (int isap=1; isap < *nsap; isap++) { /////////////////////////////////// // xe = Aee se /////////////////////////////////// jinv_ddd_in_h_(xe, se, &domain_e, nm); /////////////////////////////////// // Send for Doe xe /////////////////////////////////// ddd_out_pre_h_( x, &domain_o); /////////////////////////////////// // qe = Dee xe /////////////////////////////////// ddd_in_h_( qe, xe, &domain_e); /////////////////////////////////// // se = se + be - qe /////////////////////////////////// #pragma omp parallel for for(int i=0; i<vols; i++) for(int j=0; j<24; j++) for(int v=0; v<VLENS; v++) se[i].ccs[j].v[v] += be[i].ccs[j].v[v] - qe[i].ccs[j].v[v]; /////////////////////////////////// // so = so 
- Doe xe (Recv) /////////////////////////////////// ddd_out_pos_h_( so, x, &domain_o, (float)kappa); /////////////////////////////////// // xo = Aoo so /////////////////////////////////// jinv_ddd_in_h_( xo, so, &domain_o, nm); /////////////////////////////////// // Send for Deo xo /////////////////////////////////// ddd_out_pre_h_( x, &domain_e); /////////////////////////////////// // qo = Doo xo /////////////////////////////////// ddd_in_h_( qo, xo, &domain_o); /////////////////////////////////// // so = so + bo - qo /////////////////////////////////// #pragma omp parallel for for(int i=0; i<vols; i++) for(int j=0; j<24; j++) for(int v=0; v<VLENS; v++) so[i].ccs[j].v[v] += bo[i].ccs[j].v[v] - qo[i].ccs[j].v[v]; /////////////////////////////////// // se = se - Deo xo (Recv) /////////////////////////////////// ddd_out_pos_h_( se, x, &domain_e, (float)kappa); } // end for isap if (npe[1]==1 || npe[2]==1 || npe[3]==1){ #pragma omp parallel for for(int i=0; i<vols; i++) for(int j=0; j<24; j++) for(int v=0; v<VLENS; v++) xo[i].ccs[j].v[v] = 0; } /////////////////////////////////// // xe = Aee se /////////////////////////////////// jinv_ddd_in_h_( xe, se, &domain_e, nm); /////////////////////////////////// // Send for Doe xe /////////////////////////////////// ddd_out_pre_h_( x, &domain_o); /////////////////////////////////// // so = so - Doe xe /////////////////////////////////// ddd_out_pos_h_( so, x, &domain_o, (float)kappa); /////////////////////////////////// // xo = Aoo so /////////////////////////////////// jinv_ddd_in_h_( xo, so, &domain_o, nm); ////////////////////////////// // convert half to float // xs <= x ////////////////////////////// assign_q_h2s( xs, x, &bsnorm); #endif } void assign_mult_wd_s_(scs_t *x, const scs_t *b) // // Multiply Wilson/Clover operator (single precision) // // x = D b // { scs_t * __restrict__ xe = &x[vols*domain_e]; scs_t * __restrict__ xo = &x[vols*domain_o]; const scs_t * __restrict__ be = &b[vols*domain_e]; const scs_t * 
__restrict__ bo = &b[vols*domain_o]; /////////////////////// // Send for Deo bo /////////////////////// ddd_out_pre_s_( b, &domain_e); /////////////////////// // xe = Dee be /////////////////////// ddd_in_s_( xe, be, &domain_e); /////////////////////// // xe = xe + Deo bo // = xe - kappa Meo bo /////////////////////// ddd_out_pos_s_( xe, b, &domain_e, (float)mkappa); /////////////////////// // Send for Doe be /////////////////////// ddd_out_pre_s_( b, &domain_o); /////////////////////// // xo = Dee bo /////////////////////// ddd_in_s_( xo, bo, &domain_o); /////////////////////// // xo = xo + Doe be // = xo - kappa Moe be /////////////////////// ddd_out_pos_s_( xo, b, &domain_o, (float)mkappa); } void debug_half(scs_t *xs, const scs_t *bs, const int *nsap, const int *nm) // // debug // { const float tol = 1.0e-5f; static sch_t *x = nullptr; static sch_t *b = nullptr; if (nullptr == x) x = (sch_t *)malloc( sizeof(sch_t) * vols*2); if (nullptr == b) b = (sch_t *)malloc( sizeof(sch_t) * vols*2); static scs_t *xxs = nullptr; if (nullptr == xxs) xxs = (scs_t *)malloc( sizeof(scs_t) * vols*2); sch_t * __restrict__ xe = &x[vols*domain_e]; sch_t * __restrict__ xo = &x[vols*domain_o]; const sch_t * __restrict__ be = &b[vols*domain_e]; const sch_t * __restrict__ bo = &b[vols*domain_o]; ////////////////////// // check // xe = Dee be // xo = Doo bo ////////////////////// ddd_in_s_( &xxs[vols*domain_e], &bs[vols*domain_e], &domain_e); ddd_in_s_( &xxs[vols*domain_o], &bs[vols*domain_o], &domain_o); float bsnorm; assign_q_s2h( b, bs, &bsnorm); ddd_in_h_( xe, be, &domain_e); ddd_in_h_( xo, bo, &domain_o); assign_q_h2s( xs, x, &bsnorm); for(int i=0; i<vols*2; i++) for(int j=0; j<24; j++) for(int v=0; v<VLENS; v++){ float rr = xxs[i].ccs[j].v[v] - xs[i].ccs[j].v[v]; if ( rr*rr > tol*tol && rank == 0) printf("CHKDEE: %3d %3d %3d %4d (SP)x = %14.6e (HP)x = %14.6e diff = %14.6e\n", rank,v,j,i,xxs[i].ccs[j].v[v], xs[i].ccs[j].v[v],rr); } ////////////////////// // check // xe = Aee be // 
xo = Aoo bo ////////////////////// jinv_ddd_in_s_( &xxs[vols*domain_e], &bs[vols*domain_e], &domain_e, nm); jinv_ddd_in_s_( &xxs[vols*domain_o], &bs[vols*domain_o], &domain_o, nm); assign_q_s2h( b, bs, &bsnorm); jinv_ddd_in_h_( xe, be, &domain_e, nm); jinv_ddd_in_h_( xo, bo, &domain_o, nm); assign_q_h2s( xs, x, &bsnorm); for(int i=0; i<vols*2; i++) for(int j=0; j<24; j++) for(int v=0; v<VLENS; v++){ float rr = xxs[i].ccs[j].v[v] - xs[i].ccs[j].v[v]; if ( rr*rr > tol*tol && rank == 0) printf("CHKAEE: %3d %3d %3d %4d (SP)x = %14.6e (HP)x = %14.6e diff = %14.6e\n", rank,v,j,i,xxs[i].ccs[j].v[v], xs[i].ccs[j].v[v],rr); } ////////////////////// // check // xe = be - Deo bo // xo = bo - Doe be ////////////////////// #pragma omp parallel for for(int i=0; i<vols*2; i++) for(int j=0; j<24; j++) for(int v=0; v<VLENS; v++) xxs[i].ccs[j].v[v] = bs[i].ccs[j].v[v]; ddd_out_pre_s_( bs, &domain_e); ddd_out_pos_s_( &xxs[vols*domain_e], bs, &domain_e, (float)kappa); ddd_out_pre_s_( bs, &domain_o); ddd_out_pos_s_( &xxs[vols*domain_o], bs, &domain_o, (float)kappa); assign_q_s2h( b, bs, &bsnorm); #pragma omp parallel for for(int i=0; i<vols*2; i++) for(int j=0; j<24; j++) for(int v=0; v<VLENS; v++) x[i].ccs[j].v[v] = b[i].ccs[j].v[v]; ddd_out_pre_h_( b, &domain_e); ddd_out_pos_h_( xe, b, &domain_e, (float)kappa); ddd_out_pre_h_( b, &domain_o); ddd_out_pos_h_( xo, b, &domain_o, (float)kappa); assign_q_h2s( xs, x, &bsnorm); for(int i=0; i<vols*2; i++) for(int j=0; j<24; j++) for(int v=0; v<VLENS; v++){ float rr = xxs[i].ccs[j].v[v] - xs[i].ccs[j].v[v]; if ( rr*rr > tol*tol && rank == 0) printf("CHKDEO: %3d %3d %3d %4d (SP)x = %14.6e (HP)x = %14.6e diff = %14.6e\n", rank,v,j,i,xxs[i].ccs[j].v[v], xs[i].ccs[j].v[v],rr); } } #endif
libsais.c
/*-- This file is a part of libsais, a library for linear time suffix array and burrows wheeler transform construction. Copyright (c) 2021-2022 Ilya Grebnov <ilya.grebnov@gmail.com> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Please see the file LICENSE for full copyright information. --*/ #include "libsais.h" #include <stddef.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <limits.h> #if defined(_OPENMP) #include <omp.h> #else #define UNUSED(_x) (void)(_x) #endif typedef int32_t sa_sint_t; typedef uint32_t sa_uint_t; typedef ptrdiff_t fast_sint_t; typedef size_t fast_uint_t; #define SAINT_BIT (32) #define SAINT_MAX INT32_MAX #define SAINT_MIN INT32_MIN #define ALPHABET_SIZE (1 << CHAR_BIT) #define UNBWT_FASTBITS (17) #define SUFFIX_GROUP_BIT (SAINT_BIT - 1) #define SUFFIX_GROUP_MARKER (((sa_sint_t)1) << (SUFFIX_GROUP_BIT - 1)) #define BUCKETS_INDEX2(_c, _s) (((_c) << 1) + (_s)) #define BUCKETS_INDEX4(_c, _s) (((_c) << 2) + (_s)) #define LIBSAIS_PER_THREAD_CACHE_SIZE (24576) typedef struct LIBSAIS_THREAD_CACHE { sa_sint_t symbol; sa_sint_t index; } LIBSAIS_THREAD_CACHE; typedef union LIBSAIS_THREAD_STATE { struct { fast_sint_t position; fast_sint_t count; fast_sint_t m; fast_sint_t last_lms_suffix; sa_sint_t * buckets; LIBSAIS_THREAD_CACHE * cache; } state; uint8_t padding[64]; } LIBSAIS_THREAD_STATE; typedef struct LIBSAIS_CONTEXT { sa_sint_t * buckets; LIBSAIS_THREAD_STATE * thread_state; fast_sint_t threads; } LIBSAIS_CONTEXT; typedef struct LIBSAIS_UNBWT_CONTEXT { 
sa_uint_t * bucket2; uint16_t * fastbits; sa_uint_t * buckets; fast_sint_t threads; } LIBSAIS_UNBWT_CONTEXT; #if defined(__GNUC__) || defined(__clang__) #define RESTRICT __restrict__ #elif defined(_MSC_VER) || defined(__INTEL_COMPILER) #define RESTRICT __restrict #else #error Your compiler, configuration or platform is not supported. #endif #if defined(__has_builtin) #if __has_builtin(__builtin_prefetch) #define HAS_BUILTIN_PREFECTCH #endif #elif defined(__GNUC__) && ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 2)) || (__GNUC__ >= 4) #define HAS_BUILTIN_PREFECTCH #endif #if defined(__has_builtin) #if __has_builtin(__builtin_bswap16) #define HAS_BUILTIN_BSWAP16 #endif #elif defined(__GNUC__) && ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 8)) || (__GNUC__ >= 5) #define HAS_BUILTIN_BSWAP16 #endif #if defined(HAS_BUILTIN_PREFECTCH) #define libsais_prefetch(address) __builtin_prefetch((const void *)(address), 0, 0) #define libsais_prefetchw(address) __builtin_prefetch((const void *)(address), 1, 0) #elif defined (_M_IX86) || defined (_M_AMD64) #include <intrin.h> #define libsais_prefetch(address) _mm_prefetch((const void *)(address), _MM_HINT_NTA) #define libsais_prefetchw(address) _m_prefetchw((const void *)(address)) #elif defined (_M_ARM) #include <intrin.h> #define libsais_prefetch(address) __prefetch((const void *)(address)) #define libsais_prefetchw(address) __prefetchw((const void *)(address)) #elif defined (_M_ARM64) #include <intrin.h> #define libsais_prefetch(address) __prefetch2((const void *)(address), 1) #define libsais_prefetchw(address) __prefetch2((const void *)(address), 17) #else #error Your compiler, configuration or platform is not supported. 
#endif #if !defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__) #if defined(_LITTLE_ENDIAN) \ || (defined(BYTE_ORDER) && defined(LITTLE_ENDIAN) && BYTE_ORDER == LITTLE_ENDIAN) \ || (defined(_BYTE_ORDER) && defined(_LITTLE_ENDIAN) && _BYTE_ORDER == _LITTLE_ENDIAN) \ || (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && __BYTE_ORDER == __LITTLE_ENDIAN) \ || (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) #define __LITTLE_ENDIAN__ #elif defined(_BIG_ENDIAN) \ || (defined(BYTE_ORDER) && defined(BIG_ENDIAN) && BYTE_ORDER == BIG_ENDIAN) \ || (defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && _BYTE_ORDER == _BIG_ENDIAN) \ || (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && __BYTE_ORDER == __BIG_ENDIAN) \ || (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) #define __BIG_ENDIAN__ #elif defined(_WIN32) #define __LITTLE_ENDIAN__ #endif #endif #if defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__) #if defined(HAS_BUILTIN_BSWAP16) #define libsais_bswap16(x) (__builtin_bswap16(x)) #elif defined(_MSC_VER) && !defined(__INTEL_COMPILER) #define libsais_bswap16(x) (_byteswap_ushort(x)) #else #define libsais_bswap16(x) ((uint16_t)(x >> 8) | (uint16_t)(x << 8)) #endif #elif !defined(__LITTLE_ENDIAN__) && defined(__BIG_ENDIAN__) #define libsais_bswap16(x) (x) #else #error Your compiler, configuration or platform is not supported. 
#endif static void * libsais_align_up(const void * address, size_t alignment) { return (void *)((((ptrdiff_t)address) + ((ptrdiff_t)alignment) - 1) & (-((ptrdiff_t)alignment))); } static void * libsais_alloc_aligned(size_t size, size_t alignment) { void * address = malloc(size + sizeof(short) + alignment - 1); if (address != NULL) { void * aligned_address = libsais_align_up((void *)((ptrdiff_t)address + (ptrdiff_t)(sizeof(short))), alignment); ((short *)aligned_address)[-1] = (short)((ptrdiff_t)aligned_address - (ptrdiff_t)address); return aligned_address; } return NULL; } static void libsais_free_aligned(void * aligned_address) { if (aligned_address != NULL) { free((void *)((ptrdiff_t)aligned_address - ((short *)aligned_address)[-1])); } } static LIBSAIS_THREAD_STATE * libsais_alloc_thread_state(sa_sint_t threads) { LIBSAIS_THREAD_STATE * RESTRICT thread_state = (LIBSAIS_THREAD_STATE *)libsais_alloc_aligned((size_t)threads * sizeof(LIBSAIS_THREAD_STATE), 4096); sa_sint_t * RESTRICT thread_buckets = (sa_sint_t *)libsais_alloc_aligned((size_t)threads * 4 * ALPHABET_SIZE * sizeof(sa_sint_t), 4096); LIBSAIS_THREAD_CACHE * RESTRICT thread_cache = (LIBSAIS_THREAD_CACHE *)libsais_alloc_aligned((size_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE * sizeof(LIBSAIS_THREAD_CACHE), 4096); if (thread_state != NULL && thread_buckets != NULL && thread_cache != NULL) { fast_sint_t t; for (t = 0; t < threads; ++t) { thread_state[t].state.buckets = thread_buckets; thread_buckets += 4 * ALPHABET_SIZE; thread_state[t].state.cache = thread_cache; thread_cache += LIBSAIS_PER_THREAD_CACHE_SIZE; } return thread_state; } libsais_free_aligned(thread_cache); libsais_free_aligned(thread_buckets); libsais_free_aligned(thread_state); return NULL; } static void libsais_free_thread_state(LIBSAIS_THREAD_STATE * thread_state) { if (thread_state != NULL) { libsais_free_aligned(thread_state[0].state.cache); libsais_free_aligned(thread_state[0].state.buckets); libsais_free_aligned(thread_state); } } 
static LIBSAIS_CONTEXT * libsais_create_ctx_main(sa_sint_t threads) { LIBSAIS_CONTEXT * RESTRICT ctx = (LIBSAIS_CONTEXT *)libsais_alloc_aligned(sizeof(LIBSAIS_CONTEXT), 64); sa_sint_t * RESTRICT buckets = (sa_sint_t *)libsais_alloc_aligned(8 * ALPHABET_SIZE * sizeof(sa_sint_t), 4096); LIBSAIS_THREAD_STATE * RESTRICT thread_state = threads > 1 ? libsais_alloc_thread_state(threads) : NULL; if (ctx != NULL && buckets != NULL && (thread_state != NULL || threads == 1)) { ctx->buckets = buckets; ctx->threads = threads; ctx->thread_state = thread_state; return ctx; } libsais_free_thread_state(thread_state); libsais_free_aligned(buckets); libsais_free_aligned(ctx); return NULL; } static void libsais_free_ctx_main(LIBSAIS_CONTEXT * ctx) { if (ctx != NULL) { libsais_free_thread_state(ctx->thread_state); libsais_free_aligned(ctx->buckets); libsais_free_aligned(ctx); } } #if defined(_OPENMP) static sa_sint_t libsais_count_negative_marked_suffixes(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { sa_sint_t count = 0; fast_sint_t i; for (i = omp_block_start; i < omp_block_start + omp_block_size; ++i) { count += (SA[i] < 0); } return count; } static sa_sint_t libsais_count_zero_marked_suffixes(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { sa_sint_t count = 0; fast_sint_t i; for (i = omp_block_start; i < omp_block_start + omp_block_size; ++i) { count += (SA[i] == 0); } return count; } static void libsais_place_cached_suffixes(sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais_prefetch(&cache[i + 2 * prefetch_distance]); libsais_prefetchw(&SA[cache[i + prefetch_distance + 0].symbol]); libsais_prefetchw(&SA[cache[i + prefetch_distance + 1].symbol]); 
libsais_prefetchw(&SA[cache[i + prefetch_distance + 2].symbol]); libsais_prefetchw(&SA[cache[i + prefetch_distance + 3].symbol]); SA[cache[i + 0].symbol] = cache[i + 0].index; SA[cache[i + 1].symbol] = cache[i + 1].index; SA[cache[i + 2].symbol] = cache[i + 2].index; SA[cache[i + 3].symbol] = cache[i + 3].index; } for (j += prefetch_distance + 3; i < j; i += 1) { SA[cache[i].symbol] = cache[i].index; } } static void libsais_compact_and_place_cached_suffixes(sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j, l; for (i = omp_block_start, j = omp_block_start + omp_block_size - 3, l = omp_block_start; i < j; i += 4) { libsais_prefetchw(&cache[i + prefetch_distance]); cache[l] = cache[i + 0]; l += cache[l].symbol >= 0; cache[l] = cache[i + 1]; l += cache[l].symbol >= 0; cache[l] = cache[i + 2]; l += cache[l].symbol >= 0; cache[l] = cache[i + 3]; l += cache[l].symbol >= 0; } for (j += 3; i < j; i += 1) { cache[l] = cache[i]; l += cache[l].symbol >= 0; } libsais_place_cached_suffixes(SA, cache, omp_block_start, l - omp_block_start); } static void libsais_accumulate_counts_s32_2(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s]; } } static void libsais_accumulate_counts_s32_3(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s]; } } static void libsais_accumulate_counts_s32_4(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - 
bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s]; } } static void libsais_accumulate_counts_s32_5(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride; sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s]; } } static void libsais_accumulate_counts_s32_6(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride; sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride; sa_sint_t * RESTRICT bucket05 = bucket04 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s] + bucket05[s]; } } static void libsais_accumulate_counts_s32_7(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride; sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride; sa_sint_t * RESTRICT bucket05 = bucket04 - bucket_stride; sa_sint_t * RESTRICT bucket06 = bucket05 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s] + bucket05[s] + bucket06[s]; } } static void 
libsais_accumulate_counts_s32_8(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride; sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride; sa_sint_t * RESTRICT bucket05 = bucket04 - bucket_stride; sa_sint_t * RESTRICT bucket06 = bucket05 - bucket_stride; sa_sint_t * RESTRICT bucket07 = bucket06 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s] + bucket05[s] + bucket06[s] + bucket07[s]; } } static void libsais_accumulate_counts_s32_9(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride; sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride; sa_sint_t * RESTRICT bucket05 = bucket04 - bucket_stride; sa_sint_t * RESTRICT bucket06 = bucket05 - bucket_stride; sa_sint_t * RESTRICT bucket07 = bucket06 - bucket_stride; sa_sint_t * RESTRICT bucket08 = bucket07 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s] + bucket05[s] + bucket06[s] + bucket07[s] + bucket08[s]; } } static void libsais_accumulate_counts_s32(sa_sint_t * RESTRICT buckets, fast_sint_t bucket_size, fast_sint_t bucket_stride, fast_sint_t num_buckets) { while (num_buckets >= 9) { libsais_accumulate_counts_s32_9(buckets - (num_buckets - 9) * bucket_stride, bucket_size, bucket_stride); num_buckets -= 8; } switch (num_buckets) { case 1: break; case 2: libsais_accumulate_counts_s32_2(buckets, bucket_size, bucket_stride); break; case 3: libsais_accumulate_counts_s32_3(buckets, bucket_size, 
bucket_stride); break; case 4: libsais_accumulate_counts_s32_4(buckets, bucket_size, bucket_stride); break; case 5: libsais_accumulate_counts_s32_5(buckets, bucket_size, bucket_stride); break; case 6: libsais_accumulate_counts_s32_6(buckets, bucket_size, bucket_stride); break; case 7: libsais_accumulate_counts_s32_7(buckets, bucket_size, bucket_stride); break; case 8: libsais_accumulate_counts_s32_8(buckets, bucket_size, bucket_stride); break; } } #endif static void libsais_gather_lms_suffixes_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, fast_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { if (omp_block_size > 0) { const fast_sint_t prefetch_distance = 128; fast_sint_t i, j = omp_block_start + omp_block_size, c0 = T[omp_block_start + omp_block_size - 1], c1 = -1; while (j < n && (c1 = T[j]) == c0) { ++j; } fast_uint_t s = c0 >= c1; for (i = omp_block_start + omp_block_size - 2, j = omp_block_start + 3; i >= j; i -= 4) { libsais_prefetch(&T[i - prefetch_distance]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1); c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1); c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1); } for (j -= 3; i >= j; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); } SA[m] = (sa_sint_t)(i + 1); } } static void libsais_gather_lms_suffixes_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n 
>= 65536 && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { libsais_gather_lms_suffixes_8u(T, SA, n, (fast_sint_t)n - 1, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { fast_sint_t t, m = 0; for (t = omp_num_threads - 1; t > omp_thread_num; --t) { m += thread_state[t].state.m; } libsais_gather_lms_suffixes_8u(T, SA, n, (fast_sint_t)n - 1 - m, omp_block_start, omp_block_size); #pragma omp barrier if (thread_state[omp_thread_num].state.m > 0) { SA[(fast_sint_t)n - 1 - m] = (sa_sint_t)thread_state[omp_thread_num].state.last_lms_suffix; } } #endif } } static sa_sint_t libsais_gather_lms_suffixes_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n) { const fast_sint_t prefetch_distance = 32; sa_sint_t i = n - 2; sa_sint_t m = n - 1; fast_uint_t s = 1; fast_sint_t c0 = T[n - 1]; fast_sint_t c1 = 0; for (; i >= 3; i -= 4) { libsais_prefetch(&T[i - prefetch_distance]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((s & 3) == 1); c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 0; m -= ((s & 3) == 1); c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i - 1; m -= ((s & 3) == 1); c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 2; m -= ((s & 3) == 1); } for (; i >= 0; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((s & 3) 
== 1); } return n - 1 - m; } static sa_sint_t libsais_gather_compacted_lms_suffixes_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n) { const fast_sint_t prefetch_distance = 32; sa_sint_t i = n - 2; sa_sint_t m = n - 1; fast_uint_t s = 1; fast_sint_t c0 = T[n - 1]; fast_sint_t c1 = 0; for (; i >= 3; i -= 4) { libsais_prefetch(&T[i - prefetch_distance]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((fast_sint_t)(s & 3) == (c0 >= 0)); c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 0; m -= ((fast_sint_t)(s & 3) == (c1 >= 0)); c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i - 1; m -= ((fast_sint_t)(s & 3) == (c0 >= 0)); c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 2; m -= ((fast_sint_t)(s & 3) == (c1 >= 0)); } for (; i >= 0; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((fast_sint_t)(s & 3) == (c1 >= 0)); } return n - 1 - m; } #if defined(_OPENMP) static void libsais_count_lms_suffixes_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, 4 * (size_t)k * sizeof(sa_sint_t)); sa_sint_t i = n - 2; fast_uint_t s = 1; fast_sint_t c0 = T[n - 1]; fast_sint_t c1 = 0; for (; i >= prefetch_distance + 3; i -= 4) { libsais_prefetch(&T[i - 2 * prefetch_distance]); libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 0], 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 1], 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 2], 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 3], 0)]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++; c0 = T[i - 
1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
        c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;
        c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
    }

    for (; i >= 0; i -= 1)
    {
        c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
    }

    /* Account for the leftmost symbol, which has no predecessor. */
    buckets[BUCKETS_INDEX4((fast_uint_t)c0, (s << 1) & 3)]++;
}

#endif

/* Count-only pass into 2k buckets keyed by (symbol, is-LMS): the second bucket index
   is the boolean (s & 3) == 1 (LMS detection), not the raw 2-bit type state. */
static void libsais_count_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    const fast_sint_t prefetch_distance = 32;

    memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t));

    sa_sint_t i = n - 2; fast_uint_t s = 1; fast_sint_t c0 = T[n - 1]; fast_sint_t c1 = 0;

    for (; i >= prefetch_distance + 3; i -= 4)
    {
        libsais_prefetch(&T[i - 2 * prefetch_distance]);

        libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0], 0)]);
        libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1], 0)]);
        libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2], 0)]);
        libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3], 0)]);

        c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
        c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
        c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
        c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
    }

    for (; i >= 0; i -= 1)
    {
        c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
        buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
    }

    /* Leftmost symbol: counted as non-LMS (second index 0). */
    buckets[BUCKETS_INDEX2((fast_uint_t)c0, 0)]++;
}

#if defined(_OPENMP)

/* Compacted-stage variant of the 2k counter: symbols may carry the sign bit as a
   flag, which is stripped with & SAINT_MAX before the symbol is used as a bucket index. */
static void libsais_count_compacted_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    const fast_sint_t prefetch_distance = 32;

    memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t));

    sa_sint_t i = n - 2; fast_uint_t s = 1; fast_sint_t c0 = T[n - 1]; fast_sint_t c1 = 0;

    for (; i >= prefetch_distance + 3; i -= 4)
    {
        libsais_prefetch(&T[i - 2 * prefetch_distance]);

        libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0] & SAINT_MAX, 0)]);
        libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1] & SAINT_MAX, 0)]);
        libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2] & SAINT_MAX, 0)]);
        libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3] & SAINT_MAX, 0)]);

        c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
        c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
        c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
        c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
    }

    for (; i >= 0; i -= 1)
    {
        c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
    }

    c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, 0)]++;
}

#endif

/* Combined pass over one chunk [omp_block_start, omp_block_start + omp_block_size)
   of the 8-bit text: fills the 4 * ALPHABET_SIZE (symbol, type) buckets AND gathers
   LMS positions into SA ending at index omp_block_start + omp_block_size - 1.
   Returns the number of LMS suffixes found in the chunk. */
static sa_sint_t libsais_count_and_gather_lms_suffixes_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t));

    fast_sint_t m = omp_block_start + omp_block_size - 1;
    if (omp_block_size > 0)
    {
        const fast_sint_t prefetch_distance = 128;

        /* Seed the type state by scanning right past any run of equal symbols at the
           chunk boundary; s starts as "current symbol is L-type" (c0 >= c1). */
        fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1; while (j < n && (c1 = T[j]) == c0) { ++j; }
        fast_uint_t s = c0 >= c1;

        /* Right-to-left, unrolled x4: SA[m] written unconditionally, m decremented
           only on an LMS hit ((s & 3) == 1); every position is counted into buckets. */
        for (i = m - 1, j = omp_block_start + 3; i >= j; i -= 4)
        {
            libsais_prefetch(&T[i - prefetch_distance]);

            c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;
            c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
            c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;
            c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
        }

        for (j -= 3; i >= j; i -= 1)
        {
            c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
        }

        /* Final step: symbol left of the chunk (or -1 when at the text start). */
        c1 = (i >= 0) ?
T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;
    }

    return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m);
}

/* Parallel driver for the 8-bit count-and-gather: each thread processes its own
   16-aligned chunk into per-thread buckets, then the master thread concatenates the
   gathered LMS runs into the tail of SA and merges the per-thread bucket counts. */
static sa_sint_t libsais_count_and_gather_lms_suffixes_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    sa_sint_t m = 0;

#if defined(_OPENMP)
    #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536 && omp_get_dynamic() == 0)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num    = omp_get_thread_num();
        fast_sint_t omp_num_threads   = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(thread_state);
        fast_sint_t omp_thread_num    = 0;
        fast_sint_t omp_num_threads   = 1;
#endif
        fast_sint_t omp_block_stride  = (n / omp_num_threads) & (-16);
        fast_sint_t omp_block_start   = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size    = omp_thread_num < omp_num_threads - 1 ?
omp_block_stride : n - omp_block_start;

        if (omp_num_threads == 1)
        {
            m = libsais_count_and_gather_lms_suffixes_8u(T, SA, n, buckets, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            {
                /* Phase 1: each thread gathers into its own chunk tail and records
                   position / count / last suffix in its thread_state slot. */
                thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size;
                thread_state[omp_thread_num].state.m = libsais_count_and_gather_lms_suffixes_8u(T, SA, n, thread_state[omp_thread_num].state.buckets, omp_block_start, omp_block_size);

                if (thread_state[omp_thread_num].state.m > 0)
                {
                    thread_state[omp_thread_num].state.last_lms_suffix = SA[thread_state[omp_thread_num].state.position - 1];
                }
            }

            #pragma omp barrier

            /* Phase 2 (master only): compact the per-chunk LMS runs into SA[n - m .. n)
               and fold the per-thread bucket counts into the global buckets; each
               temp_bucket entry is rewritten to the prefix sum of the threads after it. */
            #pragma omp master
            {
                memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t));

                fast_sint_t t;
                for (t = omp_num_threads - 1; t >= 0; --t)
                {
                    m += (sa_sint_t)thread_state[t].state.m;

                    if (t != omp_num_threads - 1 && thread_state[t].state.m > 0)
                    {
                        memcpy(&SA[n - m], &SA[thread_state[t].state.position - thread_state[t].state.m], (size_t)thread_state[t].state.m * sizeof(sa_sint_t));
                    }

                    {
                        sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets;
                        fast_sint_t s; for (s = 0; s < 4 * ALPHABET_SIZE; s += 1) { sa_sint_t A = buckets[s], B = temp_bucket[s]; buckets[s] = A + B; temp_bucket[s] = A; }
                    }
                }
            }
        }
#endif
    }

    return m;
}

/* Count-and-gather over one chunk of a 32-bit text with alphabet size k, tallying
   into 4k (symbol, type) buckets while gathering LMS positions into SA. */
static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    memset(buckets, 0, 4 * (size_t)k * sizeof(sa_sint_t));

    fast_sint_t m = omp_block_start + omp_block_size - 1;
    if (omp_block_size > 0)
    {
        const fast_sint_t prefetch_distance = 32;

        /* Seed the type state across the right chunk boundary (skip equal-symbol run). */
        fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1; while (j < n && (c1 = T[j]) == c0) { ++j; }
        fast_uint_t s = c0 >= c1;

        for (i = m - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4)
        {
            libsais_prefetch(&T[i - 2 * prefetch_distance]);

            libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 0], 0)]);
            libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 1], 0)]);
            libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 2], 0)]);
            libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 3], 0)]);

            c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;
            c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
            c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;
            c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
        }

        for (j -= prefetch_distance + 3; i >= j; i -= 1)
        {
            c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
        }

        c1 = (i >= 0) ?
T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;
    }

    return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m);
}

/* As above, but tallying into 2k (symbol, is-LMS) buckets. */
static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t));

    fast_sint_t m = omp_block_start + omp_block_size - 1;
    if (omp_block_size > 0)
    {
        const fast_sint_t prefetch_distance = 32;

        fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1; while (j < n && (c1 = T[j]) == c0) { ++j; }
        fast_uint_t s = c0 >= c1;

        for (i = m - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4)
        {
            libsais_prefetch(&T[i - 2 * prefetch_distance]);

            libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0], 0)]);
            libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1], 0)]);
            libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2], 0)]);
            libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3], 0)]);

            c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
            c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
            c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
            c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
        }

        for (j -= prefetch_distance + 3; i >= j; i -=
1)
        {
            c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
        }

        c1 = (i >= 0) ? T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
    }

    return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m);
}

/* Compacted-stage 2k count-and-gather: symbols may carry the sign bit as a flag.
   A position is gathered only when (s & 3) == 1 AND the relevant symbol is
   non-negative; the flag is stripped (& SAINT_MAX) before bucket indexing. */
static sa_sint_t libsais_count_and_gather_compacted_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t));

    fast_sint_t m = omp_block_start + omp_block_size - 1;
    if (omp_block_size > 0)
    {
        const fast_sint_t prefetch_distance = 32;

        fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1; while (j < n && (c1 = T[j]) == c0) { ++j; }
        fast_uint_t s = c0 >= c1;

        for (i = m - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4)
        {
            libsais_prefetch(&T[i - 2 * prefetch_distance]);

            libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0] & SAINT_MAX, 0)]);
            libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1] & SAINT_MAX, 0)]);
            libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2] & SAINT_MAX, 0)]);
            libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3] & SAINT_MAX, 0)]);

            c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((fast_sint_t)(s & 3) == (c0 >= 0)); c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
            c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((fast_sint_t)(s & 3) == (c1 >= 0)); c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
            c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 -
(fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((fast_sint_t)(s & 3) == (c0 >= 0)); c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
            c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((fast_sint_t)(s & 3) == (c1 >= 0)); c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
        }

        for (j -= prefetch_distance + 3; i >= j; i -= 1)
        {
            c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((fast_sint_t)(s & 3) == (c1 >= 0)); c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
        }

        c1 = (i >= 0) ? T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((fast_sint_t)(s & 3) == (c0 >= 0)); c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
    }

    return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m);
}

#if defined(_OPENMP)

/* Chooses the per-thread spacing (in elements) between stacked bucket arrays,
   preferring a 1024-aligned stride, then 16-aligned, then tight packing,
   depending on how much free workspace is available per extra bucket. */
static fast_sint_t libsais_get_bucket_stride(fast_sint_t free_space, fast_sint_t bucket_size, fast_sint_t num_buckets)
{
    fast_sint_t bucket_size_1024 = (bucket_size + 1023) & (-1024); if (free_space / (num_buckets - 1) >= bucket_size_1024) { return bucket_size_1024; }
    fast_sint_t bucket_size_16   = (bucket_size + 15) & (-16);     if (free_space / (num_buckets - 1) >= bucket_size_16)   { return bucket_size_16; }

    return bucket_size;
}

/* Parallel 4k count-and-gather using free space ("fs") below the buckets array to
   hold one stacked bucket copy per thread; counts are later reduced by
   libsais_accumulate_counts_s32. */
static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_4k_fs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    sa_sint_t m = 0;

#if defined(_OPENMP)
    #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num    = omp_get_thread_num();
        fast_sint_t omp_num_threads   = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(thread_state);
        fast_sint_t omp_thread_num    = 0;
fast_sint_t omp_num_threads   = 1;
#endif
        fast_sint_t omp_block_stride  = (n / omp_num_threads) & (-16);
        fast_sint_t omp_block_start   = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size    = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;

        if (omp_num_threads == 1)
        {
            m = libsais_count_and_gather_lms_suffixes_32s_4k(T, SA, n, k, buckets, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            fast_sint_t bucket_size   = 4 * (fast_sint_t)k;
            /* Each thread writes into its own bucket copy stacked below `buckets`
               at negative offsets of bucket_stride. */
            fast_sint_t bucket_stride = libsais_get_bucket_stride(buckets - &SA[n], bucket_size, omp_num_threads);

            {
                thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size;
                thread_state[omp_thread_num].state.count = libsais_count_and_gather_lms_suffixes_32s_4k(T, SA, n, k, buckets - (omp_thread_num * bucket_stride), omp_block_start, omp_block_size);
            }

            #pragma omp barrier

            /* After the barrier the work forks: the last thread compacts the gathered
               LMS runs into the tail of SA, while the remaining threads reduce the
               stacked per-thread bucket copies in parallel slices. */
            if (omp_thread_num == omp_num_threads - 1)
            {
                fast_sint_t t;
                for (t = omp_num_threads - 1; t >= 0; --t)
                {
                    m += (sa_sint_t)thread_state[t].state.count;

                    if (t != omp_num_threads - 1 && thread_state[t].state.count > 0)
                    {
                        memcpy(&SA[n - m], &SA[thread_state[t].state.position - thread_state[t].state.count], (size_t)thread_state[t].state.count * sizeof(sa_sint_t));
                    }
                }
            }
            else
            {
                omp_num_threads   = omp_num_threads - 1;
                omp_block_stride  = (bucket_size / omp_num_threads) & (-16);
                omp_block_start   = omp_thread_num * omp_block_stride;
                omp_block_size    = omp_thread_num < omp_num_threads - 1 ?
omp_block_stride : bucket_size - omp_block_start;

                libsais_accumulate_counts_s32(buckets + omp_block_start, omp_block_size, bucket_stride, omp_num_threads + 1);
            }
        }
#endif
    }

    return m;
}

/* 2k variant of the fs-parallel count-and-gather (identical structure, 2k buckets). */
static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_2k_fs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    sa_sint_t m = 0;

#if defined(_OPENMP)
    #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num    = omp_get_thread_num();
        fast_sint_t omp_num_threads   = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(thread_state);
        fast_sint_t omp_thread_num    = 0;
        fast_sint_t omp_num_threads   = 1;
#endif
        fast_sint_t omp_block_stride  = (n / omp_num_threads) & (-16);
        fast_sint_t omp_block_start   = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size    = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;

        if (omp_num_threads == 1)
        {
            m = libsais_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            fast_sint_t bucket_size   = 2 * (fast_sint_t)k;
            fast_sint_t bucket_stride = libsais_get_bucket_stride(buckets - &SA[n], bucket_size, omp_num_threads);

            {
                thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size;
                thread_state[omp_thread_num].state.count = libsais_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets - (omp_thread_num * bucket_stride), omp_block_start, omp_block_size);
            }

            #pragma omp barrier

            if (omp_thread_num == omp_num_threads - 1)
            {
                /* Last thread: concatenate per-chunk LMS runs into SA[n - m .. n). */
                fast_sint_t t;
                for (t = omp_num_threads - 1; t >= 0; --t)
                {
                    m += (sa_sint_t)thread_state[t].state.count;

                    if (t != omp_num_threads - 1 && thread_state[t].state.count > 0)
                    {
                        memcpy(&SA[n - m], &SA[thread_state[t].state.position - thread_state[t].state.count], (size_t)thread_state[t].state.count * sizeof(sa_sint_t));
                    }
                }
            }
            else
            {
omp_num_threads   = omp_num_threads - 1;
                omp_block_stride  = (bucket_size / omp_num_threads) & (-16);
                omp_block_start   = omp_thread_num * omp_block_stride;
                omp_block_size    = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : bucket_size - omp_block_start;

                libsais_accumulate_counts_s32(buckets + omp_block_start, omp_block_size, bucket_stride, omp_num_threads + 1);
            }
        }
#endif
    }

    return m;
}

/* Compacted-stage fs-parallel count-and-gather. Gathers into SA + n (the upper half
   of the workspace), then every thread copies its own run into place; no value is
   returned (counts end up in the reduced buckets). */
static void libsais_count_and_gather_compacted_lms_suffixes_32s_2k_fs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
    #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num    = omp_get_thread_num();
        fast_sint_t omp_num_threads   = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(thread_state);
        fast_sint_t omp_thread_num    = 0;
        fast_sint_t omp_num_threads   = 1;
#endif
        fast_sint_t omp_block_stride  = (n / omp_num_threads) & (-16);
        fast_sint_t omp_block_start   = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size    = omp_thread_num < omp_num_threads - 1 ?
omp_block_stride : n - omp_block_start;

        if (omp_num_threads == 1)
        {
            libsais_count_and_gather_compacted_lms_suffixes_32s_2k(T, SA, n, k, buckets, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            fast_sint_t bucket_size   = 2 * (fast_sint_t)k;
            fast_sint_t bucket_stride = libsais_get_bucket_stride(buckets - &SA[n + n], bucket_size, omp_num_threads);

            {
                /* Gather into the upper half of the workspace (SA + n). */
                thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size;
                thread_state[omp_thread_num].state.count = libsais_count_and_gather_compacted_lms_suffixes_32s_2k(T, SA + n, n, k, buckets - (omp_thread_num * bucket_stride), omp_block_start, omp_block_size);
            }

            #pragma omp barrier

            {
                /* Every thread copies its own LMS run from SA + n into its final
                   slot in SA[n - m ..): m is the total count of this thread's run
                   plus all runs to its right. */
                fast_sint_t t, m = 0; for (t = omp_num_threads - 1; t >= omp_thread_num; --t) { m += (sa_sint_t)thread_state[t].state.count; }

                if (thread_state[omp_thread_num].state.count > 0)
                {
                    memcpy(&SA[n - m], &SA[n + thread_state[omp_thread_num].state.position - thread_state[omp_thread_num].state.count], (size_t)thread_state[omp_thread_num].state.count * sizeof(sa_sint_t));
                }
            }

            {
                /* All threads participate in reducing the stacked bucket copies. */
                omp_block_stride  = (bucket_size / omp_num_threads) & (-16);
                omp_block_start   = omp_thread_num * omp_block_stride;
                omp_block_size    = omp_thread_num < omp_num_threads - 1 ?
omp_block_stride : bucket_size - omp_block_start;

                libsais_accumulate_counts_s32(buckets + omp_block_start, omp_block_size, bucket_stride, omp_num_threads);
            }
        }
#endif
    }
}

#endif

/* No-free-space ("nofs") two-thread split: thread 0 counts into buckets while
   thread 1 gathers LMS positions into SA; single-threaded fallback does both at once. */
static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_4k_nofs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads)
{
    sa_sint_t m = 0;

#if defined(_OPENMP)
    #pragma omp parallel num_threads(2) if(threads > 1 && n >= 65536)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num    = omp_get_thread_num();
        fast_sint_t omp_num_threads   = omp_get_num_threads();
#else
        UNUSED(threads);
        fast_sint_t omp_num_threads   = 1;
#endif
        if (omp_num_threads == 1)
        {
            m = libsais_count_and_gather_lms_suffixes_32s_4k(T, SA, n, k, buckets, 0, n);
        }
#if defined(_OPENMP)
        else if (omp_thread_num == 0)
        {
            libsais_count_lms_suffixes_32s_4k(T, n, k, buckets);
        }
        else
        {
            m = libsais_gather_lms_suffixes_32s(T, SA, n);
        }
#endif
    }

    return m;
}

/* 2k variant of the nofs two-thread split. */
static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_2k_nofs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads)
{
    sa_sint_t m = 0;

#if defined(_OPENMP)
    #pragma omp parallel num_threads(2) if(threads > 1 && n >= 65536)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num    = omp_get_thread_num();
        fast_sint_t omp_num_threads   = omp_get_num_threads();
#else
        UNUSED(threads);
        fast_sint_t omp_num_threads   = 1;
#endif
        if (omp_num_threads == 1)
        {
            m = libsais_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets, 0, n);
        }
#if defined(_OPENMP)
        else if (omp_thread_num == 0)
        {
            libsais_count_lms_suffixes_32s_2k(T, n, k, buckets);
        }
        else
        {
            m = libsais_gather_lms_suffixes_32s(T, SA, n);
        }
#endif
    }

    return m;
}

/* Compacted-stage nofs two-thread split. */
static sa_sint_t libsais_count_and_gather_compacted_lms_suffixes_32s_2k_nofs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads)
{
    sa_sint_t m =
0;

#if defined(_OPENMP)
    #pragma omp parallel num_threads(2) if(threads > 1 && n >= 65536)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num    = omp_get_thread_num();
        fast_sint_t omp_num_threads   = omp_get_num_threads();
#else
        UNUSED(threads);
        fast_sint_t omp_num_threads   = 1;
#endif
        if (omp_num_threads == 1)
        {
            m = libsais_count_and_gather_compacted_lms_suffixes_32s_2k(T, SA, n, k, buckets, 0, n);
        }
#if defined(_OPENMP)
        else if (omp_thread_num == 0)
        {
            libsais_count_compacted_lms_suffixes_32s_2k(T, n, k, buckets);
        }
        else
        {
            m = libsais_gather_compacted_lms_suffixes_32s(T, SA, n);
        }
#endif
    }

    return m;
}

/* Top-level dispatcher: picks the fs-parallel path when enough free workspace exists
   below `buckets` for stacked per-thread copies (and n/k is favourable), otherwise
   falls back to the nofs two-thread split. */
static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    sa_sint_t m;

#if defined(_OPENMP)
    /* Max threads limited by free space between SA[n] and buckets (one 16-aligned
       4k bucket copy per extra thread). */
    sa_sint_t max_threads = (sa_sint_t)((buckets - &SA[n]) / ((4 * (fast_sint_t)k + 15) & (-16))); if (max_threads > threads) { max_threads = threads; }
    if (max_threads > 1 && n >= 65536 && n / k >= 2)
    {
        if (max_threads > n / 16 / k) { max_threads = n / 16 / k; }
        m = libsais_count_and_gather_lms_suffixes_32s_4k_fs_omp(T, SA, n, k, buckets, max_threads > 2 ?
max_threads : 2, thread_state);
    }
    else
#else
    UNUSED(thread_state);
#endif
    {
        m = libsais_count_and_gather_lms_suffixes_32s_4k_nofs_omp(T, SA, n, k, buckets, threads);
    }

    return m;
}

/* 2k dispatcher (same policy as the 4k one, with 2k-sized copies and n / 8 / k cap). */
static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    sa_sint_t m;

#if defined(_OPENMP)
    sa_sint_t max_threads = (sa_sint_t)((buckets - &SA[n]) / ((2 * (fast_sint_t)k + 15) & (-16))); if (max_threads > threads) { max_threads = threads; }
    if (max_threads > 1 && n >= 65536 && n / k >= 2)
    {
        if (max_threads > n / 8 / k) { max_threads = n / 8 / k; }
        m = libsais_count_and_gather_lms_suffixes_32s_2k_fs_omp(T, SA, n, k, buckets, max_threads > 2 ? max_threads : 2, thread_state);
    }
    else
#else
    UNUSED(thread_state);
#endif
    {
        m = libsais_count_and_gather_lms_suffixes_32s_2k_nofs_omp(T, SA, n, k, buckets, threads);
    }

    return m;
}

/* Compacted-stage dispatcher; free space is measured from SA[n + n] because the
   compacted gather uses the upper half of the workspace. */
static void libsais_count_and_gather_compacted_lms_suffixes_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
    sa_sint_t max_threads = (sa_sint_t)((buckets - &SA[n + n]) / ((2 * (fast_sint_t)k + 15) & (-16))); if (max_threads > threads) { max_threads = threads; }
    if (max_threads > 1 && n >= 65536 && n / k >= 2)
    {
        if (max_threads > n / 8 / k) { max_threads = n / 8 / k; }
        libsais_count_and_gather_compacted_lms_suffixes_32s_2k_fs_omp(T, SA, n, k, buckets, max_threads > 2 ?
max_threads : 2, thread_state);
    }
    else
#else
    UNUSED(thread_state);
#endif
    {
        libsais_count_and_gather_compacted_lms_suffixes_32s_2k_nofs_omp(T, SA, n, k, buckets, threads);
    }
}

/* Plain symbol histogram of T into k buckets (unrolled x8 with prefetch). */
static void libsais_count_suffixes_32s(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    const fast_sint_t prefetch_distance = 32;

    memset(buckets, 0, (size_t)k * sizeof(sa_sint_t));

    fast_sint_t i, j;
    for (i = 0, j = (fast_sint_t)n - 7; i < j; i += 8)
    {
        libsais_prefetch(&T[i + prefetch_distance]);

        buckets[T[i + 0]]++;
        buckets[T[i + 1]]++;
        buckets[T[i + 2]]++;
        buckets[T[i + 3]]++;
        buckets[T[i + 4]]++;
        buckets[T[i + 5]]++;
        buckets[T[i + 6]]++;
        buckets[T[i + 7]]++;
    }

    for (j += 7; i < j; i += 1)
    {
        buckets[T[i]]++;
    }
}

/* Builds per-symbol [start, end) offsets for the 8-bit alphabet from the 4-way typed
   counts; optionally exports the per-symbol totals into freq when freq != NULL. */
static void libsais_initialize_buckets_start_and_end_8u(sa_sint_t * RESTRICT buckets, sa_sint_t * RESTRICT freq)
{
    sa_sint_t * RESTRICT bucket_start = &buckets[6 * ALPHABET_SIZE];
    sa_sint_t * RESTRICT bucket_end   = &buckets[7 * ALPHABET_SIZE];

    if (freq != NULL)
    {
        fast_sint_t i, j; sa_sint_t sum = 0;
        for (i = BUCKETS_INDEX4(0, 0), j = 0; i <= BUCKETS_INDEX4(ALPHABET_SIZE - 1, 0); i += BUCKETS_INDEX4(1, 0), j += 1)
        {
            bucket_start[j] = sum;
            sum += (freq[j] = buckets[i + BUCKETS_INDEX4(0, 0)] + buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 2)] + buckets[i + BUCKETS_INDEX4(0, 3)]);
            bucket_end[j] = sum;
        }
    }
    else
    {
        fast_sint_t i, j; sa_sint_t sum = 0;
        for (i = BUCKETS_INDEX4(0, 0), j = 0; i <= BUCKETS_INDEX4(ALPHABET_SIZE - 1, 0); i += BUCKETS_INDEX4(1, 0), j += 1)
        {
            bucket_start[j] = sum;
            sum += buckets[i + BUCKETS_INDEX4(0, 0)] + buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 2)] + buckets[i + BUCKETS_INDEX4(0, 3)];
            bucket_end[j] = sum;
        }
    }
}

/* 32-bit 6k layout: per-symbol [start, end) stored at buckets[4k..5k) / [5k..6k). */
static void libsais_initialize_buckets_start_and_end_32s_6k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    sa_sint_t * RESTRICT bucket_start = &buckets[4 * k];
    sa_sint_t * RESTRICT bucket_end   = &buckets[5 * k];

    fast_sint_t i, j; sa_sint_t sum = 0;
    for (i = BUCKETS_INDEX4(0, 0),
j = 0; i <= BUCKETS_INDEX4((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX4(1, 0), j += 1)
    {
        bucket_start[j] = sum;
        sum += buckets[i + BUCKETS_INDEX4(0, 0)] + buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 2)] + buckets[i + BUCKETS_INDEX4(0, 3)];
        bucket_end[j] = sum;
    }
}

/* 4k layout: [start, end) built from the 2-way typed counts, stored at [2k..3k)/[3k..4k). */
static void libsais_initialize_buckets_start_and_end_32s_4k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    sa_sint_t * RESTRICT bucket_start = &buckets[2 * k];
    sa_sint_t * RESTRICT bucket_end   = &buckets[3 * k];

    fast_sint_t i, j; sa_sint_t sum = 0;
    for (i = BUCKETS_INDEX2(0, 0), j = 0; i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0), j += 1)
    {
        bucket_start[j] = sum;
        sum += buckets[i + BUCKETS_INDEX2(0, 0)] + buckets[i + BUCKETS_INDEX2(0, 1)];
        bucket_end[j] = sum;
    }
}

/* In-place: overwrites each 2k bucket's first slot with the inclusive end offset. */
static void libsais_initialize_buckets_end_32s_2k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    fast_sint_t i; sa_sint_t sum0 = 0;
    for (i = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0))
    {
        sum0 += buckets[i + BUCKETS_INDEX2(0, 0)] + buckets[i + BUCKETS_INDEX2(0, 1)];
        buckets[i + BUCKETS_INDEX2(0, 0)] = sum0;
    }
}

/* Compacts the 2k layout's first slots into buckets[0..k), then replicates them
   (shifted by one) into buckets[k..2k) with buckets[k] = 0 as the start sentinel. */
static void libsais_initialize_buckets_start_and_end_32s_2k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    fast_sint_t i, j;
    for (i = BUCKETS_INDEX2(0, 0), j = 0; i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0), j += 1)
    {
        buckets[j] = buckets[i];
    }

    buckets[k] = 0; memcpy(&buckets[k + 1], buckets, ((size_t)k - 1) * sizeof(sa_sint_t));
}

/* Exclusive prefix sum in place: buckets[i] becomes the start offset of symbol i. */
static void libsais_initialize_buckets_start_32s_1k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    fast_sint_t i; sa_sint_t sum = 0;
    for (i = 0; i <= (fast_sint_t)k - 1; i += 1) { sa_sint_t tmp = buckets[i]; buckets[i] = sum; sum += tmp; }
}

/* Inclusive prefix sum in place: buckets[i] becomes the end offset of symbol i. */
static void libsais_initialize_buckets_end_32s_1k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    fast_sint_t i; sa_sint_t sum = 0;
    for (i = 0; i <= (fast_sint_t)k - 1; i += 1) { sum += buckets[i]; buckets[i] = sum; }
}

static sa_sint_t
libsais_initialize_buckets_for_lms_suffixes_radix_sort_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix) { { fast_uint_t s = 0; fast_sint_t c0 = T[first_lms_suffix]; fast_sint_t c1 = 0; for (; --first_lms_suffix >= 0; ) { c1 = c0; c0 = T[first_lms_suffix]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]--; } buckets[BUCKETS_INDEX4((fast_uint_t)c0, (s << 1) & 3)]--; } { sa_sint_t * RESTRICT temp_bucket = &buckets[4 * ALPHABET_SIZE]; fast_sint_t i, j; sa_sint_t sum = 0; for (i = BUCKETS_INDEX4(0, 0), j = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX4(ALPHABET_SIZE - 1, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0)) { temp_bucket[j + BUCKETS_INDEX2(0, 1)] = sum; sum += buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 3)]; temp_bucket[j] = sum; } return sum; } } static void libsais_initialize_buckets_for_lms_suffixes_radix_sort_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix) { buckets[BUCKETS_INDEX2(T[first_lms_suffix], 0)]++; buckets[BUCKETS_INDEX2(T[first_lms_suffix], 1)]--; fast_sint_t i; sa_sint_t sum0 = 0, sum1 = 0; for (i = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0)) { sum0 += buckets[i + BUCKETS_INDEX2(0, 0)] + buckets[i + BUCKETS_INDEX2(0, 1)]; sum1 += buckets[i + BUCKETS_INDEX2(0, 1)]; buckets[i + BUCKETS_INDEX2(0, 0)] = sum0; buckets[i + BUCKETS_INDEX2(0, 1)] = sum1; } } static sa_sint_t libsais_initialize_buckets_for_lms_suffixes_radix_sort_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix) { { fast_uint_t s = 0; fast_sint_t c0 = T[first_lms_suffix]; fast_sint_t c1 = 0; for (; --first_lms_suffix >= 0; ) { c1 = c0; c0 = T[first_lms_suffix]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]--; } 
/* (tail of libsais_initialize_buckets_for_lms_suffixes_radix_sort_32s_6k: accounts for the
   leftmost suffix, then prefix-sums slots 1 and 3 per symbol into buckets[4k..5k) and
   returns the total) */ buckets[BUCKETS_INDEX4((fast_uint_t)c0, (s << 1) & 3)]--; } { sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k]; fast_sint_t i, j; sa_sint_t sum = 0; for (i = BUCKETS_INDEX4(0, 0), j = 0; i <= BUCKETS_INDEX4((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX4(1, 0), j += 1) { sum += buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 3)]; temp_bucket[j] = sum; } return sum; } }
/* From the paired counters, builds bucket_start (exclusive) / bucket_end (inclusive) offsets
   at buckets[2k]/buckets[3k] while rewriting slot 1 in place with the running slot-1 total;
   the first LMS suffix is first moved from slot 1 to slot 0 of its symbol's pair. */
static void libsais_initialize_buckets_for_radix_and_partial_sorting_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix) { sa_sint_t * RESTRICT bucket_start = &buckets[2 * k]; sa_sint_t * RESTRICT bucket_end = &buckets[3 * k]; buckets[BUCKETS_INDEX2(T[first_lms_suffix], 0)]++; buckets[BUCKETS_INDEX2(T[first_lms_suffix], 1)]--; fast_sint_t i, j; sa_sint_t sum0 = 0, sum1 = 0; for (i = BUCKETS_INDEX2(0, 0), j = 0; i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0), j += 1) { bucket_start[j] = sum1; sum0 += buckets[i + BUCKETS_INDEX2(0, 1)]; sum1 += buckets[i + BUCKETS_INDEX2(0, 0)] + buckets[i + BUCKETS_INDEX2(0, 1)]; buckets[i + BUCKETS_INDEX2(0, 1)] = sum0; bucket_end[j] = sum1; } }
/* Right-to-left scatter of the positions stored in SA[omp_block] into their buckets via
   SA[--induction_bucket[...]] = p, 4x unrolled with software prefetch of upcoming SA entries
   and the T bytes they index (body continues on the next source line). */
static void libsais_radix_sort_lms_suffixes_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4) { libsais_prefetch(&SA[i - 2 * prefetch_distance]); libsais_prefetch(&T[SA[i - prefetch_distance - 0]]); libsais_prefetch(&T[SA[i - prefetch_distance - 1]]); libsais_prefetch(&T[SA[i - prefetch_distance - 2]]); libsais_prefetch(&T[SA[i - prefetch_distance - 3]]); sa_sint_t p0 = SA[i - 0]; SA[--induction_bucket[BUCKETS_INDEX2(T[p0], 0)]] = p0; sa_sint_t p1 = SA[i - 1]; SA[--induction_bucket[BUCKETS_INDEX2(T[p1], 0)]] = p1; sa_sint_t p2 = SA[i - 2];
/* (tail of libsais_radix_sort_lms_suffixes_8u: remaining unrolled lanes plus the scalar
   remainder loop for the last < 4 + prefetch_distance entries) */ SA[--induction_bucket[BUCKETS_INDEX2(T[p2], 0)]] = p2; sa_sint_t p3 = SA[i - 3]; SA[--induction_bucket[BUCKETS_INDEX2(T[p3], 0)]] = p3; } for (j -= prefetch_distance + 3; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[--induction_bucket[BUCKETS_INDEX2(T[p], 0)]] = p; } }
/* OpenMP driver for the 8-bit LMS radix sort. Single-threaded it sorts all m - 1 suffixes in
   one call. Multi-threaded, each thread first derives private bucket offsets by subtracting
   its recorded per-thread counts (thread_state[..].state.buckets) from the shared totals,
   then computes its own slice boundaries from the per-thread LMS counts and scatters just
   that slice. The parallel region is only entered for large inputs with dynamic teams off,
   so the thread count is stable. */
static void libsais_radix_sort_lms_suffixes_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536 && m >= 65536 && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_num_threads = 1; #endif if (omp_num_threads == 1) { libsais_radix_sort_lms_suffixes_8u(T, SA, &buckets[4 * ALPHABET_SIZE], (fast_sint_t)n - (fast_sint_t)m + 1, (fast_sint_t)m - 1); } #if defined(_OPENMP) else { { sa_sint_t * RESTRICT src_bucket = &buckets[4 * ALPHABET_SIZE]; sa_sint_t * RESTRICT dst_bucket = thread_state[omp_thread_num].state.buckets; fast_sint_t i, j; for (i = BUCKETS_INDEX2(0, 0), j = BUCKETS_INDEX4(0, 1); i <= BUCKETS_INDEX2(ALPHABET_SIZE - 1, 0); i += BUCKETS_INDEX2(1, 0), j += BUCKETS_INDEX4(1, 0)) { dst_bucket[i] = src_bucket[i] - dst_bucket[j]; } } { fast_sint_t t, omp_block_start = 0, omp_block_size = thread_state[omp_thread_num].state.m; for (t = omp_num_threads - 1; t >= omp_thread_num; --t) omp_block_start += thread_state[t].state.m; if (omp_block_start == (fast_sint_t)m && omp_block_size > 0) { omp_block_start -= 1; omp_block_size -= 1; } libsais_radix_sort_lms_suffixes_8u(T, SA, thread_state[omp_thread_num].state.buckets, (fast_sint_t)n - omp_block_start, omp_block_size); } } #endif } }
/* 6k variant of the LMS scatter for a 32-bit alphabet: one induction counter per symbol,
   with a deeper prefetch pipeline that also prefetches the counter entries for writing
   (body continues on the next source line). */
static void libsais_radix_sort_lms_suffixes_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket,
fast_sint_t omp_block_start, fast_sint_t omp_block_size) { /* right-to-left scatter: for each position p taken from SA[block], place it at the tail of its symbol's bucket (SA[--induction_bucket[T[p]]] = p); 4x unrolled, prefetching SA two distances ahead, T one distance ahead, and the bucket slots for writing */ const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 3; i >= j; i -= 4) { libsais_prefetch(&SA[i - 3 * prefetch_distance]); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 0]]); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 1]]); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 2]]); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 3]]); libsais_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 0]]]); libsais_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 1]]]); libsais_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 2]]]); libsais_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 3]]]); sa_sint_t p0 = SA[i - 0]; SA[--induction_bucket[T[p0]]] = p0; sa_sint_t p1 = SA[i - 1]; SA[--induction_bucket[T[p1]]] = p1; sa_sint_t p2 = SA[i - 2]; SA[--induction_bucket[T[p2]]] = p2; sa_sint_t p3 = SA[i - 3]; SA[--induction_bucket[T[p3]]] = p3; } for (j -= 2 * prefetch_distance + 3; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[--induction_bucket[T[p]]] = p; } }
/* 2k variant: identical scatter structure, but the induction counters live in slot 0 of
   BUCKETS_INDEX2 pairs (body continues on the next source line). */
static void libsais_radix_sort_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 3; i >= j; i -= 4) { libsais_prefetch(&SA[i - 3 * prefetch_distance]); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 0]]); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 1]]); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 2]]); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 3]]); libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 0]], 0)]); libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 1]],
0)]); libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 2]], 0)]); libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 3]], 0)]); sa_sint_t p0 = SA[i - 0]; SA[--induction_bucket[BUCKETS_INDEX2(T[p0], 0)]] = p0; sa_sint_t p1 = SA[i - 1]; SA[--induction_bucket[BUCKETS_INDEX2(T[p1], 0)]] = p1; sa_sint_t p2 = SA[i - 2]; SA[--induction_bucket[BUCKETS_INDEX2(T[p2], 0)]] = p2; sa_sint_t p3 = SA[i - 3]; SA[--induction_bucket[BUCKETS_INDEX2(T[p3], 0)]] = p3; } for (j -= 2 * prefetch_distance + 3; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[--induction_bucket[BUCKETS_INDEX2(T[p], 0)]] = p; } } #if defined(_OPENMP)
/* Parallel helper, phase 1: copies each SA[block] entry into the per-thread cache as an
   (index, symbol) pair, where symbol = T[index]; 4x unrolled with prefetch of SA, T and the
   cache lines being written. */
static void libsais_radix_sort_lms_suffixes_32s_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais_prefetch(&SA[i + 2 * prefetch_distance]); libsais_prefetch(&T[SA[i + prefetch_distance + 0]]); libsais_prefetch(&T[SA[i + prefetch_distance + 1]]); libsais_prefetch(&T[SA[i + prefetch_distance + 2]]); libsais_prefetch(&T[SA[i + prefetch_distance + 3]]); libsais_prefetchw(&cache[i + prefetch_distance]); cache[i + 0].symbol = T[cache[i + 0].index = SA[i + 0]]; cache[i + 1].symbol = T[cache[i + 1].index = SA[i + 1]]; cache[i + 2].symbol = T[cache[i + 2].index = SA[i + 2]]; cache[i + 3].symbol = T[cache[i + 3].index = SA[i + 3]]; } for (j += prefetch_distance + 3; i < j; i += 1) { cache[i].symbol = T[cache[i].index = SA[i]]; } }
/* Parallel helper, phase 2 (6k layout, runs single-threaded under the master thread):
   walks the cached entries right-to-left and replaces each entry's symbol with its final
   SA position, obtained by decrementing that symbol's induction counter (body continues on
   the next source line). */
static void libsais_radix_sort_lms_suffixes_32s_6k_block_sort(sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j =
omp_block_start + prefetch_distance + 3; i >= j; i -= 4) { /* 4x unrolled: prefetch upcoming cache entries and the counters they will decrement */ libsais_prefetchw(&cache[i - 2 * prefetch_distance]); libsais_prefetchw(&induction_bucket[cache[i - prefetch_distance - 0].symbol]); libsais_prefetchw(&induction_bucket[cache[i - prefetch_distance - 1].symbol]); libsais_prefetchw(&induction_bucket[cache[i - prefetch_distance - 2].symbol]); libsais_prefetchw(&induction_bucket[cache[i - prefetch_distance - 3].symbol]); cache[i - 0].symbol = --induction_bucket[cache[i - 0].symbol]; cache[i - 1].symbol = --induction_bucket[cache[i - 1].symbol]; cache[i - 2].symbol = --induction_bucket[cache[i - 2].symbol]; cache[i - 3].symbol = --induction_bucket[cache[i - 3].symbol]; } for (j -= prefetch_distance + 3; i >= j; i -= 1) { cache[i].symbol = --induction_bucket[cache[i].symbol]; } }
/* Same sequential position-assignment pass as the 6k version, but the counters live in
   slot 0 of BUCKETS_INDEX2 pairs: each cached entry's symbol is replaced by the final SA
   position taken from its decremented bucket counter. */
static void libsais_radix_sort_lms_suffixes_32s_2k_block_sort(sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4) { libsais_prefetchw(&cache[i - 2 * prefetch_distance]); libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 0].symbol, 0)]); libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 1].symbol, 0)]); libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 2].symbol, 0)]); libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 3].symbol, 0)]); cache[i - 0].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 0].symbol, 0)]; cache[i - 1].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 1].symbol, 0)]; cache[i - 2].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 2].symbol, 0)]; cache[i - 3].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 3].symbol, 0)]; } for (j -= prefetch_distance + 3; i >= j; i -= 1) {
cache[i].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i].symbol, 0)]; } }
/* Parallel radix sort of one block (6k layout), three phases separated by barriers:
   (1) all threads gather their slice of SA into the shared cache; (2) the master thread
   alone runs the order-dependent bucket-decrement pass over the whole block; (3) all
   threads place their cached slice back into SA. Cache is indexed as cache - block_start so
   block-relative and SA-relative indices coincide. Falls back to the serial routine when
   the team has a single thread. */
static void libsais_radix_sort_lms_suffixes_32s_6k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_radix_sort_lms_suffixes_32s_6k(T, SA, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais_radix_sort_lms_suffixes_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { libsais_radix_sort_lms_suffixes_32s_6k_block_sort(induction_bucket, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } }
/* 2k-layout twin of the function above: identical gather / master-sort / place structure,
   dispatching to the 2k block sorter (body continues on the next source line). */
static void libsais_radix_sort_lms_suffixes_32s_2k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t
omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_radix_sort_lms_suffixes_32s_2k(T, SA, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais_radix_sort_lms_suffixes_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { libsais_radix_sort_lms_suffixes_32s_2k_block_sort(induction_bucket, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } } #endif
/* Top-level 6k driver: serial path for small inputs or one thread; otherwise chops the m - 1
   LMS suffixes (which sit at the tail of SA, ending at index n - 1) into chunks of
   threads * LIBSAIS_PER_THREAD_CACHE_SIZE and hands each chunk to the parallel block sorter,
   sharing thread_state[0]'s cache. */
static void libsais_radix_sort_lms_suffixes_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (threads == 1 || m < 65536) { libsais_radix_sort_lms_suffixes_32s_6k(T, SA, induction_bucket, (fast_sint_t)n - (fast_sint_t)m + 1, (fast_sint_t)m - 1); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = 0; block_start < (fast_sint_t)m - 1; block_start = block_end) { block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end >= m) { block_end = (fast_sint_t)m - 1; } libsais_radix_sort_lms_suffixes_32s_6k_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, (fast_sint_t)n - block_end, block_end - block_start, threads); } } #else UNUSED(thread_state); #endif }
/* 2k twin of the driver above (body continues on the next source line). */
static void libsais_radix_sort_lms_suffixes_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA,
sa_sint_t n, sa_sint_t m, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { /* serial path for small inputs; otherwise chunked dispatch to the parallel 2k block sorter */ if (threads == 1 || m < 65536) { libsais_radix_sort_lms_suffixes_32s_2k(T, SA, induction_bucket, (fast_sint_t)n - (fast_sint_t)m + 1, (fast_sint_t)m - 1); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = 0; block_start < (fast_sint_t)m - 1; block_start = block_end) { block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end >= m) { block_end = (fast_sint_t)m - 1; } libsais_radix_sort_lms_suffixes_32s_2k_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, (fast_sint_t)n - block_end, block_end - block_start, threads); } } #else UNUSED(thread_state); #endif }
/* 1k variant, fused detect-and-scatter: walks T right-to-left maintaining the type
   bit-stream s, and whenever the two low bits show an LMS position ((s & 3) == 1) places
   that position at the tail of its symbol's bucket; 4x unrolled with prefetch. Returns the
   number m of LMS suffixes found; c2 remembers the symbol of the last placed one (body
   continues on the next source line). */
static sa_sint_t libsais_radix_sort_lms_suffixes_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets) { const fast_sint_t prefetch_distance = 32; sa_sint_t i = n - 2; sa_sint_t m = 0; fast_uint_t s = 1; fast_sint_t c0 = T[n - 1]; fast_sint_t c1 = 0; fast_sint_t c2 = 0; for (; i >= prefetch_distance + 3; i -= 4) { libsais_prefetch(&T[i - 2 * prefetch_distance]); libsais_prefetchw(&buckets[T[i - prefetch_distance - 0]]); libsais_prefetchw(&buckets[T[i - prefetch_distance - 1]]); libsais_prefetchw(&buckets[T[i - prefetch_distance - 2]]); libsais_prefetchw(&buckets[T[i - prefetch_distance - 3]]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); if ((s & 3) == 1) { SA[--buckets[c2 = c0]] = i + 1; m++; } c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); if ((s & 3) == 1) { SA[--buckets[c2 = c1]] = i - 0; m++; } c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); if ((s & 3) == 1) { SA[--buckets[c2 = c0]] = i - 1; m++; } c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); if ((s & 3) == 1) { SA[--buckets[c2 = c1]] = i - 2; m++; } } for (; i >= 0;
i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); if ((s & 3) == 1) { SA[--buckets[c2 = c1]] = i + 1; m++; } } /* when more than one LMS suffix exists, zero out the slot of the last one placed -- NOTE(review): presumably a sentinel consumed by a later pass; confirm against callers */ if (m > 1) { SA[buckets[c2]] = 0; } return m; }
/* For each bucket index in [omp_block_start, omp_block_start + omp_block_size), tags the SA
   entry at that bucket's recorded offset with the sign bit (SAINT_MIN); 4x unrolled with
   prefetch of the bucket offsets and the SA slots being written. */
static void libsais_radix_sort_set_markers_32s_6k(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais_prefetch(&induction_bucket[i + 2 * prefetch_distance]); libsais_prefetchw(&SA[induction_bucket[i + prefetch_distance + 0]]); libsais_prefetchw(&SA[induction_bucket[i + prefetch_distance + 1]]); libsais_prefetchw(&SA[induction_bucket[i + prefetch_distance + 2]]); libsais_prefetchw(&SA[induction_bucket[i + prefetch_distance + 3]]); SA[induction_bucket[i + 0]] |= SAINT_MIN; SA[induction_bucket[i + 1]] |= SAINT_MIN; SA[induction_bucket[i + 2]] |= SAINT_MIN; SA[induction_bucket[i + 3]] |= SAINT_MIN; } for (j += prefetch_distance + 3; i < j; i += 1) { SA[induction_bucket[i]] |= SAINT_MIN; } }
/* 4k variant: same marker pass but reads the offsets from slot 0 of BUCKETS_INDEX2 pairs
   and tags entries with SUFFIX_GROUP_MARKER instead of the sign bit (body continues on the
   next source line). */
static void libsais_radix_sort_set_markers_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais_prefetch(&induction_bucket[BUCKETS_INDEX2(i + 2 * prefetch_distance, 0)]); libsais_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 0, 0)]]); libsais_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 1, 0)]]); libsais_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 2, 0)]]); libsais_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 3, 0)]]); SA[induction_bucket[BUCKETS_INDEX2(i + 0, 0)]] |= SUFFIX_GROUP_MARKER;
SA[induction_bucket[BUCKETS_INDEX2(i + 1, 0)]] |= SUFFIX_GROUP_MARKER; SA[induction_bucket[BUCKETS_INDEX2(i + 2, 0)]] |= SUFFIX_GROUP_MARKER; SA[induction_bucket[BUCKETS_INDEX2(i + 3, 0)]] |= SUFFIX_GROUP_MARKER; } for (j += prefetch_distance + 3; i < j; i += 1) { SA[induction_bucket[BUCKETS_INDEX2(i, 0)]] |= SUFFIX_GROUP_MARKER; } }
/* OpenMP wrapper for the 6k marker pass: splits the k - 1 bucket indices evenly across the
   team in multiples of 16 (last thread takes the remainder); parallelism only kicks in for
   k >= 65536. */
static void libsais_radix_sort_set_markers_32s_6k_omp(sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && k >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); fast_sint_t omp_block_stride = (((fast_sint_t)k - 1) / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : (fast_sint_t)k - 1 - omp_block_start; #else UNUSED(threads); fast_sint_t omp_block_start = 0; fast_sint_t omp_block_size = (fast_sint_t)k - 1; #endif libsais_radix_sort_set_markers_32s_6k(SA, induction_bucket, omp_block_start, omp_block_size); } }
/* Identical OpenMP wrapper for the 4k marker pass (body continues on the next source
   line). */
static void libsais_radix_sort_set_markers_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && k >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); fast_sint_t omp_block_stride = (((fast_sint_t)k - 1) / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ?
omp_block_stride : (fast_sint_t)k - 1 - omp_block_start; #else UNUSED(threads); fast_sint_t omp_block_start = 0; fast_sint_t omp_block_size = (fast_sint_t)k - 1; #endif libsais_radix_sort_set_markers_32s_4k(SA, induction_bucket, omp_block_start, omp_block_size); } }
/* Prepares offsets for the left-to-right partial-sorting scan (8-bit alphabet). Adds the
   first LMS suffix to its symbol's slot-1 counter, then per symbol: temp_bucket slot 0 gets
   the exclusive running S-side total (seeded with left_suffixes_count + 1), while the
   original buckets are folded into pairs in place -- slot 0 receives the inclusive running
   total of SS+SL (slots 0 and 2), slot 1 the inclusive running total of LS (slot 1). Temp
   buckets live at buckets[4 * ALPHABET_SIZE]. */
static void libsais_initialize_buckets_for_partial_sorting_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count) { sa_sint_t * RESTRICT temp_bucket = &buckets[4 * ALPHABET_SIZE]; buckets[BUCKETS_INDEX4((fast_uint_t)T[first_lms_suffix], 1)]++; fast_sint_t i, j; sa_sint_t sum0 = left_suffixes_count + 1, sum1 = 0; for (i = BUCKETS_INDEX4(0, 0), j = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX4(ALPHABET_SIZE - 1, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0)) { temp_bucket[j + BUCKETS_INDEX2(0, 0)] = sum0; sum0 += buckets[i + BUCKETS_INDEX4(0, 0)] + buckets[i + BUCKETS_INDEX4(0, 2)]; sum1 += buckets[i + BUCKETS_INDEX4(0, 1)]; buckets[j + BUCKETS_INDEX2(0, 0)] = sum0; buckets[j + BUCKETS_INDEX2(0, 1)] = sum1; } }
/* 6k variant for a k-symbol integer alphabet: splits the symbol range at the first LMS
   suffix's symbol, converting the 4-slot counters (SS, LS, SL, LL) into running offsets in
   place (slots 2 and 3 are zeroed) and writing paired totals into the temp buckets at
   buckets[4k]; sum1 is bumped by one once past the split point (body continues on the next
   source line). */
static void libsais_initialize_buckets_for_partial_sorting_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count) { sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k]; fast_sint_t i, j; sa_sint_t sum0 = left_suffixes_count + 1, sum1 = 0, sum2 = 0; for (first_lms_suffix = T[first_lms_suffix], i = BUCKETS_INDEX4(0, 0), j = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX4((fast_sint_t)first_lms_suffix - 1, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0)) { sa_sint_t SS = buckets[i + BUCKETS_INDEX4(0, 0)]; sa_sint_t LS = buckets[i + BUCKETS_INDEX4(0, 1)]; sa_sint_t SL = buckets[i + BUCKETS_INDEX4(0, 2)]; sa_sint_t LL = buckets[i + BUCKETS_INDEX4(0, 3)]; buckets[i + BUCKETS_INDEX4(0, 0)] = sum0; buckets[i + BUCKETS_INDEX4(0, 1)] = sum2; buckets[i + BUCKETS_INDEX4(0, 2)] = 0; buckets[i + BUCKETS_INDEX4(0, 3)] = 0; sum0 += SS + SL; sum1 += LS; sum2
+= LS + LL; temp_bucket[j + BUCKETS_INDEX2(0, 0)] = sum0; temp_bucket[j + BUCKETS_INDEX2(0, 1)] = sum1; } /* second half of the symbol range, past the first LMS suffix's symbol: same folding, with sum1 offset by one */ for (sum1 += 1; i <= BUCKETS_INDEX4((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0)) { sa_sint_t SS = buckets[i + BUCKETS_INDEX4(0, 0)]; sa_sint_t LS = buckets[i + BUCKETS_INDEX4(0, 1)]; sa_sint_t SL = buckets[i + BUCKETS_INDEX4(0, 2)]; sa_sint_t LL = buckets[i + BUCKETS_INDEX4(0, 3)]; buckets[i + BUCKETS_INDEX4(0, 0)] = sum0; buckets[i + BUCKETS_INDEX4(0, 1)] = sum2; buckets[i + BUCKETS_INDEX4(0, 2)] = 0; buckets[i + BUCKETS_INDEX4(0, 3)] = 0; sum0 += SS + SL; sum1 += LS; sum2 += LS + LL; temp_bucket[j + BUCKETS_INDEX2(0, 0)] = sum0; temp_bucket[j + BUCKETS_INDEX2(0, 1)] = sum1; } }
/* Left-to-right induction step with on-the-fly naming (8-bit alphabet): for each entry p in
   SA[block], appends p - 1 to the bucket keyed by (T[p-1], T[p-2] >= T[p-1]). The rank
   counter d is advanced whenever the consumed entry carries the sign bit, and the written
   entry's sign bit is set when this bucket last saw a different d (distinct_names tracks the
   last d per bucket), marking group boundaries. Returns the advanced d. 2x unrolled with
   prefetch of SA and the two preceding T bytes (body continues on the next source line). */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[4 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetch(&SA[i + 2 * prefetch_distance]); libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2); libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 2); sa_sint_t p0 = SA[i + 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] >= T[p0 - 1]); SA[induction_bucket[v0]++] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d; sa_sint_t p1 = SA[i + 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] >= T[p1 - 1]); SA[induction_bucket[v1]++] = (p1 - 1) |
((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] >= T[p - 1]); SA[induction_bucket[v]++] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d; } return d; } #if defined(_OPENMP)
/* Parallel first pass over one thread's slice: replays the same scan as
   libsais_partial_sorting_scan_left_to_right_8u but, instead of writing into SA, records
   each (index, bucket) pair into the thread's cache and only counts bucket occupancies and
   last-seen names into the thread-private buckets (zeroed here). The local rank advance
   (d - 1) and the entry count are stored in the thread state for the later merge; actual
   placement happens in the _block_place pass (body continues on the next source line). */
static void libsais_partial_sorting_scan_left_to_right_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size, LIBSAIS_THREAD_STATE * RESTRICT state) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; sa_sint_t d = 1; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetch(&SA[i + 2 * prefetch_distance]); libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2); libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 2); sa_sint_t p0 = cache[count].index = SA[i + 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = cache[count++].symbol = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] >= T[p0 - 1]); induction_bucket[v0]++; distinct_names[v0] = d; sa_sint_t p1 = cache[count].index = SA[i + 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = cache[count++].symbol = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] >= T[p1 - 1]); induction_bucket[v1]++; distinct_names[v1] = d; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = cache[count].index = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v =
cache[count++].symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] >= T[p - 1]); induction_bucket[v]++; distinct_names[v] = d; } state[0].state.position = (fast_sint_t)d - 1; state[0].state.count = count; }
/* Parallel second pass: replays the thread's cached (index, bucket) entries and performs
   the actual SA writes, using bucket offsets and a starting rank d that have been globally
   adjusted by the master merge in the _block_omp driver; 2x unrolled with cache prefetch. */
static void libsais_partial_sorting_scan_left_to_right_8u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count, sa_sint_t d) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t i, j; for (i = 0, j = count - 1; i < j; i += 2) { libsais_prefetch(&cache[i + prefetch_distance]); sa_sint_t p0 = cache[i + 0].index; d += (p0 < 0); sa_sint_t v0 = cache[i + 0].symbol; SA[induction_bucket[v0]++] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d; sa_sint_t p1 = cache[i + 1].index; d += (p1 < 0); sa_sint_t v1 = cache[i + 1].symbol; SA[induction_bucket[v1]++] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d; } for (j += 1; i < j; i += 1) { sa_sint_t p = cache[i].index; d += (p < 0); sa_sint_t v = cache[i].symbol; SA[induction_bucket[v]++] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d; } }
/* Parallel driver for one dense block of the left-to-right scan: per-thread prepare passes,
   a master-only merge that turns per-thread counts into global offsets and rank bases, then
   per-thread place passes; barriers separate the phases (body continues on the next source
   line). */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads =
1; #endif /* carve the block into per-thread slices in multiples of 16; last thread takes the remainder */ fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { d = libsais_partial_sorting_scan_left_to_right_8u(T, SA, buckets, d, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais_partial_sorting_scan_left_to_right_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size, &thread_state[omp_thread_num]); } #pragma omp barrier #pragma omp master { /* master merge: for each bucket, convert per-thread counts into exclusive global offsets (A = old global total becomes the thread's base, global grows by B), carry the shared name table forward where a thread touched the bucket, and accumulate each thread's rank base into its state.position */ sa_sint_t * RESTRICT induction_bucket = &buckets[4 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t t; for (t = 0; t < omp_num_threads; ++t) { sa_sint_t * RESTRICT temp_induction_bucket = &thread_state[t].state.buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT temp_distinct_names = &thread_state[t].state.buckets[2 * ALPHABET_SIZE]; fast_sint_t c; for (c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_induction_bucket[c]; induction_bucket[c] = A + B; temp_induction_bucket[c] = A; } for (d -= 1, c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = distinct_names[c], B = temp_distinct_names[c], D = B + d; distinct_names[c] = B > 0 ?
D : A; temp_distinct_names[c] = A; } d += 1 + (sa_sint_t)thread_state[t].state.position; thread_state[t].state.position = (fast_sint_t)d - thread_state[t].state.position; } } #pragma omp barrier { libsais_partial_sorting_scan_left_to_right_8u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count, (sa_sint_t)thread_state[omp_thread_num].state.position); } } #endif } return d; } #endif
/* Top-level left-to-right scan (8-bit alphabet): seeds the induction with suffix n - 1
   (sign-bit tagged, rank d incremented), then either runs the whole serial scan or walks
   SA skipping empty (zero) slots one by one, handling short dense runs inline and handing
   runs of >= 32 entries to the parallel block driver. Returns the final rank counter d. */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t * RESTRICT induction_bucket = &buckets[4 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; SA[induction_bucket[BUCKETS_INDEX2(T[n - 1], T[n - 2] >= T[n - 1])]++] = (n - 1) | SAINT_MIN; distinct_names[BUCKETS_INDEX2(T[n - 1], T[n - 2] >= T[n - 1])] = ++d; if (threads == 1 || left_suffixes_count < 65536) { d = libsais_partial_sorting_scan_left_to_right_8u(T, SA, buckets, d, 0, left_suffixes_count); } #if defined(_OPENMP) else { fast_sint_t block_start; for (block_start = 0; block_start < left_suffixes_count; ) { if (SA[block_start] == 0) { block_start++; } else { fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > left_suffixes_count) { block_max_end = left_suffixes_count;} fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; } fast_sint_t block_size = block_end - block_start; if (block_size < 32) { for (; block_start < block_end; block_start += 1) { sa_sint_t p = SA[block_start]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] >= T[p - 1]); SA[induction_bucket[v]++] = (p - 1) |
((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d; } } else { d = libsais_partial_sorting_scan_left_to_right_8u_block_omp(T, SA, buckets, d, block_start, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif return d; }
/* 6k variant of the left-to-right induction for a 32-bit alphabet: same scheme as the 8u
   scan, but the write offset lives in slot 0 and the last-seen name in slot 2 of each
   symbol's 4-slot group; a deeper (2x distance) prefetch pipeline also pre-touches the
   bucket group of entries one distance ahead (body continues on the next source line). */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2) { libsais_prefetch(&SA[i + 3 * prefetch_distance]); libsais_prefetch(&T[SA[i + 2 * prefetch_distance + 0] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i + 2 * prefetch_distance + 0] & SAINT_MAX] - 2); libsais_prefetch(&T[SA[i + 2 * prefetch_distance + 1] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i + 2 * prefetch_distance + 1] & SAINT_MAX] - 2); sa_sint_t p0 = SA[i + prefetch_distance + 0] & SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX4(T[p0 - (p0 > 0)], 0); libsais_prefetchw(&buckets[v0]); sa_sint_t p1 = SA[i + prefetch_distance + 1] & SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX4(T[p1 - (p1 > 0)], 0); libsais_prefetchw(&buckets[v1]); sa_sint_t p2 = SA[i + 0]; d += (p2 < 0); p2 &= SAINT_MAX; sa_sint_t v2 = BUCKETS_INDEX4(T[p2 - 1], T[p2 - 2] >= T[p2 - 1]); SA[buckets[v2]++] = (p2 - 1) | ((sa_sint_t)(buckets[2 + v2] != d) << (SAINT_BIT - 1)); buckets[2 + v2] = d; sa_sint_t p3 = SA[i + 1]; d += (p3 < 0); p3 &= SAINT_MAX; sa_sint_t v3 = BUCKETS_INDEX4(T[p3 - 1], T[p3 - 2] >= T[p3 - 1]); SA[buckets[v3]++] = (p3 - 1) | ((sa_sint_t)(buckets[2 + v3] != d) << (SAINT_BIT - 1)); buckets[2 + v3] = d; } for (j += 2 * prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX4(T[p - 1], T[p - 2] >= T[p - 1]); SA[buckets[v]++] = (p - 1) |
((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d; } return d; }

/* Serial left-to-right induction scan, 32-bit alphabet, 4k-word bucket layout.
 * Here SA entries carry SUFFIX_GROUP_MARKER (bit SUFFIX_GROUP_BIT-1) as the group marker and
 * the sign bit encodes the L/S-type comparison of the two preceding symbols; d is bumped from
 * the group-marker bit.  A processed positive entry is cleared to 0 (SA[i] = 0) and the induced
 * suffix is written into induction_bucket keyed by T[p-1], with distinct_names keyed by
 * BUCKETS_INDEX2.  2x unrolled with prefetching; the `s? > 0` ternaries feed NULL to the
 * prefetcher for non-positive entries so no out-of-range address is formed.  Returns updated d. */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[2 * k]; sa_sint_t * RESTRICT distinct_names = &buckets[0 * k]; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 3 * prefetch_distance]); sa_sint_t s0 = SA[i + 2 * prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + 2 * prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t s2 = SA[i + 1 * prefetch_distance + 0]; if (s2 > 0) { const fast_sint_t Ts2 = T[(s2 & ~SUFFIX_GROUP_MARKER) - 1]; libsais_prefetchw(&induction_bucket[Ts2]); libsais_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts2, 0)]); } sa_sint_t s3 = SA[i + 1 * prefetch_distance + 1]; if (s3 > 0) { const fast_sint_t Ts3 = T[(s3 & ~SUFFIX_GROUP_MARKER) - 1]; libsais_prefetchw(&induction_bucket[Ts3]); libsais_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts3, 0)]); } sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { SA[i + 0] = 0; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); p0 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] < T[p0 - 1]); SA[induction_bucket[T[p0 - 1]]++] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] < T[p0 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d; } sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { SA[i + 1] = 0; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); p1 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] < T[p1 - 1]); SA[induction_bucket[T[p1 - 1]]++] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] < T[p1 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d; } } for (j += 2 * prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { SA[i] = 0; d += (p >> (SUFFIX_GROUP_BIT - 1)); p &= ~SUFFIX_GROUP_MARKER; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] < T[p - 1]); SA[induction_bucket[T[p - 1]]++] = (p - 1) | ((sa_sint_t)(T[p - 2] < T[p - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d; } } return d; }

/* Serial left-to-right induction scan, 32-bit alphabet, 1k-word bucket layout (no distinct-name
 * tracking).  Positive entries are consumed (SA[i] = 0) and suffix p-1 is induced into
 * induction_bucket[T[p-1]] with the sign bit recording T[p-2] < T[p-1].  2x unrolled with
 * prefetching; scalar tail loop. */
static void libsais_partial_sorting_scan_left_to_right_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 
32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 3 * prefetch_distance]); sa_sint_t s0 = SA[i + 2 * prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + 2 * prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t s2 = SA[i + 1 * prefetch_distance + 0]; if (s2 > 0) { libsais_prefetchw(&induction_bucket[T[s2 - 1]]); libsais_prefetch(&T[s2] - 2); } sa_sint_t s3 = SA[i + 1 * prefetch_distance + 1]; if (s3 > 0) { libsais_prefetchw(&induction_bucket[T[s3 - 1]]); libsais_prefetch(&T[s3] - 2); } sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { SA[i + 0] = 0; SA[induction_bucket[T[p0 - 1]]++] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] < T[p0 - 1]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { SA[i + 1] = 0; SA[induction_bucket[T[p1 - 1]]++] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] < T[p1 - 1]) << (SAINT_BIT - 1)); } } for (j += 2 * prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { SA[i] = 0; SA[induction_bucket[T[p - 1]]++] = (p - 1) | ((sa_sint_t)(T[p - 2] < T[p - 1]) << (SAINT_BIT - 1)); } } }

#if defined(_OPENMP)

/* Parallel "gather" phase (6k layout): per-thread pass that copies each SA entry into the shared
 * cache (cache[i].index = SA[i]) and precomputes its BUCKETS_INDEX4 bucket key into
 * cache[i].symbol (0 for a consumed/zero entry).  Read-only with respect to SA; the subsequent
 * single-threaded block_sort pass consumes the cache.  2x unrolled with prefetching. */
static void libsais_partial_sorting_scan_left_to_right_32s_6k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetch(&SA[i + 2 * prefetch_distance]); libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2); libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 2); libsais_prefetchw(&cache[i + prefetch_distance]); sa_sint_t p0 = cache[i + 0].index = SA[i + 0]; sa_sint_t symbol0 = 0; p0 &= SAINT_MAX; if (p0 != 0) { symbol0 = BUCKETS_INDEX4(T[p0 - 1], T[p0 - 2] >= T[p0 - 1]); } cache[i + 0].symbol = symbol0; sa_sint_t p1 = cache[i + 1].index = SA[i + 1]; sa_sint_t symbol1 = 0; p1 &= SAINT_MAX; if (p1 != 0) { symbol1 = BUCKETS_INDEX4(T[p1 - 1], T[p1 - 2] >= T[p1 - 1]); } cache[i + 1].symbol = symbol1; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = cache[i].index = SA[i]; sa_sint_t symbol = 0; p &= SAINT_MAX; if (p != 0) { symbol = BUCKETS_INDEX4(T[p - 1], T[p - 2] >= T[p - 1]); } cache[i].symbol = symbol; } }

/* Parallel "gather" phase (4k layout): caches each positive SA entry (index + BUCKETS_INDEX2
 * key in cache[i].symbol) and consumes it from SA; non-positive entries get symbol = SAINT_MIN
 * so the sort phase skips them, and SA keeps the value with the sign bit stripped. */
static void libsais_partial_sorting_scan_left_to_right_32s_4k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? 
Ts1 : NULL); libsais_prefetchw(&cache[i + prefetch_distance]); sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { cache[i + 0].index = p0; p0 &= ~SUFFIX_GROUP_MARKER; symbol0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] < T[p0 - 1]); p0 = 0; } cache[i + 0].symbol = symbol0; SA[i + 0] = p0 & SAINT_MAX; sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { cache[i + 1].index = p1; p1 &= ~SUFFIX_GROUP_MARKER; symbol1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] < T[p1 - 1]); p1 = 0; } cache[i + 1].symbol = symbol1; SA[i + 1] = p1 & SAINT_MAX; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { cache[i].index = p; p &= ~SUFFIX_GROUP_MARKER; symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] < T[p - 1]); p = 0; } cache[i].symbol = symbol; SA[i] = p & SAINT_MAX; } }

/* Parallel "gather" phase (1k layout): for each positive SA entry the fully-induced value
 * (p-1 with the sign bit encoding T[p-2] < T[p-1]) is precomputed into cache[i].index and the
 * destination symbol T[p-1] into cache[i].symbol; the entry is consumed from SA.
 * Non-positive entries get symbol = SAINT_MIN (skip marker). */
static void libsais_partial_sorting_scan_left_to_right_32s_1k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL); libsais_prefetchw(&cache[i + prefetch_distance]); sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { cache[i + 0].index = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] < T[p0 - 1]) << (SAINT_BIT - 1)); symbol0 = T[p0 - 1]; p0 = 0; } cache[i + 0].symbol = symbol0; SA[i + 0] = p0 & SAINT_MAX; sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { cache[i + 1].index = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] < T[p1 - 1]) << (SAINT_BIT - 1)); symbol1 = T[p1 - 1]; p1 = 0; } cache[i + 1].symbol = symbol1; SA[i + 1] = p1 & SAINT_MAX; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { cache[i].index = (p - 1) | ((sa_sint_t)(T[p - 2] < T[p - 1]) << (SAINT_BIT - 1)); symbol = T[p - 1]; p = 0; } cache[i].symbol = symbol; SA[i] = p & SAINT_MAX; } }

/* Single-threaded "sort" phase (6k layout), run under #pragma omp master on the gathered cache.
 * Sequentially assigns each cached entry its destination slot (buckets[v]++), updates d and the
 * distinct-name marker exactly as the serial 6k scan would, and — when the destination falls
 * inside the current block (cache[...].symbol < omp_block_end) — forwards the induced entry back
 * into the cache so it is processed within the same pass.  Returns updated d. */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_6k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size; for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&cache[i + 2 * prefetch_distance]); libsais_prefetchw(&buckets[cache[i + prefetch_distance + 0].symbol]); libsais_prefetchw(&buckets[cache[i + prefetch_distance + 1].symbol]); sa_sint_t v0 = cache[i + 0].symbol, p0 = cache[i + 0].index; d += (p0 < 0); cache[i + 0].symbol = buckets[v0]++; cache[i + 0].index = (p0 - 1) | ((sa_sint_t)(buckets[2 + v0] != d) << (SAINT_BIT - 1)); buckets[2 + v0] = d; if (cache[i + 0].symbol < omp_block_end) { sa_sint_t s = cache[i + 0].symbol, q = (cache[s].index = cache[i + 0].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] >= T[q - 1]); } sa_sint_t v1 = cache[i + 1].symbol, p1 = cache[i + 1].index; d += (p1 < 0); cache[i + 1].symbol = buckets[v1]++; cache[i + 
1].index = (p1 - 1) | ((sa_sint_t)(buckets[2 + v1] != d) << (SAINT_BIT - 1)); buckets[2 + v1] = d; if (cache[i + 1].symbol < omp_block_end) { sa_sint_t s = cache[i + 1].symbol, q = (cache[s].index = cache[i + 1].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] >= T[q - 1]); } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t v = cache[i].symbol, p = cache[i].index; d += (p < 0); cache[i].symbol = buckets[v]++; cache[i].index = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d; if (cache[i].symbol < omp_block_end) { sa_sint_t s = cache[i].symbol, q = (cache[s].index = cache[i].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] >= T[q - 1]); } } return d; }

/* Single-threaded "sort" phase (4k layout), run under #pragma omp master.
 * Entries with symbol >= 0 are live; v encodes the BUCKETS_INDEX2 key (low bit = type flag, so
 * the induction bucket is indexed with v >> 1 and the flag is re-packed via v << (SAINT_BIT-1)).
 * d advances from the suffix-group marker bit; destinations that land inside the block are
 * re-gathered into the cache in-pass, mirroring the serial 4k scan's consume-and-induce step.
 * Returns updated d. */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_4k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[2 * k]; sa_sint_t * RESTRICT distinct_names = &buckets[0 * k]; fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size; for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&cache[i + 2 * prefetch_distance]); sa_sint_t s0 = cache[i + prefetch_distance + 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0 >> 1]; libsais_prefetchw(s0 >= 0 ? Is0 : NULL); const sa_sint_t * Ds0 = &distinct_names[s0]; libsais_prefetchw(s0 >= 0 ? Ds0 : NULL); sa_sint_t s1 = cache[i + prefetch_distance + 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1 >> 1]; libsais_prefetchw(s1 >= 0 ? Is1 : NULL); const sa_sint_t * Ds1 = &distinct_names[s1]; libsais_prefetchw(s1 >= 0 ? Ds1 : NULL); sa_sint_t v0 = cache[i + 0].symbol; if (v0 >= 0) { sa_sint_t p0 = cache[i + 0].index; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); cache[i + 0].symbol = induction_bucket[v0 >> 1]++; cache[i + 0].index = (p0 - 1) | (v0 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d; if (cache[i + 0].symbol < omp_block_end) { sa_sint_t ni = cache[i + 0].symbol, np = cache[i + 0].index; if (np > 0) { cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] < T[np - 1]); np = 0; } cache[i + 0].index = np & SAINT_MAX; } } sa_sint_t v1 = cache[i + 1].symbol; if (v1 >= 0) { sa_sint_t p1 = cache[i + 1].index; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); cache[i + 1].symbol = induction_bucket[v1 >> 1]++; cache[i + 1].index = (p1 - 1) | (v1 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d; if (cache[i + 1].symbol < omp_block_end) { sa_sint_t ni = cache[i + 1].symbol, np = cache[i + 1].index; if (np > 0) { cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] < T[np - 1]); np = 0; } cache[i + 1].index = np & SAINT_MAX; } } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t v = cache[i].symbol; if (v >= 0) { sa_sint_t p = cache[i].index; d += (p >> (SUFFIX_GROUP_BIT - 1)); cache[i].symbol = induction_bucket[v >> 1]++; cache[i].index = (p - 1) | (v << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d; if (cache[i].symbol < omp_block_end) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] < T[np - 1]); np = 0; } cache[i].index = np & SAINT_MAX; } } } return d; }

/* Single-threaded "sort" phase (1k layout), run under #pragma omp master.
 * Live entries (symbol >= 0) claim their slot via induction_bucket[v]++; in-block destinations
 * are re-gathered in-pass (induced value + next symbol written into the cache). */
static void libsais_partial_sorting_scan_left_to_right_32s_1k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT 
induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size; for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&cache[i + 2 * prefetch_distance]); sa_sint_t s0 = cache[i + prefetch_distance + 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais_prefetchw(s0 >= 0 ? Is0 : NULL); sa_sint_t s1 = cache[i + prefetch_distance + 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais_prefetchw(s1 >= 0 ? Is1 : NULL); sa_sint_t v0 = cache[i + 0].symbol; if (v0 >= 0) { cache[i + 0].symbol = induction_bucket[v0]++; if (cache[i + 0].symbol < omp_block_end) { sa_sint_t ni = cache[i + 0].symbol, np = cache[i + 0].index; if (np > 0) { cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] < T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; np = 0; } cache[i + 0].index = np & SAINT_MAX; } } sa_sint_t v1 = cache[i + 1].symbol; if (v1 >= 0) { cache[i + 1].symbol = induction_bucket[v1]++; if (cache[i + 1].symbol < omp_block_end) { sa_sint_t ni = cache[i + 1].symbol, np = cache[i + 1].index; if (np > 0) { cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] < T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; np = 0; } cache[i + 1].index = np & SAINT_MAX; } } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t v = cache[i].symbol; if (v >= 0) { cache[i].symbol = induction_bucket[v]++; if (cache[i].symbol < omp_block_end) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] < T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; np = 0; } cache[i].index = np & SAINT_MAX; } } } }

/* Parallel block driver (6k layout): gather (all threads) -> barrier -> sort (master only)
 * -> barrier -> place (all threads).  `cache - block_start` rebases the cache so it can be
 * indexed with absolute SA positions.  Threads split the block into stride-aligned (&(-16))
 * sub-ranges; the last thread takes the remainder.  Falls back to the serial 6k scan when the
 * parallel region ends up with a single thread.  Returns updated d (assigned under master). */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_6k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) {
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1;
#endif
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { d = libsais_partial_sorting_scan_left_to_right_32s_6k(T, SA, buckets, d, omp_block_start, omp_block_size); }
#if defined(_OPENMP)
else { { libsais_partial_sorting_scan_left_to_right_32s_6k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); }
#pragma omp barrier
#pragma omp master
{ d = libsais_partial_sorting_scan_left_to_right_32s_6k_block_sort(T, buckets, d, cache - block_start, block_start, block_size); }
#pragma omp barrier
{ libsais_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } }
#endif
} return d; }

/* Parallel block driver (4k layout): same gather/sort/place pipeline as the 6k variant, but the
 * place step uses libsais_compact_and_place_cached_suffixes (skip-marker entries are compacted). */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_4k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) {
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1;
#endif
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { d = libsais_partial_sorting_scan_left_to_right_32s_4k(T, SA, k, buckets, d, omp_block_start, omp_block_size); }
#if defined(_OPENMP)
else { { libsais_partial_sorting_scan_left_to_right_32s_4k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); }
#pragma omp barrier
#pragma omp master
{ d = libsais_partial_sorting_scan_left_to_right_32s_4k_block_sort(T, k, buckets, d, cache - block_start, block_start, block_size); }
#pragma omp barrier
{ libsais_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } }
#endif
} return d; }

/* Parallel block driver (1k layout): gather -> barrier -> sort (master) -> barrier ->
 * compact-and-place.  No distinct-name counter in this layout, hence void return. */
static void libsais_partial_sorting_scan_left_to_right_32s_1k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) {
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1;
#endif
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_partial_sorting_scan_left_to_right_32s_1k(T, SA, buckets, omp_block_start, omp_block_size); }
#if defined(_OPENMP)
else { { libsais_partial_sorting_scan_left_to_right_32s_1k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); }
#pragma omp barrier
#pragma omp master
{ libsais_partial_sorting_scan_left_to_right_32s_1k_block_sort(T, buckets, cache - block_start, block_start, block_size); }
#pragma omp barrier
{ libsais_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } }
#endif
} }
#endif

/* Top-level left-to-right scan driver (32-bit alphabet, 6k layout).
 * Seeds suffix n-1 (sign bit set, d bumped), then runs the serial scan for one thread or small
 * inputs; otherwise walks the range in fixed-size chunks (threads * LIBSAIS_PER_THREAD_CACHE_SIZE)
 * through the parallel block driver, reusing thread_state[0]'s cache.  Returns updated d. */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { SA[buckets[BUCKETS_INDEX4(T[n - 1], T[n - 2] >= T[n - 1])]++] = (n - 1) | SAINT_MIN; buckets[2 + BUCKETS_INDEX4(T[n - 1], T[n - 2] >= T[n - 1])] = ++d; if (threads == 1 || left_suffixes_count < 65536) { d = libsais_partial_sorting_scan_left_to_right_32s_6k(T, SA, buckets, d, 0, left_suffixes_count); }
#if defined(_OPENMP)
else { fast_sint_t block_start, block_end; for (block_start = 0; block_start < left_suffixes_count; block_start = block_end) { block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > left_suffixes_count) { block_end = left_suffixes_count; } d = libsais_partial_sorting_scan_left_to_right_32s_6k_block_omp(T, SA, buckets, d, thread_state[0].state.cache, block_start, block_end - block_start, threads); } }
#else
UNUSED(thread_state);
#endif
return d; }

/* Top-level left-to-right scan driver (32-bit alphabet, 4k layout).  Seeds suffix n-1 with the
 * type flag in the sign bit plus SUFFIX_GROUP_MARKER, then scans the whole range [0, n) either
 * serially or in parallel chunks like the 6k driver.  Returns updated d. */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, sa_sint_t threads, 
LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t * RESTRICT induction_bucket = &buckets[2 * k]; sa_sint_t * RESTRICT distinct_names = &buckets[0 * k]; SA[induction_bucket[T[n - 1]]++] = (n - 1) | ((sa_sint_t)(T[n - 2] < T[n - 1]) << (SAINT_BIT - 1)) | SUFFIX_GROUP_MARKER; distinct_names[BUCKETS_INDEX2(T[n - 1], T[n - 2] < T[n - 1])] = ++d; if (threads == 1 || n < 65536) { d = libsais_partial_sorting_scan_left_to_right_32s_4k(T, SA, k, buckets, d, 0, n); }
#if defined(_OPENMP)
else { fast_sint_t block_start, block_end; for (block_start = 0; block_start < n; block_start = block_end) { block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > n) { block_end = n; } d = libsais_partial_sorting_scan_left_to_right_32s_4k_block_omp(T, SA, k, buckets, d, thread_state[0].state.cache, block_start, block_end - block_start, threads); } }
#else
UNUSED(thread_state);
#endif
return d; }

/* Top-level left-to-right scan driver (32-bit alphabet, 1k layout).  Seeds suffix n-1 (type flag
 * in the sign bit, no group marker / name counting), then scans [0, n) serially or in parallel
 * chunks via the 1k block driver. */
static void libsais_partial_sorting_scan_left_to_right_32s_1k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { SA[buckets[T[n - 1]]++] = (n - 1) | ((sa_sint_t)(T[n - 2] < T[n - 1]) << (SAINT_BIT - 1)); if (threads == 1 || n < 65536) { libsais_partial_sorting_scan_left_to_right_32s_1k(T, SA, buckets, 0, n); }
#if defined(_OPENMP)
else { fast_sint_t block_start, block_end; for (block_start = 0; block_start < n; block_start = block_end) { block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > n) { block_end = n; } libsais_partial_sorting_scan_left_to_right_32s_1k_block_omp(T, SA, buckets, thread_state[0].state.cache, block_start, block_end - block_start, threads); } }
#else
UNUSED(thread_state);
#endif
}

/* Propagates sign-bit markers backwards within each bucket (8-bit alphabet).
 * Walks every two-symbol bucket from its end (temp_bucket[c]-1) down to the start of the
 * previous bucket, XOR-toggling a running sign state `s` so that a marker set on one entry is
 * shifted onto the entries below it.  Buckets are independent, so the outer per-symbol loop is
 * parallelized with schedule(static, 1).  Inner loop is 4x unrolled with a scalar tail. */
static void libsais_partial_sorting_shift_markers_8u_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, const sa_sint_t * RESTRICT buckets, sa_sint_t threads) { const fast_sint_t prefetch_distance = 32; const sa_sint_t * RESTRICT temp_bucket = &buckets[4 * ALPHABET_SIZE]; fast_sint_t c;
#if defined(_OPENMP)
#pragma omp parallel for schedule(static, 1) num_threads(threads) if(threads > 1 && n >= 65536)
#else
UNUSED(threads); UNUSED(n);
#endif
for (c = BUCKETS_INDEX2(ALPHABET_SIZE - 1, 0); c >= BUCKETS_INDEX2(1, 0); c -= BUCKETS_INDEX2(1, 0)) { fast_sint_t i, j; sa_sint_t s = SAINT_MIN; for (i = (fast_sint_t)temp_bucket[c] - 1, j = (fast_sint_t)buckets[c - BUCKETS_INDEX2(1, 0)] + 3; i >= j; i -= 4) { libsais_prefetchw(&SA[i - prefetch_distance]); sa_sint_t p0 = SA[i - 0], q0 = (p0 & SAINT_MIN) ^ s; s = s ^ q0; SA[i - 0] = p0 ^ q0; sa_sint_t p1 = SA[i - 1], q1 = (p1 & SAINT_MIN) ^ s; s = s ^ q1; SA[i - 1] = p1 ^ q1; sa_sint_t p2 = SA[i - 2], q2 = (p2 & SAINT_MIN) ^ s; s = s ^ q2; SA[i - 2] = p2 ^ q2; sa_sint_t p3 = SA[i - 3], q3 = (p3 & SAINT_MIN) ^ s; s = s ^ q3; SA[i - 3] = p3 ^ q3; } for (j -= 3; i >= j; i -= 1) { sa_sint_t p = SA[i], q = (p & SAINT_MIN) ^ s; s = s ^ q; SA[i] = p ^ q; } } }

/* Same backward sign-bit marker propagation for the 32-bit alphabet / 6k bucket layout;
 * per-symbol outer loop over k symbols, parallelized the same way. */
static void libsais_partial_sorting_shift_markers_32s_6k_omp(sa_sint_t * RESTRICT SA, sa_sint_t k, const sa_sint_t * RESTRICT buckets, sa_sint_t threads) { const fast_sint_t prefetch_distance = 32; const sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k]; fast_sint_t c;
#if defined(_OPENMP)
#pragma omp parallel for schedule(static, 1) num_threads(threads) if(threads > 1 && k >= 65536)
#else
UNUSED(threads);
#endif
for (c = (fast_sint_t)k - 1; c >= 1; c -= 1) { fast_sint_t i, j; sa_sint_t s = SAINT_MIN; for (i = (fast_sint_t)buckets[BUCKETS_INDEX4(c, 0)] - 1, j = (fast_sint_t)temp_bucket[BUCKETS_INDEX2(c - 1, 0)] + 3; i >= j; i -= 4) { libsais_prefetchw(&SA[i - prefetch_distance]); sa_sint_t p0 = SA[i - 0], q0 = (p0 & SAINT_MIN) ^ s; s = s ^ q0; SA[i - 0] = p0 ^ q0; sa_sint_t p1 = SA[i - 1], q1 = (p1 & SAINT_MIN) ^ s; s = s ^ q1; SA[i - 1] = p1 ^ q1; sa_sint_t p2 = SA[i - 2], q2 = (p2 & SAINT_MIN) ^ s; s = s ^ q2; SA[i - 2] = p2 ^ q2; sa_sint_t p3 = SA[i - 3], q3 = (p3 & 
SAINT_MIN) ^ s; s = s ^ q3; SA[i - 3] = p3 ^ q3; } for (j -= 3; i >= j; i -= 1) { sa_sint_t p = SA[i], q = (p & SAINT_MIN) ^ s; s = s ^ q; SA[i] = p ^ q; } } }

/* Backward propagation of the SUFFIX_GROUP_MARKER bit (4k layout): a single right-to-left pass
 * over SA[0, n); the marker toggle `q` is additionally masked by (p > 0) so only live (positive)
 * entries participate.  4x unrolled with a scalar tail. */
static void libsais_partial_sorting_shift_markers_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t n) { const fast_sint_t prefetch_distance = 32; fast_sint_t i; sa_sint_t s = SUFFIX_GROUP_MARKER; for (i = (fast_sint_t)n - 1; i >= 3; i -= 4) { libsais_prefetchw(&SA[i - prefetch_distance]); sa_sint_t p0 = SA[i - 0], q0 = ((p0 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p0 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q0; SA[i - 0] = p0 ^ q0; sa_sint_t p1 = SA[i - 1], q1 = ((p1 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p1 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q1; SA[i - 1] = p1 ^ q1; sa_sint_t p2 = SA[i - 2], q2 = ((p2 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p2 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q2; SA[i - 2] = p2 ^ q2; sa_sint_t p3 = SA[i - 3], q3 = ((p3 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p3 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q3; SA[i - 3] = p3 ^ q3; } for (; i >= 0; i -= 1) { sa_sint_t p = SA[i], q = ((p & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q; SA[i] = p ^ q; } }

/* Copies the temp (2-per-symbol) bucket counters into the 4-per-symbol layout slots for each of
 * the k symbols, preparing the bucket table for the next scan phase. */
static void libsais_partial_sorting_shift_buckets_32s_6k(sa_sint_t k, sa_sint_t * RESTRICT buckets) { sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k]; fast_sint_t i; for (i = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0)) { buckets[2 * i + BUCKETS_INDEX4(0, 0)] = temp_bucket[i + BUCKETS_INDEX2(0, 0)]; buckets[2 * i + BUCKETS_INDEX4(0, 1)] = temp_bucket[i + BUCKETS_INDEX2(0, 1)]; } }

/* Serial right-to-left induction scan, 8-bit alphabet.  Mirror image of the left-to-right scan:
 * iterates the range from its end, decrements bucket cursors (SA[--induction_bucket[v]]) and uses
 * the strict `>` comparison of the two preceding symbols in the bucket key.  d counts distinct
 * names from the sign-bit markers; returns updated d.  2x unrolled with prefetching. */
static sa_sint_t libsais_partial_sorting_scan_right_to_left_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetch(&SA[i - 2 * prefetch_distance]); libsais_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 2); libsais_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 2); sa_sint_t p0 = SA[i - 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); SA[--induction_bucket[v0]] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d; sa_sint_t p1 = SA[i - 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); SA[--induction_bucket[v1]] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d; } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]); SA[--induction_bucket[v]] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d; } return d; }

#if defined(_OPENMP)

/* Per-thread "prepare" pass for the parallel right-to-left scan (8-bit alphabet).
 * Zeroes this thread's private bucket table, then scans its sub-range right-to-left recording
 * each entry and its bucket key into the cache while counting per-bucket occupancy
 * (induction_bucket[v]++) and a local distinct-name total d (starting from 1).
 * distinct_names[v] is left holding the last local d seen for v (nonzero marks "bucket used").
 * Stores d-1 in state.position and the number of cached entries in state.count for the master
 * thread's cross-thread fix-up. */
static void libsais_partial_sorting_scan_right_to_left_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size, LIBSAIS_THREAD_STATE * RESTRICT state) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; sa_sint_t d = 1; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { 
libsais_prefetch(&SA[i - 2 * prefetch_distance]); libsais_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 2); libsais_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 2); sa_sint_t p0 = cache[count].index = SA[i - 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = cache[count++].symbol = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); induction_bucket[v0]++; distinct_names[v0] = d; sa_sint_t p1 = cache[count].index = SA[i - 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = cache[count++].symbol = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); induction_bucket[v1]++; distinct_names[v1] = d; } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = cache[count].index = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = cache[count++].symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]); induction_bucket[v]++; distinct_names[v] = d; } state[0].state.position = (fast_sint_t)d - 1; state[0].state.count = count; }

/* Per-thread "place" pass for the parallel right-to-left scan (8-bit alphabet).
 * Replays the thread's cached entries in gather order against its (rebased) private buckets:
 * decrements the bucket cursor, writes the induced suffix with the distinct-name marker in the
 * sign bit, and tracks d exactly like the serial scan.  2x unrolled with a scalar tail. */
static void libsais_partial_sorting_scan_right_to_left_8u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count, sa_sint_t d) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t i, j; for (i = 0, j = count - 1; i < j; i += 2) { libsais_prefetch(&cache[i + prefetch_distance]); sa_sint_t p0 = cache[i + 0].index; d += (p0 < 0); sa_sint_t v0 = cache[i + 0].symbol; SA[--induction_bucket[v0]] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d; sa_sint_t p1 = cache[i + 1].index; d += (p1 < 0); sa_sint_t v1 = cache[i + 1].symbol; SA[--induction_bucket[v1]] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d; } for (j += 1; i < j; i += 1) { sa_sint_t p = cache[i].index; d += (p < 0); sa_sint_t v = cache[i].symbol; SA[--induction_bucket[v]] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d; } }

/* Parallel block driver for the right-to-left 8-bit scan: prepare (all threads, private
 * buckets/caches) -> barrier -> master rebases every thread's bucket cursors and distinct-name
 * counters against the shared table -> barrier -> place (all threads).  Note the extra
 * omp_get_dynamic() == 0 guard on the parallel clause.  Returns updated d. */
static sa_sint_t libsais_partial_sorting_scan_right_to_left_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) {
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1;
#endif
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start;
        omp_block_start += block_start;

        if (omp_num_threads == 1)
        {
            d = libsais_partial_sorting_scan_right_to_left_8u(T, SA, buckets, d, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            /* phase 1: every thread gathers its sub-block into its private cache */
            {
                libsais_partial_sorting_scan_right_to_left_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size, &thread_state[omp_thread_num]);
            }

            #pragma omp barrier

            /* phase 2 (master only): rebase the shared and per-thread bucket
               counters so each thread can later place entries independently */
            #pragma omp master
            {
                sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE];
                sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];

                fast_sint_t t;
                for (t = omp_num_threads - 1; t >= 0; --t)
                {
                    sa_sint_t * RESTRICT temp_induction_bucket = &thread_state[t].state.buckets[0 * ALPHABET_SIZE];
                    sa_sint_t * RESTRICT temp_distinct_names = &thread_state[t].state.buckets[2 * ALPHABET_SIZE];

                    fast_sint_t c;
                    for (c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_induction_bucket[c]; induction_bucket[c] = A - B; temp_induction_bucket[c] = A; }

                    for (d -= 1, c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = distinct_names[c], B = temp_distinct_names[c], D = B + d; distinct_names[c] = B > 0 ? D : A; temp_distinct_names[c] = A; }

                    d += 1 + (sa_sint_t)thread_state[t].state.position;
                    thread_state[t].state.position = (fast_sint_t)d - thread_state[t].state.position;
                }
            }

            #pragma omp barrier

            /* phase 3: every thread places its cached entries into SA */
            {
                libsais_partial_sorting_scan_right_to_left_8u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count, (sa_sint_t)thread_state[omp_thread_num].state.position);
            }
        }
#endif
    }

    return d;
}

#endif

/* Right-to-left induction scan driver (8-bit input): scans the range
   [left_suffixes_count + 1, n - first_lms_suffix) serially when small,
   otherwise carves it into cache-sized blocks bounded by zero entries and
   runs each block through the parallel pipeline. */
static void libsais_partial_sorting_scan_right_to_left_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    fast_sint_t scan_start = (fast_sint_t)left_suffixes_count + 1;
    fast_sint_t scan_end = (fast_sint_t)n - (fast_sint_t)first_lms_suffix;

    if (threads == 1 || (scan_end - scan_start) < 65536)
    {
        libsais_partial_sorting_scan_right_to_left_8u(T, SA, buckets, d, scan_start, scan_end - scan_start);
    }
#if defined(_OPENMP)
    else
    {
        sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE];
        sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];

        fast_sint_t block_start;
        for (block_start = scan_end - 1; block_start >= scan_start; )
        {
            if (SA[block_start] == 0)
            {
                block_start--;
            }
            else
            {
                fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end < scan_start) { block_max_end = scan_start - 1; }
                fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; }
                fast_sint_t block_size = block_start - block_end;

                if (block_size < 32)
                {
                    /* tiny block: scan it inline rather than paying the parallel setup cost */
                    for (; block_start > block_end; block_start -= 1)
                    {
                        sa_sint_t p = SA[block_start]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]); SA[--induction_bucket[v]] = (p - 1) | ((sa_sint_t)(distinct_names[v] !=
d) << (SAINT_BIT - 1)); distinct_names[v] = d;
                    }
                }
                else
                {
                    /* block is large enough for the parallel gather/rebase/place pipeline */
                    d = libsais_partial_sorting_scan_right_to_left_8u_block_omp(T, SA, buckets, d, block_end + 1, block_size, threads, thread_state);
                    block_start = block_end;
                }
            }
        }
    }
#else
    UNUSED(thread_state);
#endif
}

/* Right-to-left induction scan (32-bit input, 6k-word bucket layout) over
   [omp_block_start, omp_block_start + omp_block_size); returns updated d. */
static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2)
    {
        libsais_prefetch(&SA[i - 3 * prefetch_distance]);

        libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 0] & SAINT_MAX] - 1);
        libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 0] & SAINT_MAX] - 2);
        libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 1] & SAINT_MAX] - 1);
        libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 1] & SAINT_MAX] - 2);

        sa_sint_t p0 = SA[i - prefetch_distance - 0] & SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX4(T[p0 - (p0 > 0)], 0); libsais_prefetchw(&buckets[v0]);
        sa_sint_t p1 = SA[i - prefetch_distance - 1] & SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX4(T[p1 - (p1 > 0)], 0); libsais_prefetchw(&buckets[v1]);

        sa_sint_t p2 = SA[i - 0]; d += (p2 < 0); p2 &= SAINT_MAX; sa_sint_t v2 = BUCKETS_INDEX4(T[p2 - 1], T[p2 - 2] > T[p2 - 1]); SA[--buckets[v2]] = (p2 - 1) | ((sa_sint_t)(buckets[2 + v2] != d) << (SAINT_BIT - 1)); buckets[2 + v2] = d;
        sa_sint_t p3 = SA[i - 1]; d += (p3 < 0); p3 &= SAINT_MAX; sa_sint_t v3 = BUCKETS_INDEX4(T[p3 - 1], T[p3 - 2] > T[p3 - 1]); SA[--buckets[v3]] = (p3 - 1) | ((sa_sint_t)(buckets[2 + v3] != d) << (SAINT_BIT - 1)); buckets[2 + v3] = d;
    }

    /* cleanup loop */
    for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1)
    {
        sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX4(T[p - 1], T[p - 2] > T[p - 1]); SA[--buckets[v]] = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d;
    }

    return d;
}

/* Right-to-left induction scan (32-bit input, 4k-word bucket layout with
   SUFFIX_GROUP markers folded into entries); clears consumed SA slots and
   returns updated d. */
static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    sa_sint_t * RESTRICT induction_bucket = &buckets[3 * k];
    sa_sint_t * RESTRICT distinct_names = &buckets[0 * k];

    fast_sint_t i, j;
    for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2)
    {
        libsais_prefetchw(&SA[i - 3 * prefetch_distance]);

        sa_sint_t s0 = SA[i - 2 * prefetch_distance - 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
        sa_sint_t s1 = SA[i - 2 * prefetch_distance - 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
        sa_sint_t s2 = SA[i - 1 * prefetch_distance - 0]; if (s2 > 0) { const fast_sint_t Ts2 = T[(s2 & ~SUFFIX_GROUP_MARKER) - 1]; libsais_prefetchw(&induction_bucket[Ts2]); libsais_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts2, 0)]); }
        sa_sint_t s3 = SA[i - 1 * prefetch_distance - 1]; if (s3 > 0) { const fast_sint_t Ts3 = T[(s3 & ~SUFFIX_GROUP_MARKER) - 1]; libsais_prefetchw(&induction_bucket[Ts3]); libsais_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts3, 0)]); }

        sa_sint_t p0 = SA[i - 0]; if (p0 > 0) { SA[i - 0] = 0; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); p0 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); SA[--induction_bucket[T[p0 - 1]]] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] > T[p0 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d; }
        sa_sint_t p1 = SA[i - 1]; if (p1 > 0) { SA[i - 1] = 0; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); p1 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1],
T[p1 - 2] > T[p1 - 1]); SA[--induction_bucket[T[p1 - 1]]] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] > T[p1 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d; }
    }

    /* cleanup loop */
    for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1)
    {
        sa_sint_t p = SA[i]; if (p > 0) { SA[i] = 0; d += (p >> (SUFFIX_GROUP_BIT - 1)); p &= ~SUFFIX_GROUP_MARKER; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]); SA[--induction_bucket[T[p - 1]]] = (p - 1) | ((sa_sint_t)(T[p - 2] > T[p - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d; }
    }

    return d;
}

/* Right-to-left induction scan (32-bit input, single-k bucket layout):
   processes only positive entries, clearing each consumed SA slot. */
static void libsais_partial_sorting_scan_right_to_left_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2)
    {
        libsais_prefetchw(&SA[i - 3 * prefetch_distance]);

        sa_sint_t s0 = SA[i - 2 * prefetch_distance - 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
        sa_sint_t s1 = SA[i - 2 * prefetch_distance - 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
        sa_sint_t s2 = SA[i - 1 * prefetch_distance - 0]; if (s2 > 0) { libsais_prefetchw(&induction_bucket[T[s2 - 1]]); libsais_prefetch(&T[s2] - 2); }
        sa_sint_t s3 = SA[i - 1 * prefetch_distance - 1]; if (s3 > 0) { libsais_prefetchw(&induction_bucket[T[s3 - 1]]); libsais_prefetch(&T[s3] - 2); }

        sa_sint_t p0 = SA[i - 0]; if (p0 > 0) { SA[i - 0] = 0; SA[--induction_bucket[T[p0 - 1]]] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] > T[p0 - 1]) << (SAINT_BIT - 1)); }
        sa_sint_t p1 = SA[i - 1]; if (p1 > 0) { SA[i - 1] = 0; SA[--induction_bucket[T[p1 - 1]]] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] > T[p1 - 1]) << (SAINT_BIT - 1)); }
    }

    /* cleanup loop */
    for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1)
    {
        sa_sint_t p = SA[i]; if (p > 0) { SA[i] = 0; SA[--induction_bucket[T[p - 1]]] = (p - 1) | ((sa_sint_t)(T[p - 2] > T[p - 1]) << (SAINT_BIT - 1)); }
    }
}

#if defined(_OPENMP)

/* Gather phase (6k layout): copies one sub-block's SA entries into the
   per-thread cache along with their packed BUCKETS_INDEX4 symbols
   (symbol 0 for empty entries). */
static void libsais_partial_sorting_scan_right_to_left_32s_6k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetch(&SA[i + 2 * prefetch_distance]);

        libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1);
        libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2);
        libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1);
        libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 2);

        libsais_prefetchw(&cache[i + prefetch_distance]);

        sa_sint_t p0 = cache[i + 0].index = SA[i + 0]; sa_sint_t symbol0 = 0; p0 &= SAINT_MAX; if (p0 != 0) { symbol0 = BUCKETS_INDEX4(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); } cache[i + 0].symbol = symbol0;
        sa_sint_t p1 = cache[i + 1].index = SA[i + 1]; sa_sint_t symbol1 = 0; p1 &= SAINT_MAX; if (p1 != 0) { symbol1 = BUCKETS_INDEX4(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); } cache[i +
1].symbol = symbol1;
    }

    /* cleanup loop */
    for (j += prefetch_distance + 1; i < j; i += 1)
    {
        sa_sint_t p = cache[i].index = SA[i]; sa_sint_t symbol = 0; p &= SAINT_MAX; if (p != 0) { symbol = BUCKETS_INDEX4(T[p - 1], T[p - 2] > T[p - 1]); } cache[i].symbol = symbol;
    }
}

/* Gather phase (4k layout): moves positive SA entries of the sub-block into
   the per-thread cache (symbol SAINT_MIN marks skipped slots) and clears the
   consumed SA slots. */
static void libsais_partial_sorting_scan_right_to_left_32s_4k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetchw(&SA[i + 2 * prefetch_distance]);

        sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
        sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ?
Ts1 : NULL);

        libsais_prefetchw(&cache[i + prefetch_distance]);

        sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { SA[i + 0] = 0; cache[i + 0].index = p0; p0 &= ~SUFFIX_GROUP_MARKER; symbol0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); } cache[i + 0].symbol = symbol0;
        sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { SA[i + 1] = 0; cache[i + 1].index = p1; p1 &= ~SUFFIX_GROUP_MARKER; symbol1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); } cache[i + 1].symbol = symbol1;
    }

    /* cleanup loop */
    for (j += prefetch_distance + 1; i < j; i += 1)
    {
        sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { SA[i] = 0; cache[i].index = p; p &= ~SUFFIX_GROUP_MARKER; symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]); } cache[i].symbol = symbol;
    }
}

/* Gather phase (1k layout): for each positive SA entry of the sub-block,
   precomputes the induced entry value and its head symbol T[p - 1] into the
   cache, clearing the consumed SA slot (SAINT_MIN marks skipped slots). */
static void libsais_partial_sorting_scan_right_to_left_32s_1k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetchw(&SA[i + 2 * prefetch_distance]);

        sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
        sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ?
Ts1 : NULL);

        libsais_prefetchw(&cache[i + prefetch_distance]);

        sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { SA[i + 0] = 0; cache[i + 0].index = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] > T[p0 - 1]) << (SAINT_BIT - 1)); symbol0 = T[p0 - 1]; } cache[i + 0].symbol = symbol0;
        sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { SA[i + 1] = 0; cache[i + 1].index = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] > T[p1 - 1]) << (SAINT_BIT - 1)); symbol1 = T[p1 - 1]; } cache[i + 1].symbol = symbol1;
    }

    /* cleanup loop */
    for (j += prefetch_distance + 1; i < j; i += 1)
    {
        sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { SA[i] = 0; cache[i].index = (p - 1) | ((sa_sint_t)(T[p - 2] > T[p - 1]) << (SAINT_BIT - 1)); symbol = T[p - 1]; } cache[i].symbol = symbol;
    }
}

/* Sort phase (6k layout), executed by a single thread: walks the whole
   block's cache right to left, updates the shared buckets, and when a newly
   induced entry lands inside the same block, chains it directly back into
   the cache so it is processed in this pass. Returns updated d. */
static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_6k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
    {
        libsais_prefetchw(&cache[i - 2 * prefetch_distance]);

        libsais_prefetchw(&buckets[cache[i - prefetch_distance - 0].symbol]);
        libsais_prefetchw(&buckets[cache[i - prefetch_distance - 1].symbol]);

        sa_sint_t v0 = cache[i - 0].symbol, p0 = cache[i - 0].index; d += (p0 < 0); cache[i - 0].symbol = --buckets[v0]; cache[i - 0].index = (p0 - 1) | ((sa_sint_t)(buckets[2 + v0] != d) << (SAINT_BIT - 1)); buckets[2 + v0] = d;
        if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t s = cache[i - 0].symbol, q = (cache[s].index = cache[i - 0].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] > T[q - 1]); }

        sa_sint_t v1 = cache[i - 1].symbol, p1 = cache[i - 1].index; d += (p1 < 0); cache[i - 1].symbol = --buckets[v1]; cache[i - 1].index = (p1 - 1) | ((sa_sint_t)(buckets[2 + v1] != d) << (SAINT_BIT - 1)); buckets[2 + v1] = d;
        if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t s = cache[i - 1].symbol, q = (cache[s].index = cache[i - 1].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] > T[q - 1]); }
    }

    /* cleanup loop */
    for (j -= prefetch_distance + 1; i >= j; i -= 1)
    {
        sa_sint_t v = cache[i].symbol, p = cache[i].index; d += (p < 0); cache[i].symbol = --buckets[v]; cache[i].index = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d;
        if (cache[i].symbol >= omp_block_start) { sa_sint_t s = cache[i].symbol, q = (cache[s].index = cache[i].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] > T[q - 1]); }
    }

    return d;
}

/* Sort phase (4k layout), executed by a single thread: same in-block
   chaining idea as the 6k variant, with suffix-group markers carried in the
   index/symbol values (skipped slots have negative symbols). */
static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_4k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    sa_sint_t * RESTRICT induction_bucket = &buckets[3 * k];
    sa_sint_t * RESTRICT distinct_names = &buckets[0 * k];

    fast_sint_t i, j;
    for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
    {
        libsais_prefetchw(&cache[i - 2 * prefetch_distance]);

        sa_sint_t s0 = cache[i - prefetch_distance - 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0 >> 1]; libsais_prefetchw(s0 >= 0 ? Is0 : NULL); const sa_sint_t * Ds0 = &distinct_names[s0]; libsais_prefetchw(s0 >= 0 ? Ds0 : NULL);
        sa_sint_t s1 = cache[i - prefetch_distance - 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1 >> 1]; libsais_prefetchw(s1 >= 0 ? Is1 : NULL); const sa_sint_t * Ds1 = &distinct_names[s1]; libsais_prefetchw(s1 >= 0 ?
Ds1 : NULL);

        sa_sint_t v0 = cache[i - 0].symbol;
        if (v0 >= 0)
        {
            sa_sint_t p0 = cache[i - 0].index; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); cache[i - 0].symbol = --induction_bucket[v0 >> 1]; cache[i - 0].index = (p0 - 1) | (v0 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d;
            if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 0].symbol, np = cache[i - 0].index; if (np > 0) { cache[i - 0].index = 0; cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] > T[np - 1]); } }
        }

        sa_sint_t v1 = cache[i - 1].symbol;
        if (v1 >= 0)
        {
            sa_sint_t p1 = cache[i - 1].index; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); cache[i - 1].symbol = --induction_bucket[v1 >> 1]; cache[i - 1].index = (p1 - 1) | (v1 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d;
            if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 1].symbol, np = cache[i - 1].index; if (np > 0) { cache[i - 1].index = 0; cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] > T[np - 1]); } }
        }
    }

    /* cleanup loop */
    for (j -= prefetch_distance + 1; i >= j; i -= 1)
    {
        sa_sint_t v = cache[i].symbol;
        if (v >= 0)
        {
            sa_sint_t p = cache[i].index; d += (p >> (SUFFIX_GROUP_BIT - 1)); cache[i].symbol = --induction_bucket[v >> 1]; cache[i].index = (p - 1) | (v << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d;
            if (cache[i].symbol >= omp_block_start) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[i].index = 0; cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] > T[np - 1]); } }
        }
    }

    return d;
}

/* Sort phase (1k layout), executed by a single thread: decrements the bucket
   for each valid cached symbol and chains in-block induced entries back into
   the cache. */
static void libsais_partial_sorting_scan_right_to_left_32s_1k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache,
fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
    {
        libsais_prefetchw(&cache[i - 2 * prefetch_distance]);

        sa_sint_t s0 = cache[i - prefetch_distance - 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais_prefetchw(s0 >= 0 ? Is0 : NULL);
        sa_sint_t s1 = cache[i - prefetch_distance - 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais_prefetchw(s1 >= 0 ? Is1 : NULL);

        sa_sint_t v0 = cache[i - 0].symbol;
        if (v0 >= 0)
        {
            cache[i - 0].symbol = --induction_bucket[v0];
            if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 0].symbol, np = cache[i - 0].index; if (np > 0) { cache[i - 0].index = 0; cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] > T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; } }
        }

        sa_sint_t v1 = cache[i - 1].symbol;
        if (v1 >= 0)
        {
            cache[i - 1].symbol = --induction_bucket[v1];
            if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 1].symbol, np = cache[i - 1].index; if (np > 0) { cache[i - 1].index = 0; cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] > T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; } }
        }
    }

    /* cleanup loop */
    for (j -= prefetch_distance + 1; i >= j; i -= 1)
    {
        sa_sint_t v = cache[i].symbol;
        if (v >= 0)
        {
            cache[i].symbol = --induction_bucket[v];
            if (cache[i].symbol >= omp_block_start) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[i].index = 0; cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] > T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; } }
        }
    }
}

/* Parallel right-to-left block scan (6k layout): all threads gather, the
   master sorts the whole block, then all threads place the results. */
static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_6k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
    #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(cache);

        fast_sint_t omp_thread_num = 0;
        fast_sint_t omp_num_threads = 1;
#endif
        fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
        fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
        omp_block_start += block_start;

        if (omp_num_threads == 1)
        {
            d = libsais_partial_sorting_scan_right_to_left_32s_6k(T, SA, buckets, d, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            /* gather (all threads) -> sort (master) -> place (all threads) */
            {
                libsais_partial_sorting_scan_right_to_left_32s_6k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
            }

            #pragma omp barrier

            #pragma omp master
            {
                d = libsais_partial_sorting_scan_right_to_left_32s_6k_block_sort(T, buckets, d, cache - block_start, block_start, block_size);
            }

            #pragma omp barrier

            {
                libsais_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
            }
        }
#endif
    }

    return d;
}

/* Parallel right-to-left block scan (4k layout): gather / master sort /
   compact-and-place. */
static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_4k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
    #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(cache);

        fast_sint_t omp_thread_num = 0;
        fast_sint_t omp_num_threads = 1;
#endif
        fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
        fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
        omp_block_start += block_start;

        if (omp_num_threads == 1)
        {
            d = libsais_partial_sorting_scan_right_to_left_32s_4k(T, SA, k, buckets, d, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            {
                libsais_partial_sorting_scan_right_to_left_32s_4k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
            }

            #pragma omp barrier

            #pragma omp master
            {
                d = libsais_partial_sorting_scan_right_to_left_32s_4k_block_sort(T, k, buckets, d, cache - block_start, block_start, block_size);
            }

            #pragma omp barrier

            {
                libsais_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
            }
        }
#endif
    }

    return d;
}

/* Parallel right-to-left block scan (1k layout); this variant carries no d
   counter. */
static void libsais_partial_sorting_scan_right_to_left_32s_1k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
    #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(cache);

        fast_sint_t omp_thread_num = 0;
        fast_sint_t omp_num_threads = 1;
#endif
        fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
        fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ?
omp_block_stride : block_size - omp_block_start;
        omp_block_start += block_start;

        if (omp_num_threads == 1)
        {
            libsais_partial_sorting_scan_right_to_left_32s_1k(T, SA, buckets, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            /* gather (all threads) -> sort (master) -> compact-and-place (all threads) */
            {
                libsais_partial_sorting_scan_right_to_left_32s_1k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
            }

            #pragma omp barrier

            #pragma omp master
            {
                libsais_partial_sorting_scan_right_to_left_32s_1k_block_sort(T, buckets, cache - block_start, block_start, block_size);
            }

            #pragma omp barrier

            {
                libsais_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
            }
        }
#endif
    }
}

#endif

/* Driver for the 6k right-to-left scan: serial for ranges below 65536,
   otherwise walks cache-sized blocks backwards from the end of the range. */
static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    fast_sint_t scan_start = (fast_sint_t)left_suffixes_count + 1;
    fast_sint_t scan_end = (fast_sint_t)n - (fast_sint_t)first_lms_suffix;

    if (threads == 1 || (scan_end - scan_start) < 65536)
    {
        d = libsais_partial_sorting_scan_right_to_left_32s_6k(T, SA, buckets, d, scan_start, scan_end - scan_start);
    }
#if defined(_OPENMP)
    else
    {
        fast_sint_t block_start, block_end;
        for (block_start = scan_end - 1; block_start >= scan_start; block_start = block_end)
        {
            block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < scan_start) { block_end = scan_start - 1; }

            d = libsais_partial_sorting_scan_right_to_left_32s_6k_block_omp(T, SA, buckets, d, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads);
        }
    }
#else
    UNUSED(thread_state);
#endif

    return d;
}

/* Driver for the 4k right-to-left scan over the whole SA. */
static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, sa_sint_t
threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    if (threads == 1 || n < 65536)
    {
        d = libsais_partial_sorting_scan_right_to_left_32s_4k(T, SA, k, buckets, d, 0, n);
    }
#if defined(_OPENMP)
    else
    {
        fast_sint_t block_start, block_end;
        for (block_start = (fast_sint_t)n - 1; block_start >= 0; block_start = block_end)
        {
            block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < 0) { block_end = -1; }

            d = libsais_partial_sorting_scan_right_to_left_32s_4k_block_omp(T, SA, k, buckets, d, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads);
        }
    }
#else
    UNUSED(thread_state);
#endif

    return d;
}

/* Driver for the 1k right-to-left scan over the whole SA. */
static void libsais_partial_sorting_scan_right_to_left_32s_1k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    if (threads == 1 || n < 65536)
    {
        libsais_partial_sorting_scan_right_to_left_32s_1k(T, SA, buckets, 0, n);
    }
#if defined(_OPENMP)
    else
    {
        fast_sint_t block_start, block_end;
        for (block_start = (fast_sint_t)n - 1; block_start >= 0; block_start = block_end)
        {
            block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < 0) { block_end = -1; }

            libsais_partial_sorting_scan_right_to_left_32s_1k_block_omp(T, SA, buckets, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads);
        }
    }
#else
    UNUSED(thread_state);
#endif
}

/* Compacts marked (negative) entries to the front of the block, stripping
   the SUFFIX_GROUP marker from each; returns the new write position l. */
static fast_sint_t libsais_partial_sorting_gather_lms_suffixes_32s_4k(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j, l;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - 3, l = omp_block_start; i < j; i += 4)
    {
        libsais_prefetch(&SA[i + prefetch_distance]);

        /* SA[l] is written unconditionally; l only advances for marked entries */
        sa_sint_t s0 = SA[i + 0]; SA[l] = (s0 - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); l += (s0 < 0);
        sa_sint_t s1 = SA[i + 1]; SA[l] = (s1 - SUFFIX_GROUP_MARKER) &
(~SUFFIX_GROUP_MARKER); l += (s1 < 0);
        sa_sint_t s2 = SA[i + 2]; SA[l] = (s2 - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); l += (s2 < 0);
        sa_sint_t s3 = SA[i + 3]; SA[l] = (s3 - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); l += (s3 < 0);
    }

    /* cleanup loop */
    for (j += 3; i < j; i += 1)
    {
        sa_sint_t s = SA[i]; SA[l] = (s - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); l += (s < 0);
    }

    return l;
}

/* Compacts marked (negative) entries to the front of the block, clearing the
   sign bit on each; returns the new write position l. */
static fast_sint_t libsais_partial_sorting_gather_lms_suffixes_32s_1k(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j, l;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - 3, l = omp_block_start; i < j; i += 4)
    {
        libsais_prefetch(&SA[i + prefetch_distance]);

        sa_sint_t s0 = SA[i + 0]; SA[l] = s0 & SAINT_MAX; l += (s0 < 0);
        sa_sint_t s1 = SA[i + 1]; SA[l] = s1 & SAINT_MAX; l += (s1 < 0);
        sa_sint_t s2 = SA[i + 2]; SA[l] = s2 & SAINT_MAX; l += (s2 < 0);
        sa_sint_t s3 = SA[i + 3]; SA[l] = s3 & SAINT_MAX; l += (s3 < 0);
    }

    /* cleanup loop */
    for (j += 3; i < j; i += 1)
    {
        sa_sint_t s = SA[i]; SA[l] = s & SAINT_MAX; l += (s < 0);
    }

    return l;
}

/* Parallel LMS-suffix gather (4k variant): each thread compacts its block,
   then the master concatenates the per-thread runs with memmove. */
static void libsais_partial_sorting_gather_lms_suffixes_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
    #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(thread_state);

        fast_sint_t omp_thread_num = 0;
        fast_sint_t omp_num_threads = 1;
#endif
        fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
        fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;

        if (omp_num_threads == 1)
        {
            libsais_partial_sorting_gather_lms_suffixes_32s_4k(SA, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            {
                thread_state[omp_thread_num].state.position = omp_block_start;
                thread_state[omp_thread_num].state.count = libsais_partial_sorting_gather_lms_suffixes_32s_4k(SA, omp_block_start, omp_block_size) - omp_block_start;
            }

            #pragma omp barrier

            /* master stitches the per-thread compacted runs together */
            #pragma omp master
            {
                fast_sint_t t, position = 0;
                for (t = 0; t < omp_num_threads; ++t)
                {
                    if (t > 0 && thread_state[t].state.count > 0)
                    {
                        memmove(&SA[position], &SA[thread_state[t].state.position], (size_t)thread_state[t].state.count * sizeof(sa_sint_t));
                    }

                    position += thread_state[t].state.count;
                }
            }
        }
#endif
    }
}

/* Parallel LMS-suffix gather (1k variant); same structure as the 4k one. */
static void libsais_partial_sorting_gather_lms_suffixes_32s_1k_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
    #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(thread_state);

        fast_sint_t omp_thread_num = 0;
        fast_sint_t omp_num_threads = 1;
#endif
        fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
        fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ?
omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { libsais_partial_sorting_gather_lms_suffixes_32s_1k(SA, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.position = omp_block_start; thread_state[omp_thread_num].state.count = libsais_partial_sorting_gather_lms_suffixes_32s_1k(SA, omp_block_start, omp_block_size) - omp_block_start; } #pragma omp barrier #pragma omp master { fast_sint_t t, position = 0; for (t = 0; t < omp_num_threads; ++t) { if (t > 0 && thread_state[t].state.count > 0) { memmove(&SA[position], &SA[thread_state[t].state.position], (size_t)thread_state[t].state.count * sizeof(sa_sint_t)); } position += thread_state[t].state.count; } } } #endif } } static void libsais_induce_partial_order_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { memset(&buckets[2 * ALPHABET_SIZE], 0, 2 * ALPHABET_SIZE * sizeof(sa_sint_t)); sa_sint_t d = libsais_partial_sorting_scan_left_to_right_8u_omp(T, SA, n, buckets, left_suffixes_count, 0, threads, thread_state); libsais_partial_sorting_shift_markers_8u_omp(SA, n, buckets, threads); libsais_partial_sorting_scan_right_to_left_8u_omp(T, SA, n, buckets, first_lms_suffix, left_suffixes_count, d, threads, thread_state); } static void libsais_induce_partial_order_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t d = libsais_partial_sorting_scan_left_to_right_32s_6k_omp(T, SA, n, buckets, left_suffixes_count, 0, threads, thread_state); libsais_partial_sorting_shift_markers_32s_6k_omp(SA, k, buckets, threads); libsais_partial_sorting_shift_buckets_32s_6k(k, buckets); 
libsais_partial_sorting_scan_right_to_left_32s_6k_omp(T, SA, n, buckets, first_lms_suffix, left_suffixes_count, d, threads, thread_state);
}

/* Induces the partial order for 32-bit input with the 4k-word bucket layout:
   zero the buckets, scan left-to-right, shift the markers, scan
   right-to-left, then gather the (partially sorted) LMS suffixes. */
static void libsais_induce_partial_order_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t));

    sa_sint_t d = libsais_partial_sorting_scan_left_to_right_32s_4k_omp(T, SA, n, k, buckets, 0, threads, thread_state);
    libsais_partial_sorting_shift_markers_32s_4k(SA, n);
    libsais_partial_sorting_scan_right_to_left_32s_4k_omp(T, SA, n, k, buckets, d, threads, thread_state);
    libsais_partial_sorting_gather_lms_suffixes_32s_4k_omp(SA, n, threads, thread_state);
}

/* 2k-word bucket variant: both induction scans reuse the 1k scanners, each
   with its own half of the bucket table. */
static void libsais_induce_partial_order_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    libsais_partial_sorting_scan_left_to_right_32s_1k_omp(T, SA, n, &buckets[1 * k], threads, thread_state);
    libsais_partial_sorting_scan_right_to_left_32s_1k_omp(T, SA, n, &buckets[0 * k], threads, thread_state);
    libsais_partial_sorting_gather_lms_suffixes_32s_1k_omp(SA, n, threads, thread_state);
}

/* 1k-word bucket variant: only one bucket array is available, so the symbol
   histogram and bucket boundaries are recomputed before each scan direction. */
static void libsais_induce_partial_order_32s_1k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    libsais_count_suffixes_32s(T, n, k, buckets);
    libsais_initialize_buckets_start_32s_1k(k, buckets);
    libsais_partial_sorting_scan_left_to_right_32s_1k_omp(T, SA, n, buckets, threads, thread_state);

    libsais_count_suffixes_32s(T, n, k, buckets);
    libsais_initialize_buckets_end_32s_1k(k, buckets);
    libsais_partial_sorting_scan_right_to_left_32s_1k_omp(T, SA, n, buckets, threads, thread_state);

    libsais_partial_sorting_gather_lms_suffixes_32s_1k_omp(SA, n,
threads, thread_state);
}

/* Assigns names to the LMS suffixes in SA[omp_block_start ..
   omp_block_start + omp_block_size): for each entry p, the name is stored
   (flagged with SAINT_MIN) at SAm[(p & SAINT_MAX) >> 1], and `name` is
   incremented whenever p's sign bit marks the start of a new group.
   Returns the updated name counter.  4x unrolled with software prefetch. */
static sa_sint_t libsais_renumber_lms_suffixes_8u(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t name, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    sa_sint_t * RESTRICT SAm = &SA[m];

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4)
    {
        libsais_prefetch(&SA[i + 2 * prefetch_distance]);

        libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 0] & SAINT_MAX) >> 1]);
        libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 1] & SAINT_MAX) >> 1]);
        libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 2] & SAINT_MAX) >> 1]);
        libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 3] & SAINT_MAX) >> 1]);

        /* name advances when the entry's sign bit is set (p < 0). */
        sa_sint_t p0 = SA[i + 0]; SAm[(p0 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p0 < 0;
        sa_sint_t p1 = SA[i + 1]; SAm[(p1 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p1 < 0;
        sa_sint_t p2 = SA[i + 2]; SAm[(p2 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p2 < 0;
        sa_sint_t p3 = SA[i + 3]; SAm[(p3 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p3 < 0;
    }

    for (j += prefetch_distance + 3; i < j; i += 1)
    {
        sa_sint_t p = SA[i]; SAm[(p & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p < 0;
    }

    return name;
}

/* Scans SA[m + omp_block_start .. m + omp_block_start + omp_block_size)
   backwards and writes each marked entry (sign bit set), with the flag
   stripped, at decreasing position l.  Returns the final (inclusive) left
   boundary of the gathered run.  4x unrolled. */
static fast_sint_t libsais_gather_marked_suffixes_8u(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t l, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    /* Pre-decrement so SA[l] can be written before testing the flag;
       l only moves when the entry was actually marked. */
    l -= 1;

    fast_sint_t i, j;
    for (i = (fast_sint_t)m + omp_block_start + omp_block_size - 1, j = (fast_sint_t)m + omp_block_start + 3; i >= j; i -= 4)
    {
        libsais_prefetch(&SA[i - prefetch_distance]);

        sa_sint_t s0 = SA[i - 0]; SA[l] = s0 & SAINT_MAX; l -= s0 < 0;
        sa_sint_t s1 = SA[i - 1]; SA[l] = s1 & SAINT_MAX; l -= s1 < 0;
        sa_sint_t s2 = SA[i - 2]; SA[l] = s2 & SAINT_MAX; l -= s2 < 0;
        sa_sint_t s3 = SA[i - 3]; SA[l] = s3 & SAINT_MAX; l -= s3 < 0;
    }

    for (j -= 3; i >= j; i -= 1)
    {
        sa_sint_t s = SA[i]; SA[l] = s & SAINT_MAX; l -= s < 0;
    }

    l += 1;
return l;
}

/* Parallel wrapper for libsais_renumber_lms_suffixes_8u.  Each thread first
   counts the marked (negative) entries in its block so every thread can
   derive its starting name from the counts of the threads before it; the
   last thread produces the final total.  Returns the number of names
   assigned. */
static sa_sint_t libsais_renumber_lms_suffixes_8u_omp(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    sa_sint_t name = 0;

#if defined(_OPENMP)
    #pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(thread_state);

        fast_sint_t omp_thread_num = 0;
        fast_sint_t omp_num_threads = 1;
#endif
        fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16);
        fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : m - omp_block_start;

        if (omp_num_threads == 1)
        {
            name = libsais_renumber_lms_suffixes_8u(SA, m, 0, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            {
                /* Phase 1: count marks per block. */
                thread_state[omp_thread_num].state.count = libsais_count_negative_marked_suffixes(SA, omp_block_start, omp_block_size);
            }

            #pragma omp barrier

            {
                /* Phase 2: prefix-sum the counts of preceding threads to get
                   this thread's starting name; only the writer of the last
                   block publishes the grand total into `name`. */
                fast_sint_t t, count = 0; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; }

                if (omp_thread_num == omp_num_threads - 1)
                {
                    name = (sa_sint_t)(count + thread_state[omp_thread_num].state.count);
                }

                libsais_renumber_lms_suffixes_8u(SA, m, (sa_sint_t)count, omp_block_start, omp_block_size);
            }
        }
#endif
    }

    return name;
}

/* Parallel wrapper for libsais_gather_marked_suffixes_8u over the upper half
   of SA.  Non-final threads gather to the end of their own block; the master
   then compacts all runs right-to-left so they end at n + fs. */
static void libsais_gather_marked_lms_suffixes_8u_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
    #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(thread_state);

        fast_sint_t omp_thread_num = 0;
        fast_sint_t omp_num_threads = 1;
#endif
        /* Blocks cover the n/2 entries above SA[m]. */
        fast_sint_t omp_block_stride = (((fast_sint_t)n >> 1) /
omp_num_threads) & (-16);
        fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : ((fast_sint_t)n >> 1) - omp_block_start;

        if (omp_num_threads == 1)
        {
            libsais_gather_marked_suffixes_8u(SA, m, (fast_sint_t)n + (fast_sint_t)fs, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            {
                if (omp_thread_num < omp_num_threads - 1)
                {
                    /* Gather to the end of this thread's own block; record
                       run start and length for the merge below. */
                    thread_state[omp_thread_num].state.position = libsais_gather_marked_suffixes_8u(SA, m, (fast_sint_t)m + omp_block_start + omp_block_size, omp_block_start, omp_block_size);
                    thread_state[omp_thread_num].state.count = (fast_sint_t)m + omp_block_start + omp_block_size - thread_state[omp_thread_num].state.position;
                }
                else
                {
                    /* Last thread gathers directly to the final destination
                       boundary n + fs. */
                    thread_state[omp_thread_num].state.position = libsais_gather_marked_suffixes_8u(SA, m, (fast_sint_t)n + (fast_sint_t)fs, omp_block_start, omp_block_size);
                    thread_state[omp_thread_num].state.count = (fast_sint_t)n + (fast_sint_t)fs - thread_state[omp_thread_num].state.position;
                }
            }

            #pragma omp barrier

            #pragma omp master
            {
                /* Right-to-left compaction; the last thread's run is already
                   in place. */
                fast_sint_t t, position = (fast_sint_t)n + (fast_sint_t)fs;

                for (t = omp_num_threads - 1; t >= 0; --t)
                {
                    position -= thread_state[t].state.count;
                    if (t != omp_num_threads - 1 && thread_state[t].state.count > 0)
                    {
                        memmove(&SA[position], &SA[thread_state[t].state.position], (size_t)thread_state[t].state.count * sizeof(sa_sint_t));
                    }
                }
            }
        }
#endif
    }
}

/* Renumbers the m LMS suffixes and, unless every suffix received a unique
   name (name == m), gathers the marked names for the recursive call.
   When all names are unique, only the sign-bit flags need stripping.
   Returns the number of distinct names. */
static sa_sint_t libsais_renumber_and_gather_lms_suffixes_8u_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    /* Clear the name area SA[m .. m + n/2) before renumbering writes into it. */
    memset(&SA[m], 0, ((size_t)n >> 1) * sizeof(sa_sint_t));

    sa_sint_t name = libsais_renumber_lms_suffixes_8u_omp(SA, m, threads, thread_state);
    if (name < m)
    {
        libsais_gather_marked_lms_suffixes_8u_omp(SA, n, m, fs, threads, thread_state);
    }
    else
    {
        /* All LMS suffixes already distinct: just strip the flag bits. */
        fast_sint_t i; for (i = 0; i < m; i += 1) { SA[i] &= SAINT_MAX; }
    }

    return name;
}

static sa_sint_t
libsais_renumber_distinct_lms_suffixes_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t name, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    sa_sint_t * RESTRICT SAm = &SA[m];

    fast_sint_t i, j; sa_sint_t p0, p1, p2, p3 = 0;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4)
    {
        libsais_prefetchw(&SA[i + 2 * prefetch_distance]);

        libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 0] & SAINT_MAX) >> 1]);
        libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 1] & SAINT_MAX) >> 1]);
        libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 2] & SAINT_MAX) >> 1]);
        libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 3] & SAINT_MAX) >> 1]);

        /* Each step strips the flag bit from SA[i], stores the name keyed by
           the halved position, and marks it with SAINT_MIN only when both the
           current and previous entries carried the flag (pN & pN-1 &
           SAINT_MIN); name advances on every flagged entry. */
        p0 = SA[i + 0]; SAm[(SA[i + 0] = p0 & SAINT_MAX) >> 1] = name | (p0 & p3 & SAINT_MIN); name += p0 < 0;
        p1 = SA[i + 1]; SAm[(SA[i + 1] = p1 & SAINT_MAX) >> 1] = name | (p1 & p0 & SAINT_MIN); name += p1 < 0;
        p2 = SA[i + 2]; SAm[(SA[i + 2] = p2 & SAINT_MAX) >> 1] = name | (p2 & p1 & SAINT_MIN); name += p2 < 0;
        p3 = SA[i + 3]; SAm[(SA[i + 3] = p3 & SAINT_MAX) >> 1] = name | (p3 & p2 & SAINT_MIN); name += p3 < 0;
    }

    for (j += prefetch_distance + 3; i < j; i += 1)
    {
        p2 = p3; p3 = SA[i]; SAm[(SA[i] = p3 & SAINT_MAX) >> 1] = name | (p3 & p2 & SAINT_MIN); name += p3 < 0;
    }

    return name;
}

/* Sweeps SA[m + block) keeping, for each zero entry, the value of the
   nearest preceding non-zero entry (pN carries forward), while clearing the
   sign bit of an entry whenever its predecessor was non-negative
   (pN & (pN-1 | SAINT_MAX)).  4x unrolled. */
static void libsais_mark_distinct_lms_suffixes_32s(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j; sa_sint_t p0, p1, p2, p3 = 0;
    for (i = (fast_sint_t)m + omp_block_start, j = (fast_sint_t)m + omp_block_start + omp_block_size - 3; i < j; i += 4)
    {
        libsais_prefetchw(&SA[i + prefetch_distance]);

        p0 = SA[i + 0]; SA[i + 0] = p0 & (p3 | SAINT_MAX); p0 = (p0 == 0) ? p3 : p0;
        p1 = SA[i + 1]; SA[i + 1] = p1 & (p0 | SAINT_MAX); p1 = (p1 == 0) ? p0 : p1;
        p2 = SA[i + 2]; SA[i + 2] = p2 & (p1 | SAINT_MAX); p2 = (p2 == 0) ? p1 : p2;
        p3 = SA[i + 3]; SA[i + 3] = p3 & (p2 | SAINT_MAX); p3 = (p3 == 0) ? p2 : p3;
    }

    for (j += 3; i < j; i += 1)
    {
        p2 = p3; p3 = SA[i]; SA[i] = p3 & (p2 | SAINT_MAX); p3 = (p3 == 0) ? p2 : p3;
    }
}

/* Clamps the entries of SA[m + block): negative entries keep their magnitude
   bits (flag stripped via & SAINT_MAX), non-negative entries become 0. */
static void libsais_clamp_lms_suffixes_length_32s(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    sa_sint_t * RESTRICT SAm = &SA[m];

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - 3; i < j; i += 4)
    {
        libsais_prefetchw(&SAm[i + prefetch_distance]);

        SAm[i + 0] = (SAm[i + 0] < 0 ? SAm[i + 0] : 0) & SAINT_MAX;
        SAm[i + 1] = (SAm[i + 1] < 0 ? SAm[i + 1] : 0) & SAINT_MAX;
        SAm[i + 2] = (SAm[i + 2] < 0 ? SAm[i + 2] : 0) & SAINT_MAX;
        SAm[i + 3] = (SAm[i + 3] < 0 ? SAm[i + 3] : 0) & SAINT_MAX;
    }

    for (j += 3; i < j; i += 1)
    {
        SAm[i] = (SAm[i] < 0 ? SAm[i] : 0) & SAINT_MAX;
    }
}

/* Parallel wrapper for libsais_renumber_distinct_lms_suffixes_32s_4k; same
   count/prefix-sum protocol as the 8u renumbering, but names start at 1
   (continues in the next chunk). */
static sa_sint_t libsais_renumber_distinct_lms_suffixes_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    sa_sint_t name = 0;

#if defined(_OPENMP)
    #pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(thread_state);

        fast_sint_t omp_thread_num = 0;
        fast_sint_t omp_num_threads = 1;
#endif
        fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16);
        fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ?
omp_block_stride : m - omp_block_start;

        if (omp_num_threads == 1)
        {
            /* Single thread: names start at 1 (0 is reserved). */
            name = libsais_renumber_distinct_lms_suffixes_32s_4k(SA, m, 1, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            {
                /* Phase 1: count marked entries per block. */
                thread_state[omp_thread_num].state.count = libsais_count_negative_marked_suffixes(SA, omp_block_start, omp_block_size);
            }

            #pragma omp barrier

            {
                /* Phase 2: starting name = 1 + marks in all earlier blocks;
                   only the last thread publishes the total. */
                fast_sint_t t, count = 1; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; }

                if (omp_thread_num == omp_num_threads - 1)
                {
                    name = (sa_sint_t)(count + thread_state[omp_thread_num].state.count);
                }

                libsais_renumber_distinct_lms_suffixes_32s_4k(SA, m, (sa_sint_t)count, omp_block_start, omp_block_size);
            }
        }
#endif
    }

    /* Compensate for names starting at 1: return the distinct-name count. */
    return name - 1;
}

/* Parallel wrapper for libsais_mark_distinct_lms_suffixes_32s over the n/2
   entries above SA[m]. */
static void libsais_mark_distinct_lms_suffixes_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads)
{
#if defined(_OPENMP)
    #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();

        fast_sint_t omp_block_stride = (((fast_sint_t)n >> 1) / omp_num_threads) & (-16);
        fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : ((fast_sint_t)n >> 1) - omp_block_start;
#else
        UNUSED(threads);

        fast_sint_t omp_block_start = 0;
        fast_sint_t omp_block_size = (fast_sint_t)n >> 1;
#endif

        libsais_mark_distinct_lms_suffixes_32s(SA, m, omp_block_start, omp_block_size);
    }
}

/* Parallel wrapper for libsais_clamp_lms_suffixes_length_32s over the n/2
   entries above SA[m]. */
static void libsais_clamp_lms_suffixes_length_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads)
{
#if defined(_OPENMP)
    #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();

        fast_sint_t omp_block_stride = (((fast_sint_t)n >> 1) / omp_num_threads) & (-16);
        fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : ((fast_sint_t)n >> 1) - omp_block_start;
#else
        UNUSED(threads);

        fast_sint_t omp_block_start = 0;
        fast_sint_t omp_block_size = (fast_sint_t)n >> 1;
#endif

        libsais_clamp_lms_suffixes_length_32s(SA, m, omp_block_start, omp_block_size);
    }
}

/* Renumbers distinct LMS suffixes (4k path) and, if not all were distinct,
   marks the distinct ones for the recursive step.  Returns the distinct
   count. */
static sa_sint_t libsais_renumber_and_mark_distinct_lms_suffixes_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    memset(&SA[m], 0, ((size_t)n >> 1) * sizeof(sa_sint_t));

    sa_sint_t name = libsais_renumber_distinct_lms_suffixes_32s_4k_omp(SA, m, threads, thread_state);
    if (name < m)
    {
        libsais_mark_distinct_lms_suffixes_32s_omp(SA, n, m, threads);
    }

    return name;
}

/* 1k path: gathers the LMS suffix positions, records each LMS substring's
   length (flagged with SAINT_MIN) keyed by halved position, clamps the
   lengths, then names consecutive SA entries by comparing equal-length
   substrings character by character (continues in the next chunk). */
static sa_sint_t libsais_renumber_and_mark_distinct_lms_suffixes_32s_1k_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads)
{
    const fast_sint_t prefetch_distance = 32;

    sa_sint_t * RESTRICT SAm = &SA[m];

    {
        libsais_gather_lms_suffixes_32s(T, SA, n);

        memset(&SA[m], 0, ((size_t)n - (size_t)m - (size_t)m) * sizeof(sa_sint_t));

        fast_sint_t i, j;
        for (i = (fast_sint_t)n - (fast_sint_t)m, j = (fast_sint_t)n - 1 - prefetch_distance
- 3; i < j; i += 4)
        {
            libsais_prefetch(&SA[i + 2 * prefetch_distance]);

            libsais_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 0]) >> 1]);
            libsais_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 1]) >> 1]);
            libsais_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 2]) >> 1]);
            libsais_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 3]) >> 1]);

            /* Store each LMS substring length (next start - this start + 1),
               flagged with SAINT_MIN, keyed by the halved position. */
            SAm[((sa_uint_t)SA[i + 0]) >> 1] = SA[i + 1] - SA[i + 0] + 1 + SAINT_MIN;
            SAm[((sa_uint_t)SA[i + 1]) >> 1] = SA[i + 2] - SA[i + 1] + 1 + SAINT_MIN;
            SAm[((sa_uint_t)SA[i + 2]) >> 1] = SA[i + 3] - SA[i + 2] + 1 + SAINT_MIN;
            SAm[((sa_uint_t)SA[i + 3]) >> 1] = SA[i + 4] - SA[i + 3] + 1 + SAINT_MIN;
        }

        for (j += prefetch_distance + 3; i < j; i += 1)
        {
            SAm[((sa_uint_t)SA[i]) >> 1] = SA[i + 1] - SA[i] + 1 + SAINT_MIN;
        }

        /* The last gathered suffix has no successor; its length is 1. */
        SAm[((sa_uint_t)SA[n - 1]) >> 1] = 1 + SAINT_MIN;
    }

    {
        libsais_clamp_lms_suffixes_length_32s_omp(SA, n, m, threads);
    }

    sa_sint_t name = 1;

    {
        /* Compare each adjacent pair of LMS substrings; equal length is a
           prerequisite, then compare symbols until a mismatch.  qdiff/pdiff
           hold SAINT_MIN while the pair is known distinct and 0 once a full
           match is found, which suppresses the name increment.  Two pairs are
           processed per iteration. */
        fast_sint_t i, j, p = SA[0], plen = SAm[p >> 1]; sa_sint_t pdiff = SAINT_MIN;
        for (i = 1, j = m - prefetch_distance - 1; i < j; i += 2)
        {
            libsais_prefetch(&SA[i + 2 * prefetch_distance]);

            libsais_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 0]) >> 1]); libsais_prefetch(&T[((sa_uint_t)SA[i + prefetch_distance + 0])]);
            libsais_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 1]) >> 1]); libsais_prefetch(&T[((sa_uint_t)SA[i + prefetch_distance + 1])]);

            fast_sint_t q = SA[i + 0], qlen = SAm[q >> 1]; sa_sint_t qdiff = SAINT_MIN;
            if (plen == qlen) { fast_sint_t l = 0; do { if (T[p + l] != T[q + l]) { break; } } while (++l < qlen); qdiff = (sa_sint_t)(l - qlen) & SAINT_MIN; }

            SAm[p >> 1] = name | (pdiff & qdiff); name += (qdiff < 0);

            p = SA[i + 1]; plen = SAm[p >> 1]; pdiff = SAINT_MIN;
            if (qlen == plen) { fast_sint_t l = 0; do { if (T[q + l] != T[p + l]) { break; } } while (++l < plen); pdiff = (sa_sint_t)(l - plen) & SAINT_MIN; }

            SAm[q >> 1] = name | (qdiff & pdiff); name += (pdiff < 0);
        }

        for (j += prefetch_distance + 1; i < j; i += 1)
        {
            fast_sint_t
q = SA[i], qlen = SAm[q >> 1]; sa_sint_t qdiff = SAINT_MIN;
            if (plen == qlen) { fast_sint_t l = 0; do { if (T[p + l] != T[q + l]) { break; } } while (++l < plen); qdiff = (sa_sint_t)(l - plen) & SAINT_MIN; }

            SAm[p >> 1] = name | (pdiff & qdiff); name += (qdiff < 0);

            p = q; plen = qlen; pdiff = qdiff;
        }

        /* Flush the final pending entry. */
        SAm[p >> 1] = name | pdiff; name++;
    }

    if (name <= m)
    {
        libsais_mark_distinct_lms_suffixes_32s_omp(SA, n, m, threads);
    }

    /* name started at 1, so the distinct count is name - 1. */
    return name - 1;
}

/* Replaces each entry of SA[block) with its translation through the lookup
   table stored at SA[n - m ..): SA[i] = SAnm[SA[i]].  4x unrolled. */
static void libsais_reconstruct_lms_suffixes(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    const sa_sint_t * RESTRICT SAnm = &SA[n - m];

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4)
    {
        libsais_prefetchw(&SA[i + 2 * prefetch_distance]);

        libsais_prefetch(&SAnm[SA[i + prefetch_distance + 0]]);
        libsais_prefetch(&SAnm[SA[i + prefetch_distance + 1]]);
        libsais_prefetch(&SAnm[SA[i + prefetch_distance + 2]]);
        libsais_prefetch(&SAnm[SA[i + prefetch_distance + 3]]);

        SA[i + 0] = SAnm[SA[i + 0]];
        SA[i + 1] = SAnm[SA[i + 1]];
        SA[i + 2] = SAnm[SA[i + 2]];
        SA[i + 3] = SAnm[SA[i + 3]];
    }

    for (j += prefetch_distance + 3; i < j; i += 1)
    {
        SA[i] = SAnm[SA[i]];
    }
}

/* Parallel wrapper for libsais_reconstruct_lms_suffixes over SA[0..m)
   (continues in the next chunk). */
static void libsais_reconstruct_lms_suffixes_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads)
{
#if defined(_OPENMP)
    #pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();

        fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16);
        fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ?
omp_block_stride : m - omp_block_start;
#else
        UNUSED(threads);

        fast_sint_t omp_block_start = 0;
        fast_sint_t omp_block_size = m;
#endif

        libsais_reconstruct_lms_suffixes(SA, n, m, omp_block_start, omp_block_size);
    }
}

/* Moves the m sorted LMS suffixes from the front of SA into their final
   bucket-end positions (8-bit alphabet), zero-filling the gaps between
   buckets as it sweeps from the highest symbol down. */
static void libsais_place_lms_suffixes_interval_8u(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, const sa_sint_t * RESTRICT buckets)
{
    const sa_sint_t * RESTRICT bucket_end = &buckets[7 * ALPHABET_SIZE];

    fast_sint_t c, j = n;
    for (c = ALPHABET_SIZE - 2; c >= 0; --c)
    {
        /* l = number of LMS suffixes in bucket c (difference of adjacent
           bucket counters). */
        fast_sint_t l = (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1) + BUCKETS_INDEX2(1, 0)] - (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1)];
        if (l > 0)
        {
            fast_sint_t i = bucket_end[c];
            if (j - i > 0)
            {
                /* Zero the gap between this bucket's end and the previous
                   placement. */
                memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t));
            }

            /* Move the next l entries from the packed prefix (m is consumed
               right-to-left) to the end of bucket c. */
            memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t));
        }
    }

    memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t));
}

/* Same placement as the 8u version, but for a 32-bit alphabet of size k with
   the 4k bucket layout (bucket ends at buckets[3k..)). */
static void libsais_place_lms_suffixes_interval_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets)
{
    const sa_sint_t * RESTRICT bucket_end = &buckets[3 * k];

    fast_sint_t c, j = n;
    for (c = (fast_sint_t)k - 2; c >= 0; --c)
    {
        fast_sint_t l = (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1) + BUCKETS_INDEX2(1, 0)] - (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1)];
        if (l > 0)
        {
            fast_sint_t i = bucket_end[c];
            if (j - i > 0)
            {
                memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t));
            }

            memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t));
        }
    }

    memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t));
}

/* 2k bucket-layout placement; bucket starts come straight from buckets[c]. */
static void libsais_place_lms_suffixes_interval_32s_2k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets)
{
    fast_sint_t j = n;

    if (k > 1)
    {
        fast_sint_t c;
        for (c = BUCKETS_INDEX2((fast_sint_t)k - 2, 0); c >= BUCKETS_INDEX2(0, 0); c -= BUCKETS_INDEX2(1, 0))
        {
            fast_sint_t l = (fast_sint_t)buckets[c + BUCKETS_INDEX2(1, 1)] - (fast_sint_t)buckets[c + BUCKETS_INDEX2(0, 1)];
            if (l > 0)
            {
                fast_sint_t i =
buckets[c];
                if (j - i > 0)
                {
                    memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t));
                }

                memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t));
            }
        }
    }

    memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t));
}

/* 1k bucket-layout placement: walks the m packed suffixes backwards, and
   whenever the leading symbol changes, zero-fills the remainder of the old
   bucket before switching to the new bucket's boundary.  4x unrolled with
   prefetching of T[SA[...]]. */
static void libsais_place_lms_suffixes_interval_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t m, sa_sint_t * RESTRICT buckets)
{
    const fast_sint_t prefetch_distance = 32;

    sa_sint_t c = k - 1; fast_sint_t i, l = buckets[c];
    for (i = (fast_sint_t)m - 1; i >= prefetch_distance + 3; i -= 4)
    {
        libsais_prefetch(&SA[i - 2 * prefetch_distance]);

        libsais_prefetch(&T[SA[i - prefetch_distance - 0]]);
        libsais_prefetch(&T[SA[i - prefetch_distance - 1]]);
        libsais_prefetch(&T[SA[i - prefetch_distance - 2]]);
        libsais_prefetch(&T[SA[i - prefetch_distance - 3]]);

        sa_sint_t p0 = SA[i - 0]; if (T[p0] != c) { c = T[p0]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p0;
        sa_sint_t p1 = SA[i - 1]; if (T[p1] != c) { c = T[p1]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p1;
        sa_sint_t p2 = SA[i - 2]; if (T[p2] != c) { c = T[p2]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p2;
        sa_sint_t p3 = SA[i - 3]; if (T[p3] != c) { c = T[p3]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p3;
    }

    for (; i >= 0; i -= 1)
    {
        sa_sint_t p = SA[i]; if (T[p] != c) { c = T[p]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p;
    }

    memset(&SA[0], 0, (size_t)l * sizeof(sa_sint_t));
}

/* Histogram-based placement for the 6k layout: per-bucket LMS counts are read
   directly from buckets[BUCKETS_INDEX4(c, 1)] (continues in the next chunk). */
static void libsais_place_lms_suffixes_histogram_32s_6k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets)
{
    const sa_sint_t * RESTRICT bucket_end = &buckets[5 * k];

    fast_sint_t c, j = n;
    for (c = (fast_sint_t)k - 2; c >= 0; --c)
    {
        fast_sint_t l =
(fast_sint_t)buckets[BUCKETS_INDEX4(c, 1)];
        if (l > 0)
        {
            fast_sint_t i = bucket_end[c];
            if (j - i > 0)
            {
                /* Zero the gap above this bucket before placing into it. */
                memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t));
            }

            memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t));
        }
    }

    memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t));
}

/* Histogram-based placement, 4k bucket layout. */
static void libsais_place_lms_suffixes_histogram_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets)
{
    const sa_sint_t * RESTRICT bucket_end = &buckets[3 * k];

    fast_sint_t c, j = n;
    for (c = (fast_sint_t)k - 2; c >= 0; --c)
    {
        fast_sint_t l = (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1)];
        if (l > 0)
        {
            fast_sint_t i = bucket_end[c];
            if (j - i > 0)
            {
                memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t));
            }

            memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t));
        }
    }

    memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t));
}

/* Histogram-based placement, 2k bucket layout. */
static void libsais_place_lms_suffixes_histogram_32s_2k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets)
{
    fast_sint_t j = n;

    if (k > 1)
    {
        fast_sint_t c;
        for (c = BUCKETS_INDEX2((fast_sint_t)k - 2, 0); c >= BUCKETS_INDEX2(0, 0); c -= BUCKETS_INDEX2(1, 0))
        {
            fast_sint_t l = (fast_sint_t)buckets[c + BUCKETS_INDEX2(0, 1)];
            if (l > 0)
            {
                fast_sint_t i = buckets[c];
                if (j - i > 0)
                {
                    memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t));
                }

                memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t));
            }
        }
    }

    memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t));
}

/* Final left-to-right induction scan producing BWT output: each visited
   entry p is replaced by its preceding symbol (flagged with SAINT_MIN), and
   p - 1 is emitted into its symbol's bucket with its L/S type encoded in the
   top bit.  2x unrolled; prefetches guard against p <= 0 via the s > 0
   selects. */
static void libsais_final_bwt_scan_left_to_right_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetchw(&SA[i + 2 * prefetch_distance]);

        sa_sint_t s0 = SA[i + prefetch_distance + 0]; const
uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
        sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);

        sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[i + 0] = T[p0] | SAINT_MIN; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); }
        sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[i + 1] = T[p1] | SAINT_MIN; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); }
    }

    for (j += prefetch_distance + 1; i < j; i += 1)
    {
        sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[i] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); }
    }
}

/* Same BWT induction scan, additionally recording auxiliary indexes: every
   time a position p with (p & rm) == 0 is emitted, the current bucket cursor
   is stored in I[p / (rm + 1)] (rm is presumably a sampling-rate mask —
   TODO confirm against callers). */
static void libsais_final_bwt_aux_scan_left_to_right_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetchw(&SA[i + 2 * prefetch_distance]);

        sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
        sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ?
Ts1 : NULL);

        sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[i + 0] = T[p0] | SAINT_MIN; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); if ((p0 & rm) == 0) { I[p0 / (rm + 1)] = induction_bucket[T[p0]]; }}
        sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[i + 1] = T[p1] | SAINT_MIN; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); if ((p1 & rm) == 0) { I[p1 / (rm + 1)] = induction_bucket[T[p1]]; }}
    }

    for (j += prefetch_distance + 1; i < j; i += 1)
    {
        sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[i] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]]; } }
    }
}

/* Final left-to-right induction scan for plain suffix-array output: visited
   entries are unflagged via XOR SAINT_MIN (no BWT symbol is stored), and
   p - 1 is emitted into its symbol's bucket with its type in the top bit. */
static void libsais_final_sorting_scan_left_to_right_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetchw(&SA[i + 2 * prefetch_distance]);

        sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
        sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ?
Ts1 : NULL);

        sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); }
        sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); }
    }

    for (j += prefetch_distance + 1; i < j; i += 1)
    {
        sa_sint_t p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); }
    }
}

/* 32-bit variant of the final left-to-right sorting scan.  Uses a deeper
   two-stage prefetch pipeline (text at 2x distance, bucket slots at 1x)
   because both T and induction_bucket are 32-bit arrays here. */
static void libsais_final_sorting_scan_left_to_right_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetchw(&SA[i + 3 * prefetch_distance]);

        sa_sint_t s0 = SA[i + 2 * prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
        sa_sint_t s1 = SA[i + 2 * prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ?
Ts1 : NULL);

        sa_sint_t s2 = SA[i + 1 * prefetch_distance + 0]; if (s2 > 0) { libsais_prefetchw(&induction_bucket[T[s2 - 1]]); libsais_prefetch(&T[s2] - 2); }
        sa_sint_t s3 = SA[i + 1 * prefetch_distance + 1]; if (s3 > 0) { libsais_prefetchw(&induction_bucket[T[s3 - 1]]); libsais_prefetch(&T[s3] - 2); }

        sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); }
        sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); }
    }

    for (j += 2 * prefetch_distance + 1; i < j; i += 1)
    {
        sa_sint_t p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); }
    }
}

#if defined(_OPENMP)

/* Threaded BWT scan, phase 1 (prepare): performs the per-entry work of
   libsais_final_bwt_scan_left_to_right_8u, but instead of writing into the
   shared buckets it appends (symbol, index) pairs to a per-thread cache and
   counts symbols into a private bucket histogram.  Returns the number of
   cached entries. */
static fast_sint_t libsais_final_bwt_scan_left_to_right_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t));

    fast_sint_t i, j, count = 0;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetchw(&SA[i + 2 * prefetch_distance]);

        sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
        sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ?
Ts1 : NULL);

        sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[i + 0] = T[p0] | SAINT_MIN; buckets[cache[count].symbol = T[p0]]++; cache[count++].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); }
        sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[i + 1] = T[p1] | SAINT_MIN; buckets[cache[count].symbol = T[p1]]++; cache[count++].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); }
    }

    for (j += prefetch_distance + 1; i < j; i += 1)
    {
        sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[i] = T[p] | SAINT_MIN; buckets[cache[count].symbol = T[p]]++; cache[count++].index = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); }
    }

    return count;
}

/* Threaded sorting scan, phase 1 (prepare): like the BWT prepare above, but
   unflags SA entries with XOR SAINT_MIN and does not store BWT symbols.
   Returns the number of cached entries. */
static fast_sint_t libsais_final_sorting_scan_left_to_right_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t));

    fast_sint_t i, j, count = 0;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetchw(&SA[i + 2 * prefetch_distance]);

        sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
        sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ?
Ts1 : NULL); sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; buckets[cache[count].symbol = T[p0]]++; cache[count++].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; buckets[cache[count].symbol = T[p1]]++; cache[count++].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; buckets[cache[count].symbol = T[p]]++; cache[count++].index = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); } } return count; } static void libsais_final_order_scan_left_to_right_8u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = 0, j = count - 3; i < j; i += 4) { libsais_prefetch(&cache[i + prefetch_distance]); SA[buckets[cache[i + 0].symbol]++] = cache[i + 0].index; SA[buckets[cache[i + 1].symbol]++] = cache[i + 1].index; SA[buckets[cache[i + 2].symbol]++] = cache[i + 2].index; SA[buckets[cache[i + 3].symbol]++] = cache[i + 3].index; } for (j += 3; i < j; i += 1) { SA[buckets[cache[i].symbol]++] = cache[i].index; } } static void libsais_final_bwt_aux_scan_left_to_right_8u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = 0, j = count - 3; i < j; i += 4) { libsais_prefetch(&cache[i + prefetch_distance]); SA[buckets[cache[i + 0].symbol]++] = cache[i + 0].index; if ((cache[i + 0].index & rm) == 0) { I[(cache[i + 0].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 0].symbol]; } SA[buckets[cache[i + 1].symbol]++] = cache[i + 1].index; if ((cache[i + 1].index & rm) == 0) { I[(cache[i + 1].index & 
SAINT_MAX) / (rm + 1)] = buckets[cache[i + 1].symbol]; } SA[buckets[cache[i + 2].symbol]++] = cache[i + 2].index; if ((cache[i + 2].index & rm) == 0) { I[(cache[i + 2].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 2].symbol]; } SA[buckets[cache[i + 3].symbol]++] = cache[i + 3].index; if ((cache[i + 3].index & rm) == 0) { I[(cache[i + 3].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 3].symbol]; } } for (j += 3; i < j; i += 1) { SA[buckets[cache[i].symbol]++] = cache[i].index; if ((cache[i].index & rm) == 0) { I[(cache[i].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i].symbol]; } } } static void libsais_final_sorting_scan_left_to_right_32s_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? 
Ts1 : NULL); libsais_prefetchw(&cache[i + prefetch_distance]); sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; cache[i + 0].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); symbol0 = T[p0]; } cache[i + 0].symbol = symbol0; sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; cache[i + 1].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); symbol1 = T[p1]; } cache[i + 1].symbol = symbol1; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t symbol = SAINT_MIN, p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; cache[i].index = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); symbol = T[p]; } cache[i].symbol = symbol; } } static void libsais_final_sorting_scan_left_to_right_32s_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size; for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&cache[i + 2 * prefetch_distance]); sa_sint_t s0 = cache[i + prefetch_distance + 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais_prefetchw(s0 >= 0 ? Is0 : NULL); sa_sint_t s1 = cache[i + prefetch_distance + 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais_prefetchw(s1 >= 0 ? 
Is1 : NULL); sa_sint_t v0 = cache[i + 0].symbol; if (v0 >= 0) { cache[i + 0].symbol = induction_bucket[v0]++; if (cache[i + 0].symbol < omp_block_end) { sa_sint_t ni = cache[i + 0].symbol, np = cache[i + 0].index; cache[i + 0].index = np ^ SAINT_MIN; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] < T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } } } sa_sint_t v1 = cache[i + 1].symbol; if (v1 >= 0) { cache[i + 1].symbol = induction_bucket[v1]++; if (cache[i + 1].symbol < omp_block_end) { sa_sint_t ni = cache[i + 1].symbol, np = cache[i + 1].index; cache[i + 1].index = np ^ SAINT_MIN; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] < T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } } } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t v = cache[i].symbol; if (v >= 0) { cache[i].symbol = induction_bucket[v]++; if (cache[i].symbol < omp_block_end) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; cache[i].index = np ^ SAINT_MIN; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] < T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } } } } } static void libsais_final_bwt_scan_left_to_right_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = 
omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_final_bwt_scan_left_to_right_8u(T, SA, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais_final_bwt_scan_left_to_right_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { fast_sint_t t; for (t = 0; t < omp_num_threads; ++t) { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A + B; temp_bucket[c] = A; } } } #pragma omp barrier { libsais_final_order_scan_left_to_right_8u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count); } } #endif } } static void libsais_final_bwt_aux_scan_left_to_right_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_final_bwt_aux_scan_left_to_right_8u(T, SA, rm, I, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais_final_bwt_scan_left_to_right_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { fast_sint_t t; for (t = 0; t < omp_num_threads; ++t) { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A + B; temp_bucket[c] = A; } } } #pragma omp barrier { libsais_final_bwt_aux_scan_left_to_right_8u_block_place(SA, rm, I, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count); } } #endif } } static void libsais_final_sorting_scan_left_to_right_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_final_sorting_scan_left_to_right_8u(T, SA, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais_final_sorting_scan_left_to_right_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { fast_sint_t t; for (t = 0; t < omp_num_threads; ++t) { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A + B; temp_bucket[c] = A; } } } #pragma omp barrier { libsais_final_order_scan_left_to_right_8u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count); } } #endif } } static void libsais_final_sorting_scan_left_to_right_32s_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_final_sorting_scan_left_to_right_32s(T, SA, buckets, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais_final_sorting_scan_left_to_right_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { libsais_final_sorting_scan_left_to_right_32s_block_sort(T, buckets, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } } #endif static void libsais_final_bwt_scan_left_to_right_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, fast_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { SA[induction_bucket[T[(sa_sint_t)n - 1]]++] = ((sa_sint_t)n - 1) | ((sa_sint_t)(T[(sa_sint_t)n - 2] < T[(sa_sint_t)n - 1]) << (SAINT_BIT - 1)); if (threads == 1 || n < 65536) { libsais_final_bwt_scan_left_to_right_8u(T, SA, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start; for (block_start = 0; block_start < n; ) { if (SA[block_start] == 0) { block_start++; } else { fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > n) { block_max_end = n;} fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; } fast_sint_t block_size = block_end - block_start; if (block_size < 32) { for (; block_start < block_end; block_start += 1) { sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; SA[block_start] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); } } } else { libsais_final_bwt_scan_left_to_right_8u_block_omp(T, SA, induction_bucket, 
block_start, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif } static void libsais_final_bwt_aux_scan_left_to_right_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, fast_sint_t n, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { SA[induction_bucket[T[(sa_sint_t)n - 1]]++] = ((sa_sint_t)n - 1) | ((sa_sint_t)(T[(sa_sint_t)n - 2] < T[(sa_sint_t)n - 1]) << (SAINT_BIT - 1)); if ((((sa_sint_t)n - 1) & rm) == 0) { I[((sa_sint_t)n - 1) / (rm + 1)] = induction_bucket[T[(sa_sint_t)n - 1]]; } if (threads == 1 || n < 65536) { libsais_final_bwt_aux_scan_left_to_right_8u(T, SA, rm, I, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start; for (block_start = 0; block_start < n; ) { if (SA[block_start] == 0) { block_start++; } else { fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > n) { block_max_end = n;} fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; } fast_sint_t block_size = block_end - block_start; if (block_size < 32) { for (; block_start < block_end; block_start += 1) { sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; SA[block_start] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]]; } } } } else { libsais_final_bwt_aux_scan_left_to_right_8u_block_omp(T, SA, rm, I, induction_bucket, block_start, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif } static void libsais_final_sorting_scan_left_to_right_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, fast_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t 
threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { SA[induction_bucket[T[(sa_sint_t)n - 1]]++] = ((sa_sint_t)n - 1) | ((sa_sint_t)(T[(sa_sint_t)n - 2] < T[(sa_sint_t)n - 1]) << (SAINT_BIT - 1)); if (threads == 1 || n < 65536) { libsais_final_sorting_scan_left_to_right_8u(T, SA, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start; for (block_start = 0; block_start < n; ) { if (SA[block_start] == 0) { block_start++; } else { fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > n) { block_max_end = n;} fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; } fast_sint_t block_size = block_end - block_start; if (block_size < 32) { for (; block_start < block_end; block_start += 1) { sa_sint_t p = SA[block_start]; SA[block_start] = p ^ SAINT_MIN; if (p > 0) { p--; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); } } } else { libsais_final_sorting_scan_left_to_right_8u_block_omp(T, SA, induction_bucket, block_start, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif } static void libsais_final_sorting_scan_left_to_right_32s_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { SA[induction_bucket[T[n - 1]]++] = (n - 1) | ((sa_sint_t)(T[n - 2] < T[n - 1]) << (SAINT_BIT - 1)); if (threads == 1 || n < 65536) { libsais_final_sorting_scan_left_to_right_32s(T, SA, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = 0; block_start < n; block_start = block_end) { block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > n) { block_end = n; } 
libsais_final_sorting_scan_left_to_right_32s_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, block_start, block_end - block_start, threads); } } #else UNUSED(thread_state); #endif } static sa_sint_t libsais_final_bwt_scan_right_to_left_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; sa_sint_t index = -1; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&SA[i - 2 * prefetch_distance]); sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t p0 = SA[i - 0]; index = (p0 == 0) ? (sa_sint_t)(i - 0) : index; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint8_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p0 : t; } sa_sint_t p1 = SA[i - 1]; index = (p1 == 0) ? (sa_sint_t)(i - 1) : index; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint8_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p1 : t; } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; index = (p == 0) ? (sa_sint_t)i : index; SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint8_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? 
p : t; } } return index; } static void libsais_final_bwt_aux_scan_right_to_left_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&SA[i - 2 * prefetch_distance]); sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint8_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p0 : t; if ((p0 & rm) == 0) { I[p0 / (rm + 1)] = induction_bucket[T[p0]] + 1; } } sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint8_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p1 : t; if ((p1 & rm) == 0) { I[p1 / (rm + 1)] = induction_bucket[T[p1]] + 1; } } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint8_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? 
p : t; if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]] + 1; } } } } static void libsais_final_sorting_scan_right_to_left_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&SA[i - 2 * prefetch_distance]); sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[--induction_bucket[T[p0]]] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[--induction_bucket[T[p1]]] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[--induction_bucket[T[p]]] = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); } } } static void libsais_final_sorting_scan_right_to_left_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&SA[i - 3 * prefetch_distance]); sa_sint_t s0 = SA[i - 2 * prefetch_distance - 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? 
Ts0 : NULL); sa_sint_t s1 = SA[i - 2 * prefetch_distance - 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t s2 = SA[i - 1 * prefetch_distance - 0]; if (s2 > 0) { libsais_prefetchw(&induction_bucket[T[s2 - 1]]); libsais_prefetch(&T[s2] - 2); } sa_sint_t s3 = SA[i - 1 * prefetch_distance - 1]; if (s3 > 0) { libsais_prefetchw(&induction_bucket[T[s3 - 1]]); libsais_prefetch(&T[s3] - 2); } sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[--induction_bucket[T[p0]]] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[--induction_bucket[T[p1]]] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); } } for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[--induction_bucket[T[p]]] = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); } } } #if defined(_OPENMP) static fast_sint_t libsais_final_bwt_scan_right_to_left_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&SA[i - 2 * prefetch_distance]); sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? 
Ts1 : NULL); sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint8_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count++].index = (c0 <= c1) ? p0 : t; } sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint8_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count++].index = (c0 <= c1) ? p1 : t; } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint8_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count++].index = (c0 <= c1) ? p : t; } } return count; } static fast_sint_t libsais_final_bwt_aux_scan_right_to_left_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&SA[i - 2 * prefetch_distance]); sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint8_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count].index = (c0 <= c1) ? 
p0 : t; cache[count + 1].index = p0; count += 2; } sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint8_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count].index = (c0 <= c1) ? p1 : t; cache[count + 1].index = p1; count += 2; } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint8_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count].index = (c0 <= c1) ? p : t; cache[count + 1].index = p; count += 2; } } return count; } static fast_sint_t libsais_final_sorting_scan_right_to_left_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&SA[i - 2 * prefetch_distance]); sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? 
// (tail of the preceding *_block_prepare right-to-left scan; its start is outside this excerpt)
Ts1 : NULL); sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; buckets[cache[count].symbol = T[p0]]++; cache[count++].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; buckets[cache[count].symbol = T[p1]]++; cache[count++].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; buckets[cache[count].symbol = T[p]]++; cache[count++].index = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); } } return count; }
/* Flushes a thread's gathered cache back into SA: each cached entry is written at the next
   free (descending) slot of its symbol's bucket. 4x unrolled with software prefetch. */
static void libsais_final_order_scan_right_to_left_8u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = 0, j = count - 3; i < j; i += 4) { libsais_prefetch(&cache[i + prefetch_distance]); SA[--buckets[cache[i + 0].symbol]] = cache[i + 0].index; SA[--buckets[cache[i + 1].symbol]] = cache[i + 1].index; SA[--buckets[cache[i + 2].symbol]] = cache[i + 2].index; SA[--buckets[cache[i + 3].symbol]] = cache[i + 3].index; } for (j += 3; i < j; i += 1) { SA[--buckets[cache[i].symbol]] = cache[i].index; } }
/* Placement as above, but cache entries are consumed in pairs: the even entry is placed into SA,
   and the odd entry's index is tested against the sampling mask rm — when (index & rm) == 0
   (rm presumably is a 2^k - 1 sampling-rate mask; confirm at call sites) the 1-based SA slot
   just claimed is recorded in the auxiliary BWT index array I. */
static void libsais_final_bwt_aux_scan_right_to_left_8u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = 0, j = count - 6; i < j; i += 8) { libsais_prefetch(&cache[i + prefetch_distance]); SA[--buckets[cache[i + 0].symbol]] = cache[i + 0].index; if ((cache[i + 1].index & rm) == 0) { I[cache[i + 1].index / (rm + 1)] = buckets[cache[i + 0].symbol] + 1; } SA[--buckets[cache[i + 2].symbol]] = cache[i + 2].index; if ((cache[i + 3].index & rm) == 0) { I[cache[i + 3].index / (rm + 1)] =
buckets[cache[i + 2].symbol] + 1; } SA[--buckets[cache[i + 4].symbol]] = cache[i + 4].index; if ((cache[i + 5].index & rm) == 0) { I[cache[i + 5].index / (rm + 1)] = buckets[cache[i + 4].symbol] + 1; } SA[--buckets[cache[i + 6].symbol]] = cache[i + 6].index; if ((cache[i + 7].index & rm) == 0) { I[cache[i + 7].index / (rm + 1)] = buckets[cache[i + 6].symbol] + 1; } } for (j += 6; i < j; i += 2) { SA[--buckets[cache[i].symbol]] = cache[i].index; if ((cache[i + 1].index & rm) == 0) { I[(cache[i + 1].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i].symbol] + 1; } } }
/* Gather phase of the blocked 32-bit right-to-left sorting scan: for each SA entry in the block,
   strips the induced-bit (p & SAINT_MAX is written back), and stores the predecessor position
   (with its own type bit) plus its leading symbol into the cache; symbol stays SAINT_MIN when
   there is nothing to induce (p <= 0). */
static void libsais_final_sorting_scan_right_to_left_32s_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ?
Ts1 : NULL); libsais_prefetchw(&cache[i + prefetch_distance]); sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; cache[i + 0].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); symbol0 = T[p0]; } cache[i + 0].symbol = symbol0; sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; cache[i + 1].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); symbol1 = T[p1]; } cache[i + 1].symbol = symbol1; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t symbol = SAINT_MIN, p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; cache[i].index = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); symbol = T[p]; } cache[i].symbol = symbol; } }
/* Sort phase (runs on a single thread over the whole block): walks the cache right to left,
   claiming a destination slot by decrementing induction_bucket[symbol]; when the destination
   lands inside this block the induced suffix is chained directly through the cache (cache[ni])
   instead of touching SA, so the block can be placed later in one pass. Entries with a
   SAINT_MIN (negative) symbol are skipped. */
static void libsais_final_sorting_scan_right_to_left_32s_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&cache[i - 2 * prefetch_distance]); sa_sint_t s0 = cache[i - prefetch_distance - 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais_prefetchw(s0 >= 0 ? Is0 : NULL); sa_sint_t s1 = cache[i - prefetch_distance - 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais_prefetchw(s1 >= 0 ?
Is1 : NULL); sa_sint_t v0 = cache[i - 0].symbol; if (v0 >= 0) { cache[i - 0].symbol = --induction_bucket[v0]; if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 0].symbol, np = cache[i - 0].index; cache[i - 0].index = np & SAINT_MAX; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] > T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } } } sa_sint_t v1 = cache[i - 1].symbol; if (v1 >= 0) { cache[i - 1].symbol = --induction_bucket[v1]; if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 1].symbol, np = cache[i - 1].index; cache[i - 1].index = np & SAINT_MAX; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] > T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } } } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t v = cache[i].symbol; if (v >= 0) { cache[i].symbol = --induction_bucket[v]; if (cache[i].symbol >= omp_block_start) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; cache[i].index = np & SAINT_MAX; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] > T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } } } } }
/* Parallel right-to-left BWT induction over one SA block. Structure:
     1) each thread runs *_block_prepare on its slice, filling a private cache and per-symbol
        counts in its private buckets;
     2) after a barrier, the master converts per-thread counts into per-thread bucket bases
        (induction_bucket[c] is reduced by each thread's count; temp_bucket[c] keeps the base),
        iterating threads from last to first so later threads get the higher slots;
     3) after another barrier, each thread places its cached entries via its private buckets.
   Falls back to the serial scan when the team has a single thread. */
static void libsais_final_bwt_scan_right_to_left_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) {
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1;
#endif
/* slice the block into 16-aligned per-thread chunks; the last thread takes the remainder */
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size =
omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_final_bwt_scan_right_to_left_8u(T, SA, induction_bucket, omp_block_start, omp_block_size); }
#if defined(_OPENMP)
else { { thread_state[omp_thread_num].state.count = libsais_final_bwt_scan_right_to_left_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size); }
#pragma omp barrier
#pragma omp master
{ fast_sint_t t; for (t = omp_num_threads - 1; t >= 0; --t) { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A - B; temp_bucket[c] = A; } } }
#pragma omp barrier
{ libsais_final_order_scan_right_to_left_8u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count); } }
#endif
} }
/* Same prepare / merge-buckets / place pipeline as above, but the placement step also maintains
   the auxiliary BWT index array I (sampling rate encoded by the rm mask). */
static void libsais_final_bwt_aux_scan_right_to_left_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) {
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1;
#endif
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ?
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_final_bwt_aux_scan_right_to_left_8u(T, SA, rm, I, induction_bucket, omp_block_start, omp_block_size); }
#if defined(_OPENMP)
else { { thread_state[omp_thread_num].state.count = libsais_final_bwt_aux_scan_right_to_left_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size); }
#pragma omp barrier
#pragma omp master
{ fast_sint_t t; for (t = omp_num_threads - 1; t >= 0; --t) { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A - B; temp_bucket[c] = A; } } }
#pragma omp barrier
{ libsais_final_bwt_aux_scan_right_to_left_8u_block_place(SA, rm, I, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count); } }
#endif
} }
/* Parallel right-to-left full-order (suffix array) induction over one SA block; identical
   prepare / merge-buckets / place structure to the BWT variants above. */
static void libsais_final_sorting_scan_right_to_left_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) {
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1;
#endif
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ?
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_final_sorting_scan_right_to_left_8u(T, SA, induction_bucket, omp_block_start, omp_block_size); }
#if defined(_OPENMP)
else { { thread_state[omp_thread_num].state.count = libsais_final_sorting_scan_right_to_left_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size); }
#pragma omp barrier
#pragma omp master
{ fast_sint_t t; for (t = omp_num_threads - 1; t >= 0; --t) { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A - B; temp_bucket[c] = A; } } }
#pragma omp barrier
{ libsais_final_order_scan_right_to_left_8u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count); } }
#endif
} }
/* Blocked 32-bit right-to-left induction: parallel gather into the shared cache, a master-only
   sort pass over the whole block (cross-slice induction happens inside the cache), then a
   parallel compact-and-place back into SA; phases are separated by barriers. Serial fallback
   when the team has a single thread. */
static void libsais_final_sorting_scan_right_to_left_32s_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) {
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1;
#endif
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ?
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_final_sorting_scan_right_to_left_32s(T, SA, buckets, omp_block_start, omp_block_size); }
#if defined(_OPENMP)
else { { libsais_final_sorting_scan_right_to_left_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); }
#pragma omp barrier
#pragma omp master
{ libsais_final_sorting_scan_right_to_left_32s_block_sort(T, buckets, cache - block_start, block_start, block_size); }
#pragma omp barrier
{ libsais_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } }
#endif
} }
#endif
/* Top-level driver for right-to-left BWT induction. Walks SA from the end; a zero entry is the
   BWT primary index (remembered and returned). Between zero entries it carves chunks bounded by
   block_max_end (sized from threads * LIBSAIS_PER_THREAD_CACHE_SIZE): chunks shorter than 32
   are processed inline (writing the BWT symbol c1 into SA and pushing the predecessor into its
   bucket, with the sign bit flagging c0 > c1), larger chunks go to the parallel block routine.
   Serial path for one thread or small n. Returns the primary index, or -1 if none was seen. */
static sa_sint_t libsais_final_bwt_scan_right_to_left_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t index = -1; if (threads == 1 || n < 65536) { index = libsais_final_bwt_scan_right_to_left_8u(T, SA, induction_bucket, 0, n); }
#if defined(_OPENMP)
else { fast_sint_t block_start; for (block_start = (fast_sint_t)n - 1; block_start >= 0; ) { if (SA[block_start] == 0) { index = (sa_sint_t)block_start--; } else { fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end < 0) { block_max_end = -1; } fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; } fast_sint_t block_size = block_start - block_end; if (block_size < 32) { for (; block_start > block_end; block_start -= 1) { sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; uint8_t c0 = T[p - (p > 0)], c1 = T[p]; SA[block_start] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ?
p : t; } } } else { libsais_final_bwt_scan_right_to_left_8u_block_omp(T, SA, induction_bucket, block_end + 1, block_size, threads, thread_state); block_start = block_end; } } } }
#else
UNUSED(thread_state);
#endif
return index; }
/* Driver as above for the BWT-with-auxiliary-index variant: the inline path additionally
   samples I at predecessor positions where (p & rm) == 0. No index is returned (the zero entry
   is simply skipped). */
static void libsais_final_bwt_aux_scan_right_to_left_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (threads == 1 || n < 65536) { libsais_final_bwt_aux_scan_right_to_left_8u(T, SA, rm, I, induction_bucket, 0, n); }
#if defined(_OPENMP)
else { fast_sint_t block_start; for (block_start = (fast_sint_t)n - 1; block_start >= 0; ) { if (SA[block_start] == 0) { block_start--; } else { fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * ((LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads) / 2); if (block_max_end < 0) { block_max_end = -1; } fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; } fast_sint_t block_size = block_start - block_end; if (block_size < 32) { for (; block_start > block_end; block_start -= 1) { sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; uint8_t c0 = T[p - (p > 0)], c1 = T[p]; SA[block_start] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ?
p : t; if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]] + 1; } } } } else { libsais_final_bwt_aux_scan_right_to_left_8u_block_omp(T, SA, rm, I, induction_bucket, block_end + 1, block_size, threads, thread_state); block_start = block_end; } } } }
#else
UNUSED(thread_state);
#endif
}
/* Driver for the plain right-to-left suffix-order induction (no BWT output); same chunking over
   zero-bounded runs as the BWT drivers. Note the clamp here is "< -1", equivalent in effect to
   the "< 0" used by the siblings since the clamp target is -1. */
static void libsais_final_sorting_scan_right_to_left_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (threads == 1 || n < 65536) { libsais_final_sorting_scan_right_to_left_8u(T, SA, induction_bucket, 0, n); }
#if defined(_OPENMP)
else { fast_sint_t block_start; for (block_start = (fast_sint_t)n - 1; block_start >= 0; ) { if (SA[block_start] == 0) { block_start--; } else { fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end < -1) { block_max_end = -1; } fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; } fast_sint_t block_size = block_start - block_end; if (block_size < 32) { for (; block_start > block_end; block_start -= 1) { sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; SA[--induction_bucket[T[p]]] = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); } } } else { libsais_final_sorting_scan_right_to_left_8u_block_omp(T, SA, induction_bucket, block_end + 1, block_size, threads, thread_state); block_start = block_end; } } } }
#else
UNUSED(thread_state);
#endif
}
/* 32-bit driver: processes fixed-size blocks (threads * LIBSAIS_PER_THREAD_CACHE_SIZE) back to
   front, delegating each to the blocked gather/sort/place routine. */
static void libsais_final_sorting_scan_right_to_left_32s_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (threads == 1 || n < 65536) { libsais_final_sorting_scan_right_to_left_32s(T, SA, induction_bucket, 0, n); }
#if defined(_OPENMP)
else { fast_sint_t
block_start, block_end; for (block_start = (fast_sint_t)n - 1; block_start >= 0; block_start = block_end) { block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < 0) { block_end = -1; } libsais_final_sorting_scan_right_to_left_32s_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads); } }
#else
UNUSED(thread_state);
#endif
}
/* Zeroes SA between bucket_start[c] and bucket_end[c] for every symbol c, one bucket per
   parallel loop iteration (schedule(static, 1) interleaves buckets across threads). */
static void libsais_clear_lms_suffixes_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT bucket_start, sa_sint_t * RESTRICT bucket_end, sa_sint_t threads) { fast_sint_t c;
#if defined(_OPENMP)
#pragma omp parallel for schedule(static, 1) num_threads(threads) if(threads > 1 && n >= 65536)
#else
UNUSED(threads); UNUSED(n);
#endif
for (c = 0; c < k; ++c) { if (bucket_end[c] > bucket_start[c]) { memset(&SA[bucket_start[c]], 0, ((size_t)bucket_end[c] - (size_t)bucket_start[c]) * sizeof(sa_sint_t)); } } }
/* Runs the two final induction passes (left-to-right with buckets[6k], then right-to-left with
   buckets[7k]), clearing the LMS range between them on large parallel runs. Variant selection:
   bwt == 0 -> plain suffix order; bwt with I != NULL -> BWT with auxiliary index sampled at
   rate r; otherwise plain BWT whose primary index is returned. Returns 0 in the first two
   cases. */
static sa_sint_t libsais_induce_final_order_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t bwt, sa_sint_t r, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (!bwt) { libsais_final_sorting_scan_left_to_right_8u_omp(T, SA, n, &buckets[6 * ALPHABET_SIZE], threads, thread_state); if (threads > 1 && n >= 65536) { libsais_clear_lms_suffixes_omp(SA, n, ALPHABET_SIZE, &buckets[6 * ALPHABET_SIZE], &buckets[7 * ALPHABET_SIZE], threads); } libsais_final_sorting_scan_right_to_left_8u_omp(T, SA, n, &buckets[7 * ALPHABET_SIZE], threads, thread_state); return 0; } else if (I != NULL) { libsais_final_bwt_aux_scan_left_to_right_8u_omp(T, SA, n, r - 1, I, &buckets[6 * ALPHABET_SIZE], threads, thread_state); if (threads > 1 && n >= 65536) { libsais_clear_lms_suffixes_omp(SA, n, ALPHABET_SIZE, &buckets[6 * ALPHABET_SIZE], &buckets[7 * ALPHABET_SIZE], threads); } libsais_final_bwt_aux_scan_right_to_left_8u_omp(T, SA, n, r - 1, I,
&buckets[7 * ALPHABET_SIZE], threads, thread_state); return 0; } else { libsais_final_bwt_scan_left_to_right_8u_omp(T, SA, n, &buckets[6 * ALPHABET_SIZE], threads, thread_state); if (threads > 1 && n >= 65536) { libsais_clear_lms_suffixes_omp(SA, n, ALPHABET_SIZE, &buckets[6 * ALPHABET_SIZE], &buckets[7 * ALPHABET_SIZE], threads); } return libsais_final_bwt_scan_right_to_left_8u_omp(T, SA, n, &buckets[7 * ALPHABET_SIZE], threads, thread_state); } }
/* The 32-bit final-order inductions below differ only in which slices of the buckets array the
   6k / 4k / 2k memory layouts reserve for the two passes. */
static void libsais_induce_final_order_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais_final_sorting_scan_left_to_right_32s_omp(T, SA, n, &buckets[4 * k], threads, thread_state); libsais_final_sorting_scan_right_to_left_32s_omp(T, SA, n, &buckets[5 * k], threads, thread_state); }
static void libsais_induce_final_order_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais_final_sorting_scan_left_to_right_32s_omp(T, SA, n, &buckets[2 * k], threads, thread_state); libsais_final_sorting_scan_right_to_left_32s_omp(T, SA, n, &buckets[3 * k], threads, thread_state); }
static void libsais_induce_final_order_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais_final_sorting_scan_left_to_right_32s_omp(T, SA, n, &buckets[1 * k], threads, thread_state); libsais_final_sorting_scan_right_to_left_32s_omp(T, SA, n, &buckets[0 * k], threads, thread_state); }
/* 1k layout has no spare bucket slices, so each pass recounts symbol frequencies and rebuilds
   the bucket boundaries (start for the L2R pass, end for the R2L pass) in place. */
static void libsais_induce_final_order_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais_count_suffixes_32s(T, n, k,
buckets); libsais_initialize_buckets_start_32s_1k(k, buckets); libsais_final_sorting_scan_left_to_right_32s_omp(T, SA, n, buckets, threads, thread_state); libsais_count_suffixes_32s(T, n, k, buckets); libsais_initialize_buckets_end_32s_1k(k, buckets); libsais_final_sorting_scan_right_to_left_32s_omp(T, SA, n, buckets, threads, thread_state); }
/* Renumbers sorted LMS suffixes in SA[omp_block_start .. +omp_block_size): a negative entry in
   SAm[p >> 1] marks the first occurrence of a new name (unique suffix) — it bumps the unique
   counter f, tags T[p] via the sign bit, and seeds the name from the current rank i; every name
   (unique or not) is then rebased by the running count f so unique names can later be split
   out. Takes the incoming unique count f (prefix sum from earlier slices) and returns the
   updated total. 4x unrolled with prefetch of both SA and the SAm targets. */
static sa_sint_t libsais_renumber_unique_and_nonunique_lms_suffixes_32s(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t f, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT SAm = &SA[m]; sa_sint_t i, j; for (i = (sa_sint_t)omp_block_start, j = (sa_sint_t)omp_block_start + (sa_sint_t)omp_block_size - 2 * (sa_sint_t)prefetch_distance - 3; i < j; i += 4) { libsais_prefetch(&SA[i + 3 * prefetch_distance]); libsais_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 0]) >> 1]); libsais_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 1]) >> 1]); libsais_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 2]) >> 1]); libsais_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 3]) >> 1]); sa_uint_t q0 = (sa_uint_t)SA[i + prefetch_distance + 0]; const sa_sint_t * Tq0 = &T[q0]; libsais_prefetchw(SAm[q0 >> 1] < 0 ? Tq0 : NULL); sa_uint_t q1 = (sa_uint_t)SA[i + prefetch_distance + 1]; const sa_sint_t * Tq1 = &T[q1]; libsais_prefetchw(SAm[q1 >> 1] < 0 ? Tq1 : NULL); sa_uint_t q2 = (sa_uint_t)SA[i + prefetch_distance + 2]; const sa_sint_t * Tq2 = &T[q2]; libsais_prefetchw(SAm[q2 >> 1] < 0 ? Tq2 : NULL); sa_uint_t q3 = (sa_uint_t)SA[i + prefetch_distance + 3]; const sa_sint_t * Tq3 = &T[q3]; libsais_prefetchw(SAm[q3 >> 1] < 0 ?
Tq3 : NULL); sa_uint_t p0 = (sa_uint_t)SA[i + 0]; sa_sint_t s0 = SAm[p0 >> 1]; if (s0 < 0) { T[p0] |= SAINT_MIN; f++; s0 = i + 0 + SAINT_MIN + f; } SAm[p0 >> 1] = s0 - f; sa_uint_t p1 = (sa_uint_t)SA[i + 1]; sa_sint_t s1 = SAm[p1 >> 1]; if (s1 < 0) { T[p1] |= SAINT_MIN; f++; s1 = i + 1 + SAINT_MIN + f; } SAm[p1 >> 1] = s1 - f; sa_uint_t p2 = (sa_uint_t)SA[i + 2]; sa_sint_t s2 = SAm[p2 >> 1]; if (s2 < 0) { T[p2] |= SAINT_MIN; f++; s2 = i + 2 + SAINT_MIN + f; } SAm[p2 >> 1] = s2 - f; sa_uint_t p3 = (sa_uint_t)SA[i + 3]; sa_sint_t s3 = SAm[p3 >> 1]; if (s3 < 0) { T[p3] |= SAINT_MIN; f++; s3 = i + 3 + SAINT_MIN + f; } SAm[p3 >> 1] = s3 - f; } for (j += 2 * (sa_sint_t)prefetch_distance + 3; i < j; i += 1) { sa_uint_t p = (sa_uint_t)SA[i]; sa_sint_t s = SAm[p >> 1]; if (s < 0) { T[p] |= SAINT_MIN; f++; s = i + SAINT_MIN + f; } SAm[p >> 1] = s - f; } return f; }
/* Partitions the entries of SA[m + block) by sign in a single right-to-left pass: negative
   entries (sign bit stripped) are packed downward at cursor l, positive entries (decremented
   by one) downward at cursor r; both cursors are read from and written back through *pl / *pr
   so slices can be chained. */
static void libsais_compact_unique_and_nonunique_lms_suffixes_32s(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t * pl, fast_sint_t * pr, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT SAl = &SA[0]; sa_sint_t * RESTRICT SAr = &SA[0]; fast_sint_t i, j, l = *pl - 1, r = *pr - 1; for (i = (fast_sint_t)m + omp_block_start + omp_block_size - 1, j = (fast_sint_t)m + omp_block_start + 3; i >= j; i -= 4) { libsais_prefetch(&SA[i - prefetch_distance]); sa_sint_t p0 = SA[i - 0]; SAl[l] = p0 & SAINT_MAX; l -= p0 < 0; SAr[r] = p0 - 1; r -= p0 > 0; sa_sint_t p1 = SA[i - 1]; SAl[l] = p1 & SAINT_MAX; l -= p1 < 0; SAr[r] = p1 - 1; r -= p1 > 0; sa_sint_t p2 = SA[i - 2]; SAl[l] = p2 & SAINT_MAX; l -= p2 < 0; SAr[r] = p2 - 1; r -= p2 > 0; sa_sint_t p3 = SA[i - 3]; SAl[l] = p3 & SAINT_MAX; l -= p3 < 0; SAr[r] = p3 - 1; r -= p3 > 0; } for (j -= 3; i >= j; i -= 1) { sa_sint_t p = SA[i]; SAl[l] = p & SAINT_MAX; l -= p < 0; SAr[r] = p - 1; r -= p > 0; } *pl = l + 1; *pr = r + 1; }
#if defined(_OPENMP)
/* Counts entries whose SAm[SA[i] >> 1] is negative, i.e. suffixes the renumbering pass will
   treat as unique; four partial sums shorten the dependency chain. Used to pre-compute each
   thread's starting f. */
static sa_sint_t libsais_count_unique_suffixes(sa_sint_t *
RESTRICT SA, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT SAm = &SA[m]; fast_sint_t i, j; sa_sint_t f0 = 0, f1 = 0, f2 = 0, f3 = 0; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais_prefetch(&SA[i + 2 * prefetch_distance]); libsais_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 0]) >> 1]); libsais_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 1]) >> 1]); libsais_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 2]) >> 1]); libsais_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 3]) >> 1]); f0 += SAm[((sa_uint_t)SA[i + 0]) >> 1] < 0; f1 += SAm[((sa_uint_t)SA[i + 1]) >> 1] < 0; f2 += SAm[((sa_uint_t)SA[i + 2]) >> 1] < 0; f3 += SAm[((sa_uint_t)SA[i + 3]) >> 1] < 0; } for (j += prefetch_distance + 3; i < j; i += 1) { f0 += SAm[((sa_uint_t)SA[i]) >> 1] < 0; } return f0 + f1 + f2 + f3; }
#endif
/* Parallel renumbering: each thread counts the unique suffixes in its slice, then (after a
   barrier) renumbers its slice starting from the prefix sum of the earlier threads' counts;
   the last thread also produces the global total f, which is returned. */
static sa_sint_t libsais_renumber_unique_and_nonunique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t f = 0;
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1;
#endif
fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ?
omp_block_stride : m - omp_block_start; if (omp_num_threads == 1) { f = libsais_renumber_unique_and_nonunique_lms_suffixes_32s(T, SA, m, 0, omp_block_start, omp_block_size); }
#if defined(_OPENMP)
else { { thread_state[omp_thread_num].state.count = libsais_count_unique_suffixes(SA, m, omp_block_start, omp_block_size); }
#pragma omp barrier
{ fast_sint_t t, count = 0; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; } if (omp_thread_num == omp_num_threads - 1) { f = (sa_sint_t)(count + thread_state[omp_thread_num].state.count); } libsais_renumber_unique_and_nonunique_lms_suffixes_32s(T, SA, m, (sa_sint_t)count, omp_block_start, omp_block_size); } }
#endif
} return f; }
/* Parallel compaction of the unique / non-unique partitions. Serial path compacts straight to
   the final cursors (l = m downward, r = n + fs downward). Parallel path: each thread compacts
   its slice into per-thread staging regions recorded in thread_state (position for the
   negative/unique stream, count for the positive stream); the master then memcpy-packs the
   per-thread runs, last thread first, into their final contiguous positions. Afterwards the
   f unique entries are copied to the top of the SA work area. */
static void libsais_compact_unique_and_nonunique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) {
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072 && m < fs)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1;
#endif
fast_sint_t omp_block_stride = (((fast_sint_t)n >> 1) / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ?
omp_block_stride : ((fast_sint_t)n >> 1) - omp_block_start; if (omp_num_threads == 1) { fast_sint_t l = m, r = (fast_sint_t)n + (fast_sint_t)fs; libsais_compact_unique_and_nonunique_lms_suffixes_32s(SA, m, &l, &r, omp_block_start, omp_block_size); }
#if defined(_OPENMP)
else { { thread_state[omp_thread_num].state.position = (fast_sint_t)m + ((fast_sint_t)n >> 1) + omp_block_start + omp_block_size; thread_state[omp_thread_num].state.count = (fast_sint_t)m + omp_block_start + omp_block_size; libsais_compact_unique_and_nonunique_lms_suffixes_32s(SA, m, &thread_state[omp_thread_num].state.position, &thread_state[omp_thread_num].state.count, omp_block_start, omp_block_size); }
#pragma omp barrier
#pragma omp master
{ fast_sint_t t, position; for (position = m, t = omp_num_threads - 1; t >= 0; --t) { fast_sint_t omp_block_end = t < omp_num_threads - 1 ? omp_block_stride * (t + 1) : ((fast_sint_t)n >> 1); fast_sint_t count = ((fast_sint_t)m + ((fast_sint_t)n >> 1) + omp_block_end - thread_state[t].state.position); if (count > 0) { position -= count; memcpy(&SA[position], &SA[thread_state[t].state.position], (size_t)count * sizeof(sa_sint_t)); } } for (position = (fast_sint_t)n + (fast_sint_t)fs, t = omp_num_threads - 1; t >= 0; --t) { fast_sint_t omp_block_end = t < omp_num_threads - 1 ?
omp_block_stride * (t + 1) : ((fast_sint_t)n >> 1); fast_sint_t count = ((fast_sint_t)m + omp_block_end - thread_state[t].state.count); if (count > 0) { position -= count; memcpy(&SA[position], &SA[thread_state[t].state.count], (size_t)count * sizeof(sa_sint_t)); } } } }
#endif
}
/* move the f unique entries to the top of the free space */
memcpy(&SA[(fast_sint_t)n + (fast_sint_t)fs - (fast_sint_t)m], &SA[(fast_sint_t)m - (fast_sint_t)f], (size_t)f * sizeof(sa_sint_t)); }
/* Renumber then compact the LMS suffixes; returns the number f of unique ones. */
static sa_sint_t libsais_compact_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t f = libsais_renumber_unique_and_nonunique_lms_suffixes_32s_omp(T, SA, m, threads, thread_state); libsais_compact_unique_and_nonunique_lms_suffixes_32s_omp(SA, n, m, fs, f, threads, thread_state); return f; }
/* Scans T for positions tagged with the sign bit (unique LMS suffixes): clears the tag and
   writes the position into the SA slot named by the next entry of the stashed name stream
   SAnm (which starts at offset l into SA[n - m - 1 ..]). The inner i++ after each hit skips
   the position following an LMS position. */
static void libsais_merge_unique_lms_suffixes_32s(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, fast_sint_t l, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; const sa_sint_t * RESTRICT SAnm = &SA[(fast_sint_t)n - (fast_sint_t)m - 1 + l]; sa_sint_t i, j; fast_sint_t tmp = *SAnm++; for (i = (sa_sint_t)omp_block_start, j = (sa_sint_t)omp_block_start + (sa_sint_t)omp_block_size - 6; i < j; i += 4) { libsais_prefetch(&T[i + prefetch_distance]); sa_sint_t c0 = T[i + 0]; if (c0 < 0) { T[i + 0] = c0 & SAINT_MAX; SA[tmp] = i + 0; i++; tmp = *SAnm++; } sa_sint_t c1 = T[i + 1]; if (c1 < 0) { T[i + 1] = c1 & SAINT_MAX; SA[tmp] = i + 1; i++; tmp = *SAnm++; } sa_sint_t c2 = T[i + 2]; if (c2 < 0) { T[i + 2] = c2 & SAINT_MAX; SA[tmp] = i + 2; i++; tmp = *SAnm++; } sa_sint_t c3 = T[i + 3]; if (c3 < 0) { T[i + 3] = c3 & SAINT_MAX; SA[tmp] = i + 3; i++; tmp = *SAnm++; } } for (j += 6; i < j; i += 1) { sa_sint_t c = T[i]; if (c < 0) { T[i] = c & SAINT_MAX; SA[tmp] = i; i++; tmp = *SAnm++; } } }
/* Fills the zero slots of SA[omp_block) with consecutive values from the stashed stream SAnm
   (non-unique LMS suffixes left unplaced by the unique merge). */
static void libsais_merge_nonunique_lms_suffixes_32s(sa_sint_t * RESTRICT SA,
sa_sint_t n, sa_sint_t m, fast_sint_t l, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; const sa_sint_t * RESTRICT SAnm = &SA[(fast_sint_t)n - (fast_sint_t)m - 1 + l]; fast_sint_t i, j; sa_sint_t tmp = *SAnm++; for (i = omp_block_start, j = omp_block_start + omp_block_size - 3; i < j; i += 4) { libsais_prefetch(&SA[i + prefetch_distance]); if (SA[i + 0] == 0) { SA[i + 0] = tmp; tmp = *SAnm++; } if (SA[i + 1] == 0) { SA[i + 1] = tmp; tmp = *SAnm++; } if (SA[i + 2] == 0) { SA[i + 2] = tmp; tmp = *SAnm++; } if (SA[i + 3] == 0) { SA[i + 3] = tmp; tmp = *SAnm++; } } for (j += 3; i < j; i += 1) { if (SA[i] == 0) { SA[i] = tmp; tmp = *SAnm++; } } }
/* Parallel unique merge: each thread counts the sign-tagged positions in its T slice, then
   (after a barrier) merges its slice starting at the prefix sum of earlier threads' counts. */
static void libsais_merge_unique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) {
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1;
#endif
fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ?
omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { libsais_merge_unique_lms_suffixes_32s(T, SA, n, m, 0, omp_block_start, omp_block_size); }
#if defined(_OPENMP)
else { { thread_state[omp_thread_num].state.count = libsais_count_negative_marked_suffixes(T, omp_block_start, omp_block_size); }
#pragma omp barrier
{ fast_sint_t t, count = 0; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; } libsais_merge_unique_lms_suffixes_32s(T, SA, n, m, count, omp_block_start, omp_block_size); } }
#endif
} }
/* Parallel non-unique merge: same count-then-prefix-sum scheme, but counting zero slots in SA
   and starting the stream offset at f (past the unique names). */
static void libsais_merge_nonunique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) {
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1;
#endif
fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ?
omp_block_stride : m - omp_block_start; if (omp_num_threads == 1) { libsais_merge_nonunique_lms_suffixes_32s(SA, n, m, f, omp_block_start, omp_block_size); }
#if defined(_OPENMP)
else { { thread_state[omp_thread_num].state.count = libsais_count_zero_marked_suffixes(SA, omp_block_start, omp_block_size); }
#pragma omp barrier
{ fast_sint_t t, count = f; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; } libsais_merge_nonunique_lms_suffixes_32s(SA, n, m, count, omp_block_start, omp_block_size); } }
#endif
} }
/* Unique merge followed by non-unique merge. */
static void libsais_merge_compacted_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais_merge_unique_lms_suffixes_32s_omp(T, SA, n, m, threads, thread_state); libsais_merge_nonunique_lms_suffixes_32s_omp(SA, n, m, f, threads, thread_state); }
/* Restores LMS suffix positions after the recursive call (2k bucket layout). When compaction
   happened (f > 0): move the f unique names back, re-gather the compacted LMS suffixes,
   reconstruct the m - f non-unique ones, stash them after the unique run, clear the prefix,
   and merge both streams back. Otherwise a straight gather + reconstruct suffices. */
static void libsais_reconstruct_compacted_lms_suffixes_32s_2k_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, sa_sint_t fs, sa_sint_t f, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (f > 0) { memmove(&SA[n - m - 1], &SA[n + fs - m], (size_t)f * sizeof(sa_sint_t)); libsais_count_and_gather_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state); libsais_reconstruct_lms_suffixes_omp(SA, n, m - f, threads); memcpy(&SA[n - m - 1 + f], &SA[0], ((size_t)m - (size_t)f) * sizeof(sa_sint_t)); memset(&SA[0], 0, (size_t)m * sizeof(sa_sint_t)); libsais_merge_compacted_lms_suffixes_32s_omp(T, SA, n, m, f, threads, thread_state); } else { libsais_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets, 0, n); libsais_reconstruct_lms_suffixes_omp(SA, n, m, threads); } }
/* 1k-bucket counterpart of the reconstruction above (no bucket histogram available, so plain
   gather routines are used). */
static void libsais_reconstruct_compacted_lms_suffixes_32s_1k_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t f, sa_sint_t threads,
LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (f > 0) { memmove(&SA[n - m - 1], &SA[n + fs - m], (size_t)f * sizeof(sa_sint_t)); libsais_gather_compacted_lms_suffixes_32s(T, SA, n); libsais_reconstruct_lms_suffixes_omp(SA, n, m - f, threads); memcpy(&SA[n - m - 1 + f], &SA[0], ((size_t)m - (size_t)f) * sizeof(sa_sint_t)); memset(&SA[0], 0, (size_t)m * sizeof(sa_sint_t)); libsais_merge_compacted_lms_suffixes_32s_omp(T, SA, n, m, f, threads, thread_state); } else { libsais_gather_lms_suffixes_32s(T, SA, n); libsais_reconstruct_lms_suffixes_omp(SA, n, m, threads); } } static sa_sint_t libsais_main_32s(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { fs = fs < (SAINT_MAX - n) ? fs : (SAINT_MAX - n); if (k > 0 && fs / k >= 6) { sa_sint_t alignment = (fs - 1024) / k >= 6 ? 1024 : 16; sa_sint_t * RESTRICT buckets = (fs - alignment) / k >= 6 ? (sa_sint_t *)libsais_align_up(&SA[n + fs - 6 * k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : &SA[n + fs - 6 * k]; sa_sint_t m = libsais_count_and_gather_lms_suffixes_32s_4k_omp(T, SA, n, k, buckets, threads, thread_state); if (m > 1) { memset(SA, 0, ((size_t)n - (size_t)m) * sizeof(sa_sint_t)); sa_sint_t first_lms_suffix = SA[n - m]; sa_sint_t left_suffixes_count = libsais_initialize_buckets_for_lms_suffixes_radix_sort_32s_6k(T, k, buckets, first_lms_suffix); libsais_radix_sort_lms_suffixes_32s_6k_omp(T, SA, n, m, &buckets[4 * k], threads, thread_state); libsais_radix_sort_set_markers_32s_6k_omp(SA, k, &buckets[4 * k], threads); if (threads > 1 && n >= 65536) { memset(&SA[(fast_sint_t)n - (fast_sint_t)m], 0, (size_t)m * sizeof(sa_sint_t)); } libsais_initialize_buckets_for_partial_sorting_32s_6k(T, k, buckets, first_lms_suffix, left_suffixes_count); libsais_induce_partial_order_32s_6k_omp(T, SA, n, k, buckets, first_lms_suffix, left_suffixes_count, threads, thread_state); sa_sint_t names = 
libsais_renumber_and_mark_distinct_lms_suffixes_32s_4k_omp(SA, n, m, threads, thread_state); if (names < m) { sa_sint_t f = libsais_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state); if (libsais_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0) { return -2; } libsais_reconstruct_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, m, fs, f, buckets, threads, thread_state); } else { libsais_count_lms_suffixes_32s_2k(T, n, k, buckets); } libsais_initialize_buckets_start_and_end_32s_4k(k, buckets); libsais_place_lms_suffixes_histogram_32s_4k(SA, n, k, m, buckets); libsais_induce_final_order_32s_4k(T, SA, n, k, buckets, threads, thread_state); } else { SA[0] = SA[n - 1]; libsais_initialize_buckets_start_and_end_32s_6k(k, buckets); libsais_place_lms_suffixes_histogram_32s_6k(SA, n, k, m, buckets); libsais_induce_final_order_32s_6k(T, SA, n, k, buckets, threads, thread_state); } return 0; } else if (k > 0 && fs / k >= 4) { sa_sint_t alignment = (fs - 1024) / k >= 4 ? 1024 : 16; sa_sint_t * RESTRICT buckets = (fs - alignment) / k >= 4 ? 
(sa_sint_t *)libsais_align_up(&SA[n + fs - 4 * k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : &SA[n + fs - 4 * k]; sa_sint_t m = libsais_count_and_gather_lms_suffixes_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state); if (m > 1) { libsais_initialize_buckets_for_radix_and_partial_sorting_32s_4k(T, k, buckets, SA[n - m]); libsais_radix_sort_lms_suffixes_32s_2k_omp(T, SA, n, m, &buckets[1], threads, thread_state); libsais_radix_sort_set_markers_32s_4k_omp(SA, k, &buckets[1], threads); libsais_place_lms_suffixes_interval_32s_4k(SA, n, k, m - 1, buckets); libsais_induce_partial_order_32s_4k_omp(T, SA, n, k, buckets, threads, thread_state); sa_sint_t names = libsais_renumber_and_mark_distinct_lms_suffixes_32s_4k_omp(SA, n, m, threads, thread_state); if (names < m) { sa_sint_t f = libsais_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state); if (libsais_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0) { return -2; } libsais_reconstruct_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, m, fs, f, buckets, threads, thread_state); } else { libsais_count_lms_suffixes_32s_2k(T, n, k, buckets); } } else { SA[0] = SA[n - 1]; } libsais_initialize_buckets_start_and_end_32s_4k(k, buckets); libsais_place_lms_suffixes_histogram_32s_4k(SA, n, k, m, buckets); libsais_induce_final_order_32s_4k(T, SA, n, k, buckets, threads, thread_state); return 0; } else if (k > 0 && fs / k >= 2) { sa_sint_t alignment = (fs - 1024) / k >= 2 ? 1024 : 16; sa_sint_t * RESTRICT buckets = (fs - alignment) / k >= 2 ? 
(sa_sint_t *)libsais_align_up(&SA[n + fs - 2 * k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : &SA[n + fs - 2 * k]; sa_sint_t m = libsais_count_and_gather_lms_suffixes_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state); if (m > 1) { libsais_initialize_buckets_for_lms_suffixes_radix_sort_32s_2k(T, k, buckets, SA[n - m]); libsais_radix_sort_lms_suffixes_32s_2k_omp(T, SA, n, m, &buckets[1], threads, thread_state); libsais_place_lms_suffixes_interval_32s_2k(SA, n, k, m - 1, buckets); libsais_initialize_buckets_start_and_end_32s_2k(k, buckets); libsais_induce_partial_order_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state); sa_sint_t names = libsais_renumber_and_mark_distinct_lms_suffixes_32s_1k_omp(T, SA, n, m, threads); if (names < m) { sa_sint_t f = libsais_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state); if (libsais_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0) { return -2; } libsais_reconstruct_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, m, fs, f, buckets, threads, thread_state); } else { libsais_count_lms_suffixes_32s_2k(T, n, k, buckets); } } else { SA[0] = SA[n - 1]; } libsais_initialize_buckets_end_32s_2k(k, buckets); libsais_place_lms_suffixes_histogram_32s_2k(SA, n, k, m, buckets); libsais_initialize_buckets_start_and_end_32s_2k(k, buckets); libsais_induce_final_order_32s_2k(T, SA, n, k, buckets, threads, thread_state); return 0; } else { sa_sint_t * buffer = fs < k ? (sa_sint_t *)libsais_alloc_aligned((size_t)k * sizeof(sa_sint_t), 4096) : (sa_sint_t *)NULL; sa_sint_t alignment = fs - 1024 >= k ? 1024 : 16; sa_sint_t * RESTRICT buckets = fs - alignment >= k ? (sa_sint_t *)libsais_align_up(&SA[n + fs - k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : fs >= k ? 
&SA[n + fs - k] : buffer; if (buckets == NULL) { return -2; } memset(SA, 0, (size_t)n * sizeof(sa_sint_t)); libsais_count_suffixes_32s(T, n, k, buckets); libsais_initialize_buckets_end_32s_1k(k, buckets); sa_sint_t m = libsais_radix_sort_lms_suffixes_32s_1k(T, SA, n, buckets); if (m > 1) { libsais_induce_partial_order_32s_1k_omp(T, SA, n, k, buckets, threads, thread_state); sa_sint_t names = libsais_renumber_and_mark_distinct_lms_suffixes_32s_1k_omp(T, SA, n, m, threads); if (names < m) { if (buffer != NULL) { libsais_free_aligned(buffer); buckets = NULL; } sa_sint_t f = libsais_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state); if (libsais_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0) { return -2; } libsais_reconstruct_compacted_lms_suffixes_32s_1k_omp(T, SA, n, m, fs, f, threads, thread_state); if (buckets == NULL) { buckets = buffer = (sa_sint_t *)libsais_alloc_aligned((size_t)k * sizeof(sa_sint_t), 4096); } if (buckets == NULL) { return -2; } } libsais_count_suffixes_32s(T, n, k, buckets); libsais_initialize_buckets_end_32s_1k(k, buckets); libsais_place_lms_suffixes_interval_32s_1k(T, SA, k, m, buckets); } libsais_induce_final_order_32s_1k(T, SA, n, k, buckets, threads, thread_state); libsais_free_aligned(buffer); return 0; } } static sa_sint_t libsais_main_8u(const uint8_t * T, sa_sint_t * SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t bwt, sa_sint_t r, sa_sint_t * RESTRICT I, sa_sint_t fs, sa_sint_t * freq, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { fs = fs < (SAINT_MAX - n) ? 
fs : (SAINT_MAX - n); sa_sint_t m = libsais_count_and_gather_lms_suffixes_8u_omp(T, SA, n, buckets, threads, thread_state); libsais_initialize_buckets_start_and_end_8u(buckets, freq); if (m > 0) { sa_sint_t first_lms_suffix = SA[n - m]; sa_sint_t left_suffixes_count = libsais_initialize_buckets_for_lms_suffixes_radix_sort_8u(T, buckets, first_lms_suffix); if (threads > 1 && n >= 65536) { memset(SA, 0, ((size_t)n - (size_t)m) * sizeof(sa_sint_t)); } libsais_radix_sort_lms_suffixes_8u_omp(T, SA, n, m, buckets, threads, thread_state); if (threads > 1 && n >= 65536) { memset(&SA[(fast_sint_t)n - (fast_sint_t)m], 0, (size_t)m * sizeof(sa_sint_t)); } libsais_initialize_buckets_for_partial_sorting_8u(T, buckets, first_lms_suffix, left_suffixes_count); libsais_induce_partial_order_8u_omp(T, SA, n, buckets, first_lms_suffix, left_suffixes_count, threads, thread_state); sa_sint_t names = libsais_renumber_and_gather_lms_suffixes_8u_omp(SA, n, m, fs, threads, thread_state); if (names < m) { if (libsais_main_32s(SA + n + fs - m, SA, m, names, fs + n - 2 * m, threads, thread_state) != 0) { return -2; } libsais_gather_lms_suffixes_8u_omp(T, SA, n, threads, thread_state); libsais_reconstruct_lms_suffixes_omp(SA, n, m, threads); } libsais_place_lms_suffixes_interval_8u(SA, n, m, buckets); } else { memset(SA, 0, (size_t)n * sizeof(sa_sint_t)); } return libsais_induce_final_order_8u_omp(T, SA, n, bwt, r, I, buckets, threads, thread_state); } static sa_sint_t libsais_main(const uint8_t * T, sa_sint_t * SA, sa_sint_t n, sa_sint_t bwt, sa_sint_t r, sa_sint_t * I, sa_sint_t fs, sa_sint_t * freq, sa_sint_t threads) { LIBSAIS_THREAD_STATE * RESTRICT thread_state = threads > 1 ? libsais_alloc_thread_state(threads) : NULL; sa_sint_t * RESTRICT buckets = (sa_sint_t *)libsais_alloc_aligned(8 * ALPHABET_SIZE * sizeof(sa_sint_t), 4096); sa_sint_t index = buckets != NULL && (thread_state != NULL || threads == 1) ? 
libsais_main_8u(T, SA, n, buckets, bwt, r, I, fs, freq, threads, thread_state) : -2; libsais_free_aligned(buckets); libsais_free_thread_state(thread_state); return index; } static int32_t libsais_main_int(sa_sint_t * T, sa_sint_t * SA, sa_sint_t n, sa_sint_t k, sa_sint_t fs, sa_sint_t threads) { LIBSAIS_THREAD_STATE * RESTRICT thread_state = threads > 1 ? libsais_alloc_thread_state(threads) : NULL; sa_sint_t index = thread_state != NULL || threads == 1 ? libsais_main_32s(T, SA, n, k, fs, threads, thread_state) : -2; libsais_free_thread_state(thread_state); return index; } static sa_sint_t libsais_main_ctx(const LIBSAIS_CONTEXT * ctx, const uint8_t * T, sa_sint_t * SA, sa_sint_t n, sa_sint_t bwt, sa_sint_t r, sa_sint_t * I, sa_sint_t fs, sa_sint_t * freq) { return ctx != NULL && (ctx->buckets != NULL && (ctx->thread_state != NULL || ctx->threads == 1)) ? libsais_main_8u(T, SA, n, ctx->buckets, bwt, r, I, fs, freq, (sa_sint_t)ctx->threads, ctx->thread_state) : -2; } static void libsais_bwt_copy_8u(uint8_t * RESTRICT U, sa_sint_t * RESTRICT A, sa_sint_t n) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = 0, j = (fast_sint_t)n - 7; i < j; i += 8) { libsais_prefetch(&A[i + prefetch_distance]); U[i + 0] = (uint8_t)A[i + 0]; U[i + 1] = (uint8_t)A[i + 1]; U[i + 2] = (uint8_t)A[i + 2]; U[i + 3] = (uint8_t)A[i + 3]; U[i + 4] = (uint8_t)A[i + 4]; U[i + 5] = (uint8_t)A[i + 5]; U[i + 6] = (uint8_t)A[i + 6]; U[i + 7] = (uint8_t)A[i + 7]; } for (j += 7; i < j; i += 1) { U[i] = (uint8_t)A[i]; } } #if defined(_OPENMP) static void libsais_bwt_copy_8u_omp(uint8_t * RESTRICT U, sa_sint_t * RESTRICT A, sa_sint_t n, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); fast_sint_t omp_block_stride = ((fast_sint_t)n / omp_num_threads) & (-16); fast_sint_t omp_block_start = 
omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : (fast_sint_t)n - omp_block_start; #else UNUSED(threads); fast_sint_t omp_block_start = 0; fast_sint_t omp_block_size = (fast_sint_t)n; #endif libsais_bwt_copy_8u(U + omp_block_start, A + omp_block_start, (sa_sint_t)omp_block_size); } } #endif void * libsais_create_ctx(void) { return (void *)libsais_create_ctx_main(1); } void libsais_free_ctx(void * ctx) { libsais_free_ctx_main((LIBSAIS_CONTEXT *)ctx); } int32_t libsais(const uint8_t * T, int32_t * SA, int32_t n, int32_t fs, int32_t * freq) { if ((T == NULL) || (SA == NULL) || (n < 0) || (fs < 0)) { return -1; } else if (n < 2) { if (n == 1) { SA[0] = 0; } return 0; } return libsais_main(T, SA, n, 0, 0, NULL, fs, freq, 1); } int32_t libsais_int(int32_t * T, int32_t * SA, int32_t n, int32_t k, int32_t fs) { if ((T == NULL) || (SA == NULL) || (n < 0) || (fs < 0)) { return -1; } else if (n < 2) { if (n == 1) { SA[0] = 0; } return 0; } return libsais_main_int(T, SA, n, k, fs, 1); } int32_t libsais_ctx(const void * ctx, const uint8_t * T, int32_t * SA, int32_t n, int32_t fs, int32_t * freq) { if ((ctx == NULL) || (T == NULL) || (SA == NULL) || (n < 0) || (fs < 0)) { return -1; } else if (n < 2) { if (n == 1) { SA[0] = 0; } return 0; } return libsais_main_ctx((const LIBSAIS_CONTEXT *)ctx, T, SA, n, 0, 0, NULL, fs, freq); } int32_t libsais_bwt(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq) { if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0)) { return -1; } else if (n <= 1) { if (n == 1) { U[0] = T[0]; } return n; } sa_sint_t index = libsais_main(T, A, n, 1, 0, NULL, fs, freq, 1); if (index >= 0) { index++; U[0] = T[n - 1]; libsais_bwt_copy_8u(U + 1, A, index - 1); libsais_bwt_copy_8u(U + index, A + index, n - index); } return index; } int32_t libsais_bwt_aux(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq, int32_t 
r, int32_t * I) { if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0) || (r < 2) || ((r & (r - 1)) != 0) || (I == NULL)) { return -1; } else if (n <= 1) { if (n == 1) { U[0] = T[0]; } I[0] = n; return 0; } if (libsais_main(T, A, n, 1, r, I, fs, freq, 1) != 0) { return -2; } U[0] = T[n - 1]; libsais_bwt_copy_8u(U + 1, A, I[0] - 1); libsais_bwt_copy_8u(U + I[0], A + I[0], n - I[0]); return 0; } int32_t libsais_bwt_ctx(const void * ctx, const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq) { if ((ctx == NULL) || (T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0)) { return -1; } else if (n <= 1) { if (n == 1) { U[0] = T[0]; } return n; } sa_sint_t index = libsais_main_ctx((const LIBSAIS_CONTEXT *)ctx, T, A, n, 1, 0, NULL, fs, freq); if (index >= 0) { index++; U[0] = T[n - 1]; #if defined(_OPENMP) libsais_bwt_copy_8u_omp(U + 1, A, index - 1, (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads); libsais_bwt_copy_8u_omp(U + index, A + index, n - index, (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads); #else libsais_bwt_copy_8u(U + 1, A, index - 1); libsais_bwt_copy_8u(U + index, A + index, n - index); #endif } return index; } int32_t libsais_bwt_aux_ctx(const void * ctx, const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq, int32_t r, int32_t * I) { if ((ctx == NULL) || (T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0) || (r < 2) || ((r & (r - 1)) != 0) || (I == NULL)) { return -1; } else if (n <= 1) { if (n == 1) { U[0] = T[0]; } I[0] = n; return 0; } if (libsais_main_ctx((const LIBSAIS_CONTEXT *)ctx, T, A, n, 1, r, I, fs, freq) != 0) { return -2; } U[0] = T[n - 1]; #if defined(_OPENMP) libsais_bwt_copy_8u_omp(U + 1, A, I[0] - 1, (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads); libsais_bwt_copy_8u_omp(U + I[0], A + I[0], n - I[0], (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads); #else libsais_bwt_copy_8u(U + 1, A, I[0] - 1); libsais_bwt_copy_8u(U + I[0], A 
+ I[0], n - I[0]); #endif return 0; } #if defined(_OPENMP) void * libsais_create_ctx_omp(int32_t threads) { if (threads < 0) { return NULL; } threads = threads > 0 ? threads : omp_get_max_threads(); return (void *)libsais_create_ctx_main(threads); } int32_t libsais_omp(const uint8_t * T, int32_t * SA, int32_t n, int32_t fs, int32_t * freq, int32_t threads) { if ((T == NULL) || (SA == NULL) || (n < 0) || (fs < 0) || (threads < 0)) { return -1; } else if (n < 2) { if (n == 1) { SA[0] = 0; } return 0; } threads = threads > 0 ? threads : omp_get_max_threads(); return libsais_main(T, SA, n, 0, 0, NULL, fs, freq, threads); } int32_t libsais_int_omp(int32_t * T, int32_t * SA, int32_t n, int32_t k, int32_t fs, int32_t threads) { if ((T == NULL) || (SA == NULL) || (n < 0) || (fs < 0) || (threads < 0)) { return -1; } else if (n < 2) { if (n == 1) { SA[0] = 0; } return 0; } threads = threads > 0 ? threads : omp_get_max_threads(); return libsais_main_int(T, SA, n, k, fs, threads); } int32_t libsais_bwt_omp(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq, int32_t threads) { if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0) || (threads < 0)) { return -1; } else if (n <= 1) { if (n == 1) { U[0] = T[0]; } return n; } threads = threads > 0 ? threads : omp_get_max_threads(); sa_sint_t index = libsais_main(T, A, n, 1, 0, NULL, fs, freq, threads); if (index >= 0) { index++; U[0] = T[n - 1]; libsais_bwt_copy_8u_omp(U + 1, A, index - 1, threads); libsais_bwt_copy_8u_omp(U + index, A + index, n - index, threads); } return index; } int32_t libsais_bwt_aux_omp(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq, int32_t r, int32_t * I, int32_t threads) { if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0) || (r < 2) || ((r & (r - 1)) != 0) || (I == NULL) || (threads < 0)) { return -1; } else if (n <= 1) { if (n == 1) { U[0] = T[0];} I[0] = n; return 0; } threads = threads > 0 ? 
threads : omp_get_max_threads(); if (libsais_main(T, A, n, 1, r, I, fs, freq, threads) != 0) { return -2; } U[0] = T[n - 1]; libsais_bwt_copy_8u_omp(U + 1, A, I[0] - 1, threads); libsais_bwt_copy_8u_omp(U + I[0], A + I[0], n - I[0], threads); return 0; } #endif static LIBSAIS_UNBWT_CONTEXT * libsais_unbwt_create_ctx_main(sa_sint_t threads) { LIBSAIS_UNBWT_CONTEXT * RESTRICT ctx = (LIBSAIS_UNBWT_CONTEXT *)libsais_alloc_aligned(sizeof(LIBSAIS_UNBWT_CONTEXT), 64); sa_uint_t * RESTRICT bucket2 = (sa_uint_t *)libsais_alloc_aligned(ALPHABET_SIZE * ALPHABET_SIZE * sizeof(sa_uint_t), 4096); uint16_t * RESTRICT fastbits = (uint16_t *)libsais_alloc_aligned((1 + (1 << UNBWT_FASTBITS)) * sizeof(uint16_t), 4096); sa_uint_t * RESTRICT buckets = threads > 1 ? (sa_uint_t *)libsais_alloc_aligned((size_t)threads * (ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)) * sizeof(sa_uint_t), 4096) : NULL; if (ctx != NULL && bucket2 != NULL && fastbits != NULL && (buckets != NULL || threads == 1)) { ctx->bucket2 = bucket2; ctx->fastbits = fastbits; ctx->buckets = buckets; ctx->threads = threads; return ctx; } libsais_free_aligned(buckets); libsais_free_aligned(fastbits); libsais_free_aligned(bucket2); libsais_free_aligned(ctx); return NULL; } static void libsais_unbwt_free_ctx_main(LIBSAIS_UNBWT_CONTEXT * ctx) { if (ctx != NULL) { libsais_free_aligned(ctx->buckets); libsais_free_aligned(ctx->fastbits); libsais_free_aligned(ctx->bucket2); libsais_free_aligned(ctx); } } static void libsais_unbwt_compute_histogram(const uint8_t * RESTRICT T, fast_sint_t n, sa_uint_t * RESTRICT count) { const fast_sint_t prefetch_distance = 256; const uint8_t * RESTRICT T_p = T; if (n >= 1024) { sa_uint_t copy[4 * (ALPHABET_SIZE + 16)]; memset(copy, 0, 4 * (ALPHABET_SIZE + 16) * sizeof(sa_uint_t)); sa_uint_t * RESTRICT copy0 = copy + 0 * (ALPHABET_SIZE + 16); sa_uint_t * RESTRICT copy1 = copy + 1 * (ALPHABET_SIZE + 16); sa_uint_t * RESTRICT copy2 = copy + 2 * (ALPHABET_SIZE + 16); sa_uint_t * RESTRICT copy3 = 
copy + 3 * (ALPHABET_SIZE + 16); for (; T_p < (uint8_t * )((ptrdiff_t)(T + 63) & (-64)); T_p += 1) { copy0[T_p[0]]++; } fast_uint_t x = ((const uint32_t *)(const void *)T_p)[0], y = ((const uint32_t *)(const void *)T_p)[1]; for (; T_p < (uint8_t * )((ptrdiff_t)(T + n - 8) & (-64)); T_p += 64) { libsais_prefetch(&T_p[prefetch_distance]); fast_uint_t z = ((const uint32_t *)(const void *)T_p)[2], w = ((const uint32_t *)(const void *)T_p)[3]; copy0[(uint8_t)x]++; x >>= 8; copy1[(uint8_t)x]++; x >>= 8; copy2[(uint8_t)x]++; x >>= 8; copy3[x]++; copy0[(uint8_t)y]++; y >>= 8; copy1[(uint8_t)y]++; y >>= 8; copy2[(uint8_t)y]++; y >>= 8; copy3[y]++; x = ((const uint32_t *)(const void *)T_p)[4]; y = ((const uint32_t *)(const void *)T_p)[5]; copy0[(uint8_t)z]++; z >>= 8; copy1[(uint8_t)z]++; z >>= 8; copy2[(uint8_t)z]++; z >>= 8; copy3[z]++; copy0[(uint8_t)w]++; w >>= 8; copy1[(uint8_t)w]++; w >>= 8; copy2[(uint8_t)w]++; w >>= 8; copy3[w]++; z = ((const uint32_t *)(const void *)T_p)[6]; w = ((const uint32_t *)(const void *)T_p)[7]; copy0[(uint8_t)x]++; x >>= 8; copy1[(uint8_t)x]++; x >>= 8; copy2[(uint8_t)x]++; x >>= 8; copy3[x]++; copy0[(uint8_t)y]++; y >>= 8; copy1[(uint8_t)y]++; y >>= 8; copy2[(uint8_t)y]++; y >>= 8; copy3[y]++; x = ((const uint32_t *)(const void *)T_p)[8]; y = ((const uint32_t *)(const void *)T_p)[9]; copy0[(uint8_t)z]++; z >>= 8; copy1[(uint8_t)z]++; z >>= 8; copy2[(uint8_t)z]++; z >>= 8; copy3[z]++; copy0[(uint8_t)w]++; w >>= 8; copy1[(uint8_t)w]++; w >>= 8; copy2[(uint8_t)w]++; w >>= 8; copy3[w]++; z = ((const uint32_t *)(const void *)T_p)[10]; w = ((const uint32_t *)(const void *)T_p)[11]; copy0[(uint8_t)x]++; x >>= 8; copy1[(uint8_t)x]++; x >>= 8; copy2[(uint8_t)x]++; x >>= 8; copy3[x]++; copy0[(uint8_t)y]++; y >>= 8; copy1[(uint8_t)y]++; y >>= 8; copy2[(uint8_t)y]++; y >>= 8; copy3[y]++; x = ((const uint32_t *)(const void *)T_p)[12]; y = ((const uint32_t *)(const void *)T_p)[13]; copy0[(uint8_t)z]++; z >>= 8; copy1[(uint8_t)z]++; z >>= 8; 
copy2[(uint8_t)z]++; z >>= 8; copy3[z]++; copy0[(uint8_t)w]++; w >>= 8; copy1[(uint8_t)w]++; w >>= 8; copy2[(uint8_t)w]++; w >>= 8; copy3[w]++; z = ((const uint32_t *)(const void *)T_p)[14]; w = ((const uint32_t *)(const void *)T_p)[15]; copy0[(uint8_t)x]++; x >>= 8; copy1[(uint8_t)x]++; x >>= 8; copy2[(uint8_t)x]++; x >>= 8; copy3[x]++; copy0[(uint8_t)y]++; y >>= 8; copy1[(uint8_t)y]++; y >>= 8; copy2[(uint8_t)y]++; y >>= 8; copy3[y]++; x = ((const uint32_t *)(const void *)T_p)[16]; y = ((const uint32_t *)(const void *)T_p)[17]; copy0[(uint8_t)z]++; z >>= 8; copy1[(uint8_t)z]++; z >>= 8; copy2[(uint8_t)z]++; z >>= 8; copy3[z]++; copy0[(uint8_t)w]++; w >>= 8; copy1[(uint8_t)w]++; w >>= 8; copy2[(uint8_t)w]++; w >>= 8; copy3[w]++; } copy0[(uint8_t)x]++; x >>= 8; copy1[(uint8_t)x]++; x >>= 8; copy2[(uint8_t)x]++; x >>= 8; copy3[x]++; copy0[(uint8_t)y]++; y >>= 8; copy1[(uint8_t)y]++; y >>= 8; copy2[(uint8_t)y]++; y >>= 8; copy3[y]++; T_p += 8; fast_uint_t i; for (i = 0; i < ALPHABET_SIZE; i++) { count[i] += copy0[i] + copy1[i] + copy2[i] + copy3[i]; } } for (; T_p < T + n; T_p += 1) { count[T_p[0]]++; } } static void libsais_unbwt_transpose_bucket2(sa_uint_t * RESTRICT bucket2) { fast_uint_t x, y, c, d; for (x = 0; x != ALPHABET_SIZE; x += 16) { for (c = x; c != x + 16; ++c) { for (d = c + 1; d != x + 16; ++d) { sa_uint_t tmp = bucket2[(d << 8) + c]; bucket2[(d << 8) + c] = bucket2[(c << 8) + d]; bucket2[(c << 8) + d] = tmp; } } for (y = x + 16; y != ALPHABET_SIZE; y += 16) { for (c = x; c != x + 16; ++c) { sa_uint_t * bucket2_yc = &bucket2[(y << 8) + c]; sa_uint_t * bucket2_cy = &bucket2[(c << 8) + y]; sa_uint_t tmp00 = bucket2_yc[ 0 * 256]; bucket2_yc[ 0 * 256] = bucket2_cy[ 0]; bucket2_cy[ 0] = tmp00; sa_uint_t tmp01 = bucket2_yc[ 1 * 256]; bucket2_yc[ 1 * 256] = bucket2_cy[ 1]; bucket2_cy[ 1] = tmp01; sa_uint_t tmp02 = bucket2_yc[ 2 * 256]; bucket2_yc[ 2 * 256] = bucket2_cy[ 2]; bucket2_cy[ 2] = tmp02; sa_uint_t tmp03 = bucket2_yc[ 3 * 256]; bucket2_yc[ 3 * 256] 
= bucket2_cy[ 3]; bucket2_cy[ 3] = tmp03; sa_uint_t tmp04 = bucket2_yc[ 4 * 256]; bucket2_yc[ 4 * 256] = bucket2_cy[ 4]; bucket2_cy[ 4] = tmp04; sa_uint_t tmp05 = bucket2_yc[ 5 * 256]; bucket2_yc[ 5 * 256] = bucket2_cy[ 5]; bucket2_cy[ 5] = tmp05; sa_uint_t tmp06 = bucket2_yc[ 6 * 256]; bucket2_yc[ 6 * 256] = bucket2_cy[ 6]; bucket2_cy[ 6] = tmp06; sa_uint_t tmp07 = bucket2_yc[ 7 * 256]; bucket2_yc[ 7 * 256] = bucket2_cy[ 7]; bucket2_cy[ 7] = tmp07; sa_uint_t tmp08 = bucket2_yc[ 8 * 256]; bucket2_yc[ 8 * 256] = bucket2_cy[ 8]; bucket2_cy[ 8] = tmp08; sa_uint_t tmp09 = bucket2_yc[ 9 * 256]; bucket2_yc[ 9 * 256] = bucket2_cy[ 9]; bucket2_cy[ 9] = tmp09; sa_uint_t tmp10 = bucket2_yc[10 * 256]; bucket2_yc[10 * 256] = bucket2_cy[10]; bucket2_cy[10] = tmp10; sa_uint_t tmp11 = bucket2_yc[11 * 256]; bucket2_yc[11 * 256] = bucket2_cy[11]; bucket2_cy[11] = tmp11; sa_uint_t tmp12 = bucket2_yc[12 * 256]; bucket2_yc[12 * 256] = bucket2_cy[12]; bucket2_cy[12] = tmp12; sa_uint_t tmp13 = bucket2_yc[13 * 256]; bucket2_yc[13 * 256] = bucket2_cy[13]; bucket2_cy[13] = tmp13; sa_uint_t tmp14 = bucket2_yc[14 * 256]; bucket2_yc[14 * 256] = bucket2_cy[14]; bucket2_cy[14] = tmp14; sa_uint_t tmp15 = bucket2_yc[15 * 256]; bucket2_yc[15 * 256] = bucket2_cy[15]; bucket2_cy[15] = tmp15; } } } } static void libsais_unbwt_compute_bigram_histogram_single(const uint8_t * RESTRICT T, sa_uint_t * RESTRICT bucket1, sa_uint_t * RESTRICT bucket2, fast_uint_t index) { fast_uint_t sum, c; for (sum = 1, c = 0; c < ALPHABET_SIZE; ++c) { fast_uint_t prev = sum; sum += bucket1[c]; bucket1[c] = (sa_uint_t)prev; if (prev != sum) { sa_uint_t * RESTRICT bucket2_p = &bucket2[c << 8]; { fast_uint_t hi = index; if (sum < hi) { hi = sum; } libsais_unbwt_compute_histogram(&T[prev], (fast_sint_t)(hi - prev), bucket2_p); } { fast_uint_t lo = index + 1; if (prev > lo) { lo = prev; } libsais_unbwt_compute_histogram(&T[lo - 1], (fast_sint_t)(sum - lo), bucket2_p); } } } libsais_unbwt_transpose_bucket2(bucket2); } static 
void libsais_unbwt_calculate_fastbits(sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t lastc, fast_uint_t shift) { fast_uint_t v, w, sum, c, d; for (v = 0, w = 0, sum = 1, c = 0; c < ALPHABET_SIZE; ++c) { if (c == lastc) { sum += 1; } for (d = 0; d < ALPHABET_SIZE; ++d, ++w) { fast_uint_t prev = sum; sum += bucket2[w]; bucket2[w] = (sa_uint_t)prev; if (prev != sum) { for (; v <= ((sum - 1) >> shift); ++v) { fastbits[v] = (uint16_t)w; } } } } } static void libsais_unbwt_calculate_biPSI(const uint8_t * RESTRICT T, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket1, sa_uint_t * RESTRICT bucket2, fast_uint_t index, fast_sint_t omp_block_start, fast_sint_t omp_block_end) { { fast_sint_t i = omp_block_start, j = (fast_sint_t)index; if (omp_block_end < j) { j = omp_block_end; } for (; i < j; ++i) { fast_uint_t c = T[i]; fast_uint_t p = bucket1[c]++; fast_sint_t t = (fast_sint_t)(index - p); if (t != 0) { fast_uint_t w = (((fast_uint_t)T[p + (fast_uint_t)(t >> ((sizeof(fast_sint_t) * 8) - 1))]) << 8) + c; P[bucket2[w]++] = (sa_uint_t)i; } } } { fast_sint_t i = (fast_sint_t)index, j = omp_block_end; if (omp_block_start > i) { i = omp_block_start; } for (i += 1; i <= j; ++i) { fast_uint_t c = T[i - 1]; fast_uint_t p = bucket1[c]++; fast_sint_t t = (fast_sint_t)(index - p); if (t != 0) { fast_uint_t w = (((fast_uint_t)T[p + (fast_uint_t)(t >> ((sizeof(fast_sint_t) * 8) - 1))]) << 8) + c; P[bucket2[w]++] = (sa_uint_t)i; } } } } static void libsais_unbwt_init_single(const uint8_t * RESTRICT T, sa_uint_t * RESTRICT P, sa_sint_t n, const sa_sint_t * freq, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits) { sa_uint_t bucket1[ALPHABET_SIZE]; fast_uint_t index = I[0]; fast_uint_t lastc = T[0]; fast_uint_t shift = 0; while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift++; } if (freq != NULL) { memcpy(bucket1, freq, ALPHABET_SIZE * sizeof(sa_uint_t)); } else { memset(bucket1, 0, ALPHABET_SIZE * sizeof(sa_uint_t)); 
libsais_unbwt_compute_histogram(T, n, bucket1); } memset(bucket2, 0, ALPHABET_SIZE * ALPHABET_SIZE * sizeof(sa_uint_t)); libsais_unbwt_compute_bigram_histogram_single(T, bucket1, bucket2, index); libsais_unbwt_calculate_fastbits(bucket2, fastbits, lastc, shift); libsais_unbwt_calculate_biPSI(T, P, bucket1, bucket2, index, 0, n); } #if defined(_OPENMP) static void libsais_unbwt_compute_bigram_histogram_parallel(const uint8_t * RESTRICT T, fast_uint_t index, sa_uint_t * RESTRICT bucket1, sa_uint_t * RESTRICT bucket2, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { fast_sint_t i; for (i = omp_block_start; i < omp_block_start + omp_block_size; ++i) { fast_uint_t c = T[i]; fast_uint_t p = bucket1[c]++; fast_sint_t t = (fast_sint_t)(index - p); if (t != 0) { fast_uint_t w = (((fast_uint_t)T[p + (fast_uint_t)(t >> ((sizeof(fast_sint_t) * 8) - 1))]) << 8) + c; bucket2[w]++; } } } static void libsais_unbwt_init_parallel(const uint8_t * RESTRICT T, sa_uint_t * RESTRICT P, sa_sint_t n, const sa_sint_t * freq, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, sa_uint_t * RESTRICT buckets, sa_sint_t threads) { sa_uint_t bucket1[ALPHABET_SIZE]; fast_uint_t index = I[0]; fast_uint_t lastc = T[0]; fast_uint_t shift = 0; while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift++; } memset(bucket1, 0, ALPHABET_SIZE * sizeof(sa_uint_t)); memset(bucket2, 0, ALPHABET_SIZE * ALPHABET_SIZE * sizeof(sa_uint_t)); #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536) { fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); if (omp_num_threads == 1) { libsais_unbwt_init_single(T, P, n, freq, I, bucket2, fastbits); } else { sa_uint_t * RESTRICT bucket1_local = buckets + omp_thread_num * (ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)); sa_uint_t * RESTRICT bucket2_local = bucket1_local + ALPHABET_SIZE; fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t 
omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start; { memset(bucket1_local, 0, ALPHABET_SIZE * sizeof(sa_uint_t)); libsais_unbwt_compute_histogram(T + omp_block_start, omp_block_size, bucket1_local); } #pragma omp barrier #pragma omp master { { sa_uint_t * RESTRICT bucket1_temp = buckets; fast_sint_t t; for (t = 0; t < omp_num_threads; ++t, bucket1_temp += ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)) { fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_uint_t A = bucket1[c], B = bucket1_temp[c]; bucket1[c] = A + B; bucket1_temp[c] = A; } } } { fast_uint_t sum, c; for (sum = 1, c = 0; c < ALPHABET_SIZE; ++c) { fast_uint_t prev = sum; sum += bucket1[c]; bucket1[c] = (sa_uint_t)prev; } } } #pragma omp barrier { fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_uint_t A = bucket1[c], B = bucket1_local[c]; bucket1_local[c] = A + B; } memset(bucket2_local, 0, ALPHABET_SIZE * ALPHABET_SIZE * sizeof(sa_uint_t)); libsais_unbwt_compute_bigram_histogram_parallel(T, index, bucket1_local, bucket2_local, omp_block_start, omp_block_size); } #pragma omp barrier { fast_sint_t omp_bucket2_stride = ((ALPHABET_SIZE * ALPHABET_SIZE) / omp_num_threads) & (-16); fast_sint_t omp_bucket2_start = omp_thread_num * omp_bucket2_stride; fast_sint_t omp_bucket2_size = omp_thread_num < omp_num_threads - 1 ? 
omp_bucket2_stride : (ALPHABET_SIZE * ALPHABET_SIZE) - omp_bucket2_start; sa_uint_t * RESTRICT bucket2_temp = buckets + ALPHABET_SIZE; fast_sint_t t; for (t = 0; t < omp_num_threads; ++t, bucket2_temp += ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)) { fast_sint_t c; for (c = omp_bucket2_start; c < omp_bucket2_start + omp_bucket2_size; c += 1) { sa_uint_t A = bucket2[c], B = bucket2_temp[c]; bucket2[c] = A + B; bucket2_temp[c] = A; } } } #pragma omp barrier #pragma omp master { libsais_unbwt_calculate_fastbits(bucket2, fastbits, lastc, shift); { fast_sint_t t; for (t = omp_num_threads - 1; t >= 1; --t) { sa_uint_t * RESTRICT dst_bucket1 = buckets + t * (ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)); sa_uint_t * RESTRICT src_bucket1 = dst_bucket1 - (ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)); memcpy(dst_bucket1, src_bucket1, ALPHABET_SIZE * sizeof(sa_uint_t)); } memcpy(buckets, bucket1, ALPHABET_SIZE * sizeof(sa_uint_t)); } } #pragma omp barrier { fast_sint_t c; for (c = 0; c < ALPHABET_SIZE * ALPHABET_SIZE; c += 1) { sa_uint_t A = bucket2[c], B = bucket2_local[c]; bucket2_local[c] = A + B; } libsais_unbwt_calculate_biPSI(T, P, bucket1_local, bucket2_local, index, omp_block_start, omp_block_start + omp_block_size); } #pragma omp barrier #pragma omp master { memcpy(bucket2, buckets + ALPHABET_SIZE + (omp_num_threads - 1) * (ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)), ALPHABET_SIZE * ALPHABET_SIZE * sizeof(sa_uint_t)); } } } } #endif static void libsais_unbwt_decode_1(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t * i0, fast_uint_t k) { uint16_t * RESTRICT U0 = (uint16_t *)(void *)U; fast_uint_t i, p0 = *i0; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0); } *i0 = p0; } static void libsais_unbwt_decode_2(uint8_t * RESTRICT U, sa_uint_t * 
RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t k) { uint16_t * RESTRICT U0 = (uint16_t *)(void *)U; uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r); fast_uint_t i, p0 = *i0, p1 = *i1; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0); uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1); } *i0 = p0; *i1 = p1; } static void libsais_unbwt_decode_3(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t k) { uint16_t * RESTRICT U0 = (uint16_t *)(void *)U; uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r); uint16_t * RESTRICT U2 = (uint16_t *)(void *)(((uint8_t *)U1) + r); fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0); uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1); uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = libsais_bswap16(c2); } *i0 = p0; *i1 = p1; *i2 = p2; } static void libsais_unbwt_decode_4(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t k) { uint16_t * RESTRICT U0 = (uint16_t *)(void *)U; uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + 
r); uint16_t * RESTRICT U2 = (uint16_t *)(void *)(((uint8_t *)U1) + r); uint16_t * RESTRICT U3 = (uint16_t *)(void *)(((uint8_t *)U2) + r); fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0); uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1); uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = libsais_bswap16(c2); uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = libsais_bswap16(c3); } *i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; } static void libsais_unbwt_decode_5(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t k) { uint16_t * RESTRICT U0 = (uint16_t *)(void *)U; uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r); uint16_t * RESTRICT U2 = (uint16_t *)(void *)(((uint8_t *)U1) + r); uint16_t * RESTRICT U3 = (uint16_t *)(void *)(((uint8_t *)U2) + r); uint16_t * RESTRICT U4 = (uint16_t *)(void *)(((uint8_t *)U3) + r); fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0); uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1); uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = libsais_bswap16(c2); uint16_t c3 = 
fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = libsais_bswap16(c3); uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = libsais_bswap16(c4); } *i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4; } static void libsais_unbwt_decode_6(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t * i5, fast_uint_t k) { uint16_t * RESTRICT U0 = (uint16_t *)(void *)U; uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r); uint16_t * RESTRICT U2 = (uint16_t *)(void *)(((uint8_t *)U1) + r); uint16_t * RESTRICT U3 = (uint16_t *)(void *)(((uint8_t *)U2) + r); uint16_t * RESTRICT U4 = (uint16_t *)(void *)(((uint8_t *)U3) + r); uint16_t * RESTRICT U5 = (uint16_t *)(void *)(((uint8_t *)U4) + r); fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4, p5 = *i5; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0); uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1); uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = libsais_bswap16(c2); uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = libsais_bswap16(c3); uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = libsais_bswap16(c4); uint16_t c5 = fastbits[p5 >> shift]; if (bucket2[c5] <= p5) { do { c5++; } while (bucket2[c5] <= p5); } p5 = P[p5]; U5[i] = libsais_bswap16(c5); } *i0 = 
p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4; *i5 = p5; } static void libsais_unbwt_decode_7(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t * i5, fast_uint_t * i6, fast_uint_t k) { uint16_t * RESTRICT U0 = (uint16_t *)(void *)U; uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r); uint16_t * RESTRICT U2 = (uint16_t *)(void *)(((uint8_t *)U1) + r); uint16_t * RESTRICT U3 = (uint16_t *)(void *)(((uint8_t *)U2) + r); uint16_t * RESTRICT U4 = (uint16_t *)(void *)(((uint8_t *)U3) + r); uint16_t * RESTRICT U5 = (uint16_t *)(void *)(((uint8_t *)U4) + r); uint16_t * RESTRICT U6 = (uint16_t *)(void *)(((uint8_t *)U5) + r); fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4, p5 = *i5, p6 = *i6; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0); uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1); uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = libsais_bswap16(c2); uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = libsais_bswap16(c3); uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = libsais_bswap16(c4); uint16_t c5 = fastbits[p5 >> shift]; if (bucket2[c5] <= p5) { do { c5++; } while (bucket2[c5] <= p5); } p5 = P[p5]; U5[i] = libsais_bswap16(c5); uint16_t c6 = fastbits[p6 >> shift]; if (bucket2[c6] <= p6) { do { c6++; } while (bucket2[c6] <= p6); } p6 = P[p6]; U6[i] = libsais_bswap16(c6); } *i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; 
*i4 = p4; *i5 = p5; *i6 = p6; } static void libsais_unbwt_decode_8(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t * i5, fast_uint_t * i6, fast_uint_t * i7, fast_uint_t k) { uint16_t * RESTRICT U0 = (uint16_t *)(void *)U; uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r); uint16_t * RESTRICT U2 = (uint16_t *)(void *)(((uint8_t *)U1) + r); uint16_t * RESTRICT U3 = (uint16_t *)(void *)(((uint8_t *)U2) + r); uint16_t * RESTRICT U4 = (uint16_t *)(void *)(((uint8_t *)U3) + r); uint16_t * RESTRICT U5 = (uint16_t *)(void *)(((uint8_t *)U4) + r); uint16_t * RESTRICT U6 = (uint16_t *)(void *)(((uint8_t *)U5) + r); uint16_t * RESTRICT U7 = (uint16_t *)(void *)(((uint8_t *)U6) + r); fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4, p5 = *i5, p6 = *i6, p7 = *i7; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0); uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1); uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = libsais_bswap16(c2); uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = libsais_bswap16(c3); uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = libsais_bswap16(c4); uint16_t c5 = fastbits[p5 >> shift]; if (bucket2[c5] <= p5) { do { c5++; } while (bucket2[c5] <= p5); } p5 = P[p5]; U5[i] = libsais_bswap16(c5); uint16_t c6 = fastbits[p6 >> shift]; if (bucket2[c6] <= p6) { do { c6++; } while (bucket2[c6] <= p6); } p6 = P[p6]; 
U6[i] = libsais_bswap16(c6); uint16_t c7 = fastbits[p7 >> shift]; if (bucket2[c7] <= p7) { do { c7++; } while (bucket2[c7] <= p7); } p7 = P[p7]; U7[i] = libsais_bswap16(c7); } *i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4; *i5 = p5; *i6 = p6; *i7 = p7; } static void libsais_unbwt_decode(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_sint_t n, sa_sint_t r, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_sint_t blocks, fast_uint_t reminder) { fast_uint_t shift = 0; while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift++; } fast_uint_t offset = 0; while (blocks > 8) { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5], i6 = I[6], i7 = I[7]; libsais_unbwt_decode_8(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, &i7, (fast_uint_t)r >> 1); I += 8; blocks -= 8; offset += 8 * (fast_uint_t)r; } if (blocks == 1) { fast_uint_t i0 = I[0]; libsais_unbwt_decode_1(U + offset, P, bucket2, fastbits, shift, &i0, reminder >> 1); } else if (blocks == 2) { fast_uint_t i0 = I[0], i1 = I[1]; libsais_unbwt_decode_2(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, reminder >> 1); libsais_unbwt_decode_1(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, &i0, ((fast_uint_t)r >> 1) - (reminder >> 1)); } else if (blocks == 3) { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2]; libsais_unbwt_decode_3(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, reminder >> 1); libsais_unbwt_decode_2(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, ((fast_uint_t)r >> 1) - (reminder >> 1)); } else if (blocks == 4) { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3]; libsais_unbwt_decode_4(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, reminder >> 1); libsais_unbwt_decode_3(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, 
((fast_uint_t)r >> 1) - (reminder >> 1)); } else if (blocks == 5) { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4]; libsais_unbwt_decode_5(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, reminder >> 1); libsais_unbwt_decode_4(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, ((fast_uint_t)r >> 1) - (reminder >> 1)); } else if (blocks == 6) { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5]; libsais_unbwt_decode_6(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, reminder >> 1); libsais_unbwt_decode_5(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, ((fast_uint_t)r >> 1) - (reminder >> 1)); } else if (blocks == 7) { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5], i6 = I[6]; libsais_unbwt_decode_7(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, reminder >> 1); libsais_unbwt_decode_6(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, ((fast_uint_t)r >> 1) - (reminder >> 1)); } else { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5], i6 = I[6], i7 = I[7]; libsais_unbwt_decode_8(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, &i7, reminder >> 1); libsais_unbwt_decode_7(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, ((fast_uint_t)r >> 1) - (reminder >> 1)); } } static void libsais_unbwt_decode_omp(const uint8_t * RESTRICT T, uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_sint_t n, sa_sint_t r, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, sa_sint_t threads) { fast_uint_t lastc = T[0]; fast_sint_t blocks = 1 + (((fast_sint_t)n - 1) / 
(fast_sint_t)r); /* blocks = 1 + (n - 1) / r == ceil(n / r): independently decodable blocks (expression continued from previous line) */
    /* reminder = length of the final (possibly shorter) block */
    fast_uint_t reminder = (fast_uint_t)n - ((fast_uint_t)r * ((fast_uint_t)blocks - 1));

#if defined(_OPENMP)
    fast_sint_t max_threads = blocks < threads ? blocks : threads;
#pragma omp parallel num_threads(max_threads) if(max_threads > 1 && n >= 65536)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();
#else
        UNUSED(threads);
        fast_sint_t omp_thread_num = 0;
        fast_sint_t omp_num_threads = 1;
#endif
        /* Distribute 'blocks' across threads as evenly as possible: the first
           (blocks % omp_num_threads) threads take one extra block. */
        fast_sint_t omp_block_stride = blocks / omp_num_threads;
        fast_sint_t omp_block_reminder = blocks % omp_num_threads;
        fast_sint_t omp_block_size = omp_block_stride + (omp_thread_num < omp_block_reminder);
        fast_sint_t omp_block_start = omp_block_stride * omp_thread_num + (omp_thread_num < omp_block_reminder ? omp_thread_num : omp_block_reminder);

        /* Only the globally last block can be shorter than r, so every thread
           except the last passes the full block length r as the trailing size. */
        libsais_unbwt_decode(U + r * omp_block_start, P, n, r, I + omp_block_start, bucket2, fastbits, omp_block_size, omp_thread_num < omp_num_threads - 1 ? (fast_uint_t)r : reminder);
    }

    /* The decoders emit 16-bit symbol pairs; the final symbol is patched in
       explicitly from lastc (captured as T[0] before the parallel region). */
    U[n - 1] = (uint8_t)lastc;
}

/* Shared driver for the inverse BWT: builds the bucket/fastbits decode tables
 * (parallel initialization when threads > 1 and the input is large enough),
 * then runs the block decoder. Always returns 0; allocation failures are
 * detected by the callers before reaching this point. */
static sa_sint_t libsais_unbwt_core(const uint8_t * RESTRICT T, uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_sint_t n, const sa_sint_t * freq, sa_sint_t r, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, sa_uint_t * RESTRICT buckets, sa_sint_t threads)
{
#if defined(_OPENMP)
    if (threads > 1 && n >= 262144)
    {
        libsais_unbwt_init_parallel(T, P, n, freq, I, bucket2, fastbits, buckets, threads);
    }
    else
#else
    UNUSED(buckets);
#endif
    {
        libsais_unbwt_init_single(T, P, n, freq, I, bucket2, fastbits);
    }

    libsais_unbwt_decode_omp(T, U, P, n, r, I, bucket2, fastbits, threads);
    return 0;
}

/* Allocating entry point: sets up bucket2 / fastbits (and per-thread scratch
 * buckets for the parallel path), runs the core, and frees everything.
 * Returns the core's result, or -2 if any allocation failed. */
static sa_sint_t libsais_unbwt_main(const uint8_t * T, uint8_t * U, sa_uint_t * P, sa_sint_t n, const sa_sint_t * freq, sa_sint_t r, const sa_uint_t * I, sa_sint_t threads)
{
    /* shift maps positions into at most 2^UNBWT_FASTBITS fastbits slots */
    fast_uint_t shift = 0;
    while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift++; }

    sa_uint_t * RESTRICT bucket2 = (sa_uint_t
*)libsais_alloc_aligned(ALPHABET_SIZE * ALPHABET_SIZE * sizeof(sa_uint_t), 4096); /* bigram bucket table (allocation continued from previous line) */
    /* fastbits: coarse (n >> shift)-slot table that speeds up locating the
       bucket containing a given position during decoding */
    uint16_t * RESTRICT fastbits = (uint16_t *)libsais_alloc_aligned(((size_t)1 + (size_t)(n >> shift)) * sizeof(uint16_t), 4096);
    /* per-thread scratch buckets are only needed for the parallel init path */
    sa_uint_t * RESTRICT buckets = threads > 1 && n >= 262144 ? (sa_uint_t *)libsais_alloc_aligned((size_t)threads * (ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)) * sizeof(sa_uint_t), 4096) : NULL;

    /* run the core only when every allocation the chosen path needs succeeded;
       -2 reports an out-of-memory failure to the public API */
    sa_sint_t index = bucket2 != NULL && fastbits != NULL && (buckets != NULL || threads == 1 || n < 262144) ? libsais_unbwt_core(T, U, P, n, freq, r, I, bucket2, fastbits, buckets, threads) : -2;

    libsais_free_aligned(buckets);
    libsais_free_aligned(fastbits);
    libsais_free_aligned(bucket2);

    return index;
}

/* Context variant of libsais_unbwt_main: reuses the buffers stored in ctx
 * instead of allocating fresh ones; returns -2 when the context is unusable
 * (missing buffers, or multi-threaded ctx without its scratch buckets). */
static sa_sint_t libsais_unbwt_main_ctx(const LIBSAIS_UNBWT_CONTEXT * ctx, const uint8_t * T, uint8_t * U, sa_uint_t * P, sa_sint_t n, const sa_sint_t * freq, sa_sint_t r, const sa_uint_t * I)
{
    return ctx != NULL && ctx->bucket2 != NULL && ctx->fastbits != NULL && (ctx->buckets != NULL || ctx->threads == 1) ?
libsais_unbwt_core(T, U, P, n, freq, r, I, ctx->bucket2, ctx->fastbits, ctx->buckets, (sa_sint_t)ctx->threads) : -2; /* (ternary continued from previous line; -2 = unusable context) */
}

/* Public API: create a single-threaded inverse-BWT context with reusable
 * scratch buffers. Returns NULL on allocation failure. */
void * libsais_unbwt_create_ctx(void)
{
    return (void *)libsais_unbwt_create_ctx_main(1);
}

/* Public API: release a context obtained from libsais_unbwt_create_ctx. */
void libsais_unbwt_free_ctx(void * ctx)
{
    libsais_unbwt_free_ctx_main((LIBSAIS_UNBWT_CONTEXT *)ctx);
}

/* Public API: plain inverse BWT with a single primary index i; forwards to
 * the auxiliary-indexes variant with r == n (i.e. exactly one block). */
int32_t libsais_unbwt(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t i)
{
    return libsais_unbwt_aux(T, U, A, n, freq, n, &i);
}

int32_t libsais_unbwt_ctx(const void * ctx, const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t i)
{
    return libsais_unbwt_aux_ctx(ctx, T, U, A, n, freq, n, &i);
}

/* Public API: inverse BWT using auxiliary indexes I sampled every r symbols.
 * r must equal n, or be a power of two >= 2.
 * Returns 0 on success, -1 on invalid arguments, -2 on allocation failure. */
int32_t libsais_unbwt_aux(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t r, const int32_t * I)
{
    if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || ((r != n) && ((r < 2) || ((r & (r - 1)) != 0))) || (I == NULL))
    {
        return -1;
    }
    else if (n <= 1)
    {
        /* trivial inputs: the only valid index value is n itself */
        if (I[0] != n) { return -1; }
        if (n == 1) { U[0] = T[0]; }
        return 0;
    }

    /* every auxiliary index must lie in (0, n] */
    fast_sint_t t;
    for (t = 0; t <= (n - 1) / r; ++t) { if (I[t] <= 0 || I[t] > n) { return -1; } }

    return libsais_unbwt_main(T, U, (sa_uint_t *)A, n, freq, r, (const sa_uint_t *)I, 1);
}

/* Context variant of libsais_unbwt_aux: same validation, work buffers taken
 * from the supplied context. */
int32_t libsais_unbwt_aux_ctx(const void * ctx, const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t r, const int32_t * I)
{
    if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || ((r != n) && ((r < 2) || ((r & (r - 1)) != 0))) || (I == NULL))
    {
        return -1;
    }
    else if (n <= 1)
    {
        if (I[0] != n) { return -1; }
        if (n == 1) { U[0] = T[0]; }
        return 0;
    }

    fast_sint_t t;
    for (t = 0; t <= (n - 1) / r; ++t) { if (I[t] <= 0 || I[t] > n) { return -1; } }

    return libsais_unbwt_main_ctx((const LIBSAIS_UNBWT_CONTEXT *)ctx, T, U, (sa_uint_t *)A, n, freq, r, (const sa_uint_t *)I);
}

#if defined(_OPENMP)
/* Public API (OpenMP builds only): context with a caller-chosen thread count;
 * threads == 0 selects OpenMP's default, threads < 0 is rejected. */
void * libsais_unbwt_create_ctx_omp(int32_t threads)
{
    if (threads < 0) { return NULL; }
    threads = threads > 0 ?
threads : omp_get_max_threads(); /* (continued from previous line: 0 => use OpenMP's default thread count) */
    return (void *)libsais_unbwt_create_ctx_main(threads);
}

/* Public API (OpenMP builds only): multi-threaded inverse BWT with a single
 * primary index; forwards to the auxiliary variant with r == n. */
int32_t libsais_unbwt_omp(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t i, int32_t threads)
{
    return libsais_unbwt_aux_omp(T, U, A, n, freq, n, &i, threads);
}

/* Public API (OpenMP builds only): multi-threaded inverse BWT with auxiliary
 * indexes. Same argument validation as libsais_unbwt_aux, plus threads >= 0
 * (threads == 0 => omp_get_max_threads()). Returns 0 / -1 / -2 as above. */
int32_t libsais_unbwt_aux_omp(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t r, const int32_t * I, int32_t threads)
{
    if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || ((r != n) && ((r < 2) || ((r & (r - 1)) != 0))) || (I == NULL) || (threads < 0))
    {
        return -1;
    }
    else if (n <= 1)
    {
        if (I[0] != n) { return -1; }
        if (n == 1) { U[0] = T[0]; }
        return 0;
    }

    /* every auxiliary index must lie in (0, n] */
    fast_sint_t t;
    for (t = 0; t <= (n - 1) / r; ++t) { if (I[t] <= 0 || I[t] > n) { return -1; } }

    threads = threads > 0 ? threads : omp_get_max_threads();

    return libsais_unbwt_main(T, U, (sa_uint_t *)A, n, freq, r, (const sa_uint_t *)I, threads);
}
#endif
/* ===================== Statistics.h (ViennaTS) — start of next file ===================== */
#ifndef DEF_STATISTICS
#define DEF_STATISTICS

/* =========================================================================
   Copyright (c) 2008-2015, Institute for Microelectronics, TU Wien.

   -----------------
   ViennaTS - The Vienna Topography Simulator
   -----------------

   Contact:         viennats@iue.tuwien.ac.at
   License:         MIT (X11), see file LICENSE in the base directory
============================================================================= */

#include "Math.h"
#include <cmath>
#include <cstdlib>
#if defined(_OPENMP)
#include <omp.h>
#endif
//#include "sprng/sprng.h"
#include <chrono>
#include <random>
#include "message.h"
#include <vector>
#include <fstream>
#include <iostream>

namespace my {

/// Contains random number generation algorithms and other statistical tools.
namespace stat {

    static const double epsilon=1e-10;   // guards divisions against near-zero radii in the rejection samplers below

    // NOTE(review): the three definitions below are non-inline namespace-scope
    // objects in a header; including this header from more than one translation
    // unit would violate the ODR — confirm the project includes it from a
    // single TU only.
    // NOTE(review): count() returns a wide integer; initializing an unsigned
    // int narrows the clock value — presumably intentional seeding, verify.
    unsigned int ClockSEED = std::chrono::system_clock::now().time_since_epoch().count();
    // NOTE(review): one shared engine for all callers; it is not synchronized,
    // so concurrent RandomNumber() calls from OpenMP threads would race —
    // verify random numbers are only drawn from a single thread.
    std::default_random_engine generator(ClockSEED);
    std::uniform_real_distribution<double> distribution(0.0,1.0);

    using namespace math;

    int* rng;                            // legacy per-thread RNG handle (apparently unused since the sprng include was disabled)
    #pragma omp threadprivate (rng)

    /// Uniform random double in [0, 1).
    inline double RandomNumber() {
        return distribution(generator);
    }

    /// Picks a uniformly distributed point (a, b) on the unit circle without
    /// trigonometric calls: draws (x, y) uniformly in a disc by rejection and
    /// maps it through the double-angle identities
    /// a = cos(2t) = (x^2 - y^2)/r^2, b = sin(2t) = 2xy/r^2.
    inline void PickRandomPointOnUnitCircle(double& a, double& b) {    //better on AMD
        double x,y,x2,y2,x2py2;
        do {
            x=RandomNumber()-0.5; x2=x*x;
            y=RandomNumber()-0.5; y2=y*y;
            x2py2=x2+y2;
        } while ((x2py2>=0.25) || (x2py2<=epsilon));   // stay inside the disc, away from the singular origin
        a=(x2-y2)/x2py2;
        b=2*((x*y)/x2py2);
    }

    /// Trigonometric variant: a = cos(phi), b = sin(phi) with
    /// phi = RandomNumber() * Pi2 (Pi2 presumably 2*pi, defined in Math.h).
    inline void PickRandomPointOnUnitCircle2(double& a, double& b) {
        double phi=RandomNumber()*Pi2;
        a=std::cos(phi);
        b=std::sin(phi);
    }

    /// Picks a uniformly distributed point (x, y, z) on the unit sphere via
    /// Marsaglia's rejection method (disc sample lifted to the sphere, no
    /// trigonometric calls).
    inline void PickRandomPointOnUnitSphere(double& x, double& y, double& z) {    //better
        double x2,y2,x2py2;
        do {
            x=2*RandomNumber()-1.; x2=x*x;
            y=2*RandomNumber()-1.; y2=y*y;
            x2py2=x2+y2;
        } while (x2py2>=1.);
        double tmp=2*std::sqrt(1.-x2py2);
        x*=tmp;
        y*=tmp;
        z=1.-2*x2py2;
    }

    /// Samples cos(theta) for the power-cosine ("cos^N") emission lobe:
    /// cos(theta) = u^(1/(N+1)) with u uniform in [0, 1).
    inline double PowerCosineSineDistributionReturnCosTheta(const double N) {
        return std::pow(RandomNumber(),1./(N+1.));
    }

    inline double ConeCosineSineDistributionReturnTheta(const
double cone_angle) {   // (signature continued from previous line)
        // Rejection sampler for a polar angle theta in [0, cone_angle]; the
        // acceptance test compares against cos(Pi1_2 * theta/cone_angle) *
        // sin(theta) (Pi1_2 presumably pi/2, from Math.h) — i.e. a cosine-like
        // lobe squeezed into the cone. Returns the accepted angle.
        double u, sqrt_1m_u;
        double angle;
        do {
            u=std::sqrt(RandomNumber());
            sqrt_1m_u=std::sqrt(1.-u);
            angle=cone_angle*sqrt_1m_u;           // proposal biased toward small angles
        } while (RandomNumber()*angle*u>std::cos(Pi1_2*sqrt_1m_u)*std::sin(angle));
        return angle;
    }

    /// Same distribution as ConeCosineSineDistributionReturnTheta, but returns
    /// cos(theta); the acceptance test is written with squared terms so only
    /// one cosine per proposal is needed.
    inline double ConeCosineSineDistributionReturnCosTheta(const double cone_angle) {
        double u, sqrt_1m_u;
        double cosine;
        double left, right;
        do {
            u=std::sqrt(RandomNumber());
            sqrt_1m_u=std::sqrt(1.-u);
            cosine=std::cos(cone_angle*sqrt_1m_u);
            left=RandomNumber()*cone_angle*sqrt_1m_u*u;
            left*=left;                            // compare squares to avoid a sin() call
            right=std::cos(Pi1_2*sqrt_1m_u);
            right*=right;
            right*=(1.-cosine*cosine);             // == cos^2(...) * sin^2(theta)
        } while (left>right);
        return cosine;
    }

    /// Alternative sampler: proposes cos(theta) uniformly on
    /// [cos(cone_angle), 1] and accepts with probability cos(a * theta),
    /// a = (pi/2)/cone_angle. Returns theta (an angle, despite the name family).
    inline double ConeCosineSineDistributionReturnTheta2(const double cone_angle) {
        double cosine;
        double _1_m_cos_cone_angle=1.-std::cos(cone_angle);
        double angle;
        double a=Pi1_2/cone_angle;
        do {
            cosine=1-RandomNumber()*_1_m_cos_cone_angle;
            angle=std::acos(cosine);
        } while (RandomNumber()>std::cos(a*angle));
        return angle;
    }

    /// Third variant: sqrt-biased proposal on [0, cone_angle] with the same
    /// cos*sin acceptance test as the first variant. (The local named 'sqrt'
    /// holds sqrt(RandomNumber()); std::sqrt itself is still used qualified.)
    inline double ConeCosineSineDistributionReturnTheta3(const double cone_angle) {
        double angle;
        double sqrt;
        do {
            sqrt=std::sqrt(RandomNumber());
            angle=sqrt*cone_angle;
        } while (RandomNumber()*angle>std::cos(Pi1_2*sqrt)*std::sin(angle));
        return angle;
    }

    /// Rotates AverageDirection by a polar angle (given via costheta) and an
    /// azimuth (given via the pair sinphi/cosphi), writing the result into
    /// RandomDirection. r2 is the squared norm of (sinphi, cosphi) when the
    /// pair is unnormalized (see RandomAzimuthalRotation); it defaults to 1
    /// for a normalized pair. Assumes AverageDirection is a unit vector —
    /// TODO(review): confirm with callers.
    template<class VecType, class VecType2> inline void Rotate(const VecType& AverageDirection, VecType2& RandomDirection, const double sinphi, const double cosphi, double costheta, const double r2=1.)
{   // (body of Rotate; signature on the previous line)
        costheta=std::min(costheta,1.);                       // clamp against rounding so the sqrt below stays real
        // Work with the smaller-magnitude of the first two components as a0 so
        // that 1 - a0^2 (divisor below) stays well away from zero.
        double a0;
        double a1;
        if (std::fabs(AverageDirection[0])<=std::fabs(AverageDirection[1])) {
            a0=AverageDirection[0];
            a1=AverageDirection[1];
        } else {
            a0=AverageDirection[1];
            a1=AverageDirection[0];
        }
        const double a0_a0_m1=1.-a0*a0;
        // tmp scales the azimuth pair by sin(theta); dividing by r2 folds the
        // normalization of an unnormalized (sinphi, cosphi) pair into the same step
        const double tmp=std::sqrt(std::max(1.-costheta*costheta,0.)/(r2*a0_a0_m1));
        const double tmp_sinphi=tmp*sinphi;
        const double tmp_cosphi=tmp*cosphi;
        const double costheta_p_a0_tmp_sinphi=costheta+a0*tmp_sinphi;
        RandomDirection[0]=a0*costheta-a0_a0_m1*tmp_sinphi;
        RandomDirection[1]=a1*costheta_p_a0_tmp_sinphi+AverageDirection[2]*tmp_cosphi;
        RandomDirection[2]=AverageDirection[2]*costheta_p_a0_tmp_sinphi-a1*tmp_cosphi;
        if (a0!=AverageDirection[0]) std::swap(RandomDirection[0],RandomDirection[1]);   // undo the component swap chosen above
    }

    /// Azimuthal rotation with the trig-free unit-circle sampler supplying a
    /// normalized (cosphi, sinphi) pair.
    template<class VecType, class VecType2> inline void RandomAzimuthalRotation3(const VecType& AverageDirection, VecType2& RandomDirection, const double costheta) {
        double cosphi, sinphi;
        PickRandomPointOnUnitCircle(cosphi, sinphi);
        Rotate(AverageDirection, RandomDirection, sinphi, cosphi, costheta);
    }

    /// Azimuthal rotation drawing an UNnormalized direction pair uniformly in
    /// a disc; its squared radius r2 is handed to Rotate, which folds the
    /// normalization into its scaling factor (saves a sqrt here).
    template<class VecType, class VecType2> inline void RandomAzimuthalRotation(const VecType& AverageDirection, VecType2& RandomDirection, const double costheta) {
        double cosphi, sinphi;
        double r2;
        do {
            cosphi=RandomNumber()-0.5;
            sinphi=RandomNumber()-0.5;
            r2=cosphi*cosphi+sinphi*sinphi;
        } while (r2>=0.25 || r2<=epsilon) ;
        Rotate(AverageDirection, RandomDirection, sinphi, cosphi, costheta, r2);
    }

    /// Alternative azimuthal rotation: picks an auxiliary point on the unit
    /// sphere (rejecting near-parallel ones), then blends it with
    /// AverageDirection so the result makes the requested angle
    /// (cos = costheta) with AverageDirection.
    template<class VecType, class VecType2> inline void RandomAzimuthalRotation2(const VecType& AverageDirection, VecType2& RandomDirection, const double costheta) {
        double tmp[3];
        double dot;
        do {
            PickRandomPointOnUnitSphere(tmp[0],tmp[1], tmp[2]);
            dot=AverageDirection[0]*tmp[0]+AverageDirection[1]*tmp[1]+AverageDirection[2]*tmp[2];
        } while (dot>=1.);
        double r=std::sqrt((1-costheta*costheta)/(1-dot*dot));
        RandomDirection[0]=AverageDirection[0]*costheta+(tmp[0]-AverageDirection[0]*dot)*r;
RandomDirection[1]=AverageDirection[1]*costheta+(tmp[1]-AverageDirection[1]*dot)*r; RandomDirection[2]=AverageDirection[2]*costheta+(tmp[2]-AverageDirection[2]*dot)*r; } template<class VecType, class VecType2> inline void CosineNDistributedRandomDirection(double N, const VecType& AverageDirection, VecType2& RandomDirection) { double costheta=PowerCosineSineDistributionReturnCosTheta(N); RandomAzimuthalRotation(AverageDirection, RandomDirection, costheta); } template<class VecType, class VecType2> inline void CosineNDistributedRandomDirection(double N, const VecType& AverageDirection, VecType2& RandomDirection, double cos_cutoff_angle) { double costheta; do { costheta=PowerCosineSineDistributionReturnCosTheta(N); } while (costheta<cos_cutoff_angle); RandomAzimuthalRotation(AverageDirection, RandomDirection, costheta); } template<class VecType, class VecType2> inline void Cosine1DistributedRandomDirection(const VecType& AverageDirection, VecType2& RandomDirection, double twice_cos_cutoff_angle=0.) 
{ double tmp; do { PickRandomPointOnUnitSphere(RandomDirection[0], RandomDirection[1], RandomDirection[2]); RandomDirection[0]+=AverageDirection[0]; RandomDirection[1]+=AverageDirection[1]; RandomDirection[2]+=AverageDirection[2]; tmp=std::sqrt(RandomDirection[0]*RandomDirection[0]+RandomDirection[1]*RandomDirection[1]+RandomDirection[2]*RandomDirection[2]); } while (tmp<=twice_cos_cutoff_angle); RandomDirection[0]/=tmp; RandomDirection[1]/=tmp; RandomDirection[2]/=tmp; } template<class VecType, class VecType2> inline void CosAngleDistributedRandomDirection(double Angle,const VecType& AverageDirection, VecType2& RandomDirection) { double costheta=std::cos(ConeCosineSineDistributionReturnTheta(Angle)); RandomAzimuthalRotation(AverageDirection, RandomDirection, costheta); } template<class VecType1, class VecType2, class VecType3, class ValType> inline void NormalDistributedStartPosition(const VecType1& center, const VecType2& dir, ValType FWHM, VecType3& position) { static const double fac=-std::log(16.); double v0,v1,rsq; do { v0=2.0*RandomNumber()-1.0; v1=2.0*RandomNumber()-1.0; rsq=v0*v0+v1*v1; } while (rsq>=1.0 || rsq<1e-20); Rotate(dir, position,v0,v1, 0.,rsq); rsq=std::sqrt(std::log(rsq)/(rsq*fac))*FWHM; for (int k=0;k<3;k++) { position[k]*=rsq; position[k]+=center[k]; } } template<class VecType1, class VecType2, class VecType3, class ValType, class ParameterType> inline void NormalDistributedStartPosition2(const VecType1& center, const VecType2& dir, ValType FWHM, VecType3& position, const ParameterType& Parameter) { static const double fac=-std::log(16.); double v0,v1,rsq; do { v0=2.0*RandomNumber()-1.0; v1=2.0*RandomNumber()-1.0; rsq=v0*v0+v1*v1; } while (rsq>=1.0 || rsq<1e-20); Rotate(dir, position,v0,v1, 0.,rsq); rsq=std::sqrt(std::log(rsq)/(fac))*FWHM; for (int k=0;k<3;k++) { position[k]*=rsq; position[k]+=center[k]; } } template<class VecType1, class VecType2, class VecType3, class ValType, class ParameterType> inline void 
LorentzDistributedStartPosition(const VecType1& center, const VecType2& dir, ValType FWHM, VecType3& position, const ParameterType& Parameter) { int x=4; // int y; double radius; radius=FWHM/2*std::sqrt(exp(my::math::Pi*RandomNumber())-1); double theta; theta = 2*my::math::Pi*RandomNumber(); for (int i=0;i<3;i++) { if (Parameter.open_boundary==i) { position[i]=center[i]; } else { if (x==4) { x=i; position[i]=radius*std::cos(theta)+center[i]; //position[i]=radius*std::cos(theta)+center[i]; } else { // y=i; position[i]=radius*std::sin(theta)+center[i]; //position[i]=radius*std::sin(theta)+center[i]; } } } } template<class VecType1, class VecType2, class VecType3, class ValType, class ParameterType> inline void SurfaceChargeDensityDistributedStartPosition(const VecType1& center, const VecType2& dir, ValType FWHM, VecType3& position, const ParameterType& Parameter, double distance) { int x=4, y=4; double radius, randnum; randnum=RandomNumber(); radius=2*FWHM*std::sqrt((1/(1-randnum))*(1/(1-randnum))-1)/3; radius*=5e-9/distance; double theta; theta = 2*my::math::Pi*RandomNumber(); for (int i=0;i<3;i++) { if (Parameter.open_boundary==i) { position[i]=center[i]; } else { if (x==4) { x=i; position[i]=radius*std::cos(theta)+center[i]; } else { y=i; position[i]=radius*std::sin(theta)+center[i]; } } } } template<class VecType1, class VecType2, class VecType3, class ValType, class ParameterType> inline double SurfaceChargeDensityDistributedStartPosition(const VecType1& center, const VecType2& dir, ValType FWHM, VecType3& position, const ParameterType& Parameter) { int x=4; // int y; double radius, randnum; randnum=RandomNumber(); radius=2*FWHM*std::sqrt((1/(1-randnum))*(1/(1-randnum))-1)/3; double theta; theta = 2*my::math::Pi*RandomNumber(); for (int i=0;i<3;i++) { if (Parameter.open_boundary==i) { position[i]=center[i]; } else { if (x==4) { x=i; position[i]=radius*std::cos(theta)+center[i]; //position[i]=radius*std::cos(theta)+center[i]; } else { // y=i; 
position[i]=radius*std::sin(theta)+center[i];
                    //position[i]=radius*std::sin(theta)+center[i];
                }
            }
        }
        return radius;   // radial offset that was used, reported back to the caller
    }

    /// Rejection-samples a start position on the simulation boundary plane
    /// (the open-boundary coordinate is pinned to center) with acceptance
    /// weight given by the point-charge surface-charge-density kernel
    /// scd ~ d / (rho^2 + d^2)^(3/2). dir and voltage are currently unused.
    /// NOTE(review): 'd' is only assigned when i == Parameter.open_boundary;
    /// for any open_boundary != 0 the first loop iteration evaluates the
    /// ':d' branch and reads 'd' uninitialized — undefined behavior. Should
    /// be initialized (e.g. double d = 0;) — confirm intended value.
    template<class VecType1, class VecType2, class VecType3, class ValType, class ParameterType, class PartitionType> inline void SurfaceChargeDensityDistribution(const VecType1& center, const VecType2& dir, ValType voltage, VecType3& position, const ParameterType& Parameter, const PartitionType& Partition) {
        bool keep;
        double d;
        for (int i=0;i<3;i++) d=(Parameter.open_boundary==i)?(center[i]-(Partition.Max(i)-1)*Parameter.grid_delta):d;
        do {
            // propose a uniform position over the domain cross-section
            for (int i=0;i<3;i++) position[i]=(Parameter.open_boundary==i)?center[i]:RandomNumber()*Partition.Extension(i)*Parameter.grid_delta;
            // squared in-plane distance from center
            double PositionSumSquares=0;
            for (int i=0;i<3;i++) PositionSumSquares+=(Parameter.open_boundary!=i)?(position[i]-center[i])*(position[i]-center[i]):0;
            double scd;
            scd = (d*1.6e-19)/(2*(my::math::Pi)*(PositionSumSquares+d*d)*sqrt(PositionSumSquares+d*d));
            double scd_max;
            scd_max = (1.6e-19)/(2*(my::math::Pi)*(d*d));   // kernel value at rho = 0 (envelope)
            double randomnu;
            randomnu=(scd_max)*RandomNumber();
            keep=scd<randomnu;                              // note: 'keep' true means REJECT and loop again
        } while (keep);
    }

    /// Multi-charge variant: acceptance weight sums the same kernel over all
    /// supplied point charges; the envelope is the summed kernel evaluated at
    /// the hard-coded position_max (inflated by 1.2 below). d and position_max
    /// are hard-coded calibration values — TODO(review): confirm units/source.
    template<class VecType1, class VecType2, class ValType, class ParameterType, class PartitionType> inline void SurfaceChargeDensityDistribution(const VecType1& dir, ValType voltage, VecType2& position, const ParameterType& Parameter, const PartitionType& Partition, const std::vector<double>& positions, const std::vector<double>& charges) {
        //const std::vector<double, std::allocator<double> >&, const std::vector<double, std::allocator<double> >&
        bool keep;
        double d=3.7;//e-9;
        double position_max[3];
        position_max[0]=21.5;//25;
        position_max[1]=43.7;
        position_max[2]=20.;
        do {
            double scd=0;
            double scd_max=0;
            for (int i=0;i<3;i++) position[i]=(Parameter.open_boundary==i)?((Partition.Max(i)-1)*Parameter.grid_delta):RandomNumber()*Partition.Extension(i)*Parameter.grid_delta;
            // position_max[0]=23;//25;
            // position_max[1]=43.7;
            // position_max[2]=20;
            for (unsigned int i=0;i<charges.size();++i){
            // Squared in-plane distances of the trial point from charge i, and of
            // the fixed reference point position_max (envelope estimate).
            double PositionSumSquares=0;
            for (int j=0;j<3;j++)
                PositionSumSquares+=(Parameter.open_boundary!=j)?(position[j]-positions[3*i+j])*(position[j]-positions[3*i+j]):0;
            double PositionSumSquaresMax=0;
            for (int j=0;j<3;j++)
                PositionSumSquaresMax+=(Parameter.open_boundary!=j)?(position_max[j]-positions[3*i+j])*(position_max[j]-positions[3*i+j]):0;
            // Superpose the per-charge surface charge densities.
            scd += (d*charges[i]*1.6e-19)/(2*(my::math::Pi)*(PositionSumSquares+d*d)*sqrt(PositionSumSquares+d*d));
            scd_max += (d*charges[i]*1.6e-19)/(2*(my::math::Pi)*(PositionSumSquaresMax+d*d)*sqrt(PositionSumSquaresMax+d*d));
        }
        double randomnu;
        // NOTE(review): the 1.2 factor inflates the envelope, presumably because
        // scd_max is only an estimate of the true maximum — confirm.
        randomnu=(scd_max)*RandomNumber()*1.2;
        keep=scd<randomnu;
    } while (keep);
}

// Samples a start position on/around a finite nanowire segment of length `Length`
// rotated by `Angle` in the plane perpendicular to the open boundary: with
// probability ~volume_line the point lies along the wire (SCD-like transverse
// profile), otherwise around the wire ends.  Returns the sampled radius.
template<class VecType1, class VecType2, class VecType3, class VecType4, class ValType, class ParameterType>
inline double NanowireSurfaceCharge(const VecType1& StartPosition, const VecType2 EndPosition, const VecType3& dir, ValType FWHM, ValType Length, ValType Height, ValType Angle, VecType4& position, const ParameterType& Parameter) {
    // Relative weights of the two sampling regions (ends vs. line).
    double volume_sides, volume_line;
    volume_sides = 1;//FWHM/3;//2*FWHM/3;
    volume_line = Length*3/(my::math::Pi*FWHM);
    double line_or_sides;
    line_or_sides=(volume_sides+volume_line)*RandomNumber();
    double X,Y;
    int x=4, y=4;                   // indices of the two in-plane axes
    for (int i=0;i<3;i++) {
        if (Parameter.open_boundary!=i) {
            if (x==4) {
                x=i;
            } else {
                y=i;
            }
        }
    }
    //double d=3.7e-9;
    //double d=106.286545976e-9;
    double radius=0;
    if (line_or_sides <= volume_line) {
        // generate a particle in the line: uniform along the wire, SCD-like
        // inverse-transform profile transverse to it.
        double randx, randy;
        randx=RandomNumber()-0.5;
        // double radius;
        radius=2*FWHM*randx/(3*sqrt(1-4*randx*randx));
        //radius=2*2*FWHM*randx/(3*sqrt(1-4*randx*randx));
        X = radius;
        randy=RandomNumber();
        Y = Length*randy;
    } else {
        // generate a particle on the sides (around the wire end caps).
        double randnum=RandomNumber();
        // double radius;
        radius=FWHM*std::sqrt((1/(1-randnum))*(1/(1-randnum))-1)/3;
        //radius=2*FWHM*std::sqrt((1/(1-randnum))*(1/(1-randnum))-1)/3;
        double theta;
        theta = 2*my::math::Pi*RandomNumber();
        X=radius*std::cos(theta);
        Y=radius*std::sin(theta);
        // Positive half of the circle is attached to the far end of the wire.
        Y += Y>0?Length:0;
        // radius=2*radius*radius;
    }
    // Rotate the (X,Y) in-plane sample by -Angle and translate to StartPosition.
    for (int i=0;i<3;i++) {
        if (i==x) {
            position[i] = cos(-Angle)*(X) - sin(-Angle)*(Y) + StartPosition[i];
        } else if (i==y) {
            position[i] = sin(-Angle)*(X) + cos(-Angle)*(Y) + StartPosition[i];
        } else {
            position[i] = StartPosition[i];
        }
    }
    return radius;
}

// Hard-coded start-position generator for a junctionless-transistor geometry:
// picks one of three source/drain boxes or the gate line with fixed weights,
// then places the particle uniformly inside it.
// NOTE(review): all extents (5000, 7400, 300, …) and the weight total 77.22 are
// magic numbers tied to one device layout — confirm units (presumably nm).
template<class VecType1, class ParameterType, class PartitionType>
inline void Junctionless(VecType1& position, const ParameterType& Parameter, const PartitionType& Partition) {
    double where = 77.22*RandomNumber();
    double X,Z;
    if (where < 25){
        //Box 1
        X = 5000*RandomNumber()+10000;
        Z = 5000*RandomNumber()+5000;
    } else if (where < 50) {
        //Box 2
        X = 5000*RandomNumber();
        Z = 5000*RandomNumber();
    } else if (where < 75) {
        //Box 3
        X = 5000*RandomNumber();
        Z = 5000*RandomNumber()+10000;
    } else {
        //Gate line
        X = 7400*RandomNumber()+2600;
        Z = 300*RandomNumber()+7350;
    }
    position[0]=X;
    position[1]=100;
    position[2]=Z;
}

// Rejection-samples `position` against the surface charge density of a charged
// line segment from StartPosition to EndPosition.  (Continues on the next chunk
// line.)
template<class VecType1, class VecType2, class VecType3, class VecType4, class ValType, class ParameterType, class PartitionType>
inline void NanowireSurfaceChargeDistribution(const VecType1& StartPosition, const VecType2 EndPosition, const VecType3& dir, ValType FWHM, ValType Length, ValType Angle, VecType4& position, const ParameterType& Parameter, const PartitionType& Partition) {
    bool keep;
    double d=0;                 // depth of the wire behind the open-boundary plane
    int x=4;
    int y=4;
    for (int i=0;i<3;i++)
        d=(Parameter.open_boundary==i)?(StartPosition[i]-(Partition.Max(i)-1)*Parameter.grid_delta):d;
    double scd_max;
    scd_max = (1.6e-19)/(2*(my::math::Pi)*(d*d));   // rejection envelope (density directly above the wire)
    do {
        for (int i=0;i<3;i++) {
            if (Parameter.open_boundary==i){
                position[i]=StartPosition[i];
            } else {
                position[i]=RandomNumber()*Partition.Extension(i)*Parameter.grid_delta;
                if (x==4) {
                    //if x has not yet been assigned
                    x=i;
                } else {
                    //x has been assigned, now assign y
                    y=i;
                }
            }
        }
        double alpha;
        //alpha is the angle the line makes with the horizontal direction
        if ((EndPosition[x]-StartPosition[x])==0) {
            if ((EndPosition[y]-StartPosition[y]) > 0) {
                alpha=my::math::Pi/2;
            } else
            {//if ((EndPosition[2]-StartPosition[2]) < 0) {
                alpha=-my::math::Pi/2;
            }
        } else {
            alpha=std::atan((EndPosition[y]-StartPosition[y])/(EndPosition[x]-StartPosition[x]));
            if ((EndPosition[x]-StartPosition[x])<0)
                alpha+=my::math::Pi;        // put atan result into the correct half-plane
        }
        double beta;
        //beta is the angle between start position and position();
        if ((position[x]-StartPosition[x])==0) {
            if ((position[y]-StartPosition[y]) > 0) {
                beta=my::math::Pi/2;
            } else if ((position[y]-StartPosition[y]) < 0) {
                beta=-my::math::Pi/2;
            } else {
                //(position[0]-StartPosition[0]) and (position[2]-StartPosition[2])
                beta=alpha;                 // trial point coincides with the wire start
            }
        } else {
            beta=std::atan((position[y]-StartPosition[y])/(position[x]-StartPosition[x]));
            if ((position[x]-StartPosition[x])<0)
                beta+=my::math::Pi;
        }
        // Squared distance from the wire start to the trial point.
        double hypotenuse_sq;
        hypotenuse_sq = (position[y]-StartPosition[y])*(position[y]-StartPosition[y])+(position[x]-StartPosition[x])*(position[x]-StartPosition[x]);
        // Squared distance from the trial point to the closest point of the segment:
        // behind the start -> distance to start; beyond the end -> distance to end;
        // otherwise -> perpendicular distance to the line.
        double PositionSumSquares;
        if (beta<(alpha-my::math::Pi/2) || beta>(alpha+my::math::Pi/2)) {
            PositionSumSquares=hypotenuse_sq;
        } else {
            double theta;
            theta=beta-alpha;
            double DirectionalLength;
            DirectionalLength=std::sqrt(hypotenuse_sq)*std::cos(theta);
            if (DirectionalLength > Length) {
                PositionSumSquares = (position[y]-EndPosition[y])*(position[y]-EndPosition[y])+(position[x]-EndPosition[x])*(position[x]-EndPosition[x]);
            } else {
                PositionSumSquares = hypotenuse_sq*std::sin(theta)*std::sin(theta);
            }
        }
        // Accept/reject against the point-charge surface-charge-density profile.
        double scd;
        scd = (d*1.6e-19)/(2*(my::math::Pi)*(PositionSumSquares+d*d)*sqrt(PositionSumSquares+d*d));
        double randomnu;
        randomnu=(scd_max)*RandomNumber();
        keep=scd<randomnu;
    } while (keep);
}

// Rejection-samples `position` against a Lorentz (Cauchy) profile of width FWHM
// around the segment StartPosition->EndPosition; same segment-distance geometry
// as NanowireSurfaceChargeDistribution above.
template<class VecType1, class VecType2, class VecType3, class VecType4, class ValType, class ParameterType, class PartitionType>
inline void NanowireLorentzDistribution(const VecType1& StartPosition, const VecType2 EndPosition, const VecType3& dir, ValType FWHM, ValType Length, ValType Angle, VecType4& position, const ParameterType& Parameter, const PartitionType& Partition) {
    bool keep;
    int x=4;
    int y=4;
    double sigma;
    sigma=FWHM/2;                           // half-width of the Cauchy profile
    double cauchy_max;
    cauchy_max=1/(my::math::Pi*sigma);      // peak value, used as rejection envelope
    do {
        for (int i=0;i<3;i++) {
            if (Parameter.open_boundary==i){
                position[i]=StartPosition[i];
            } else {
                position[i]=RandomNumber()*Partition.Extension(i)*Parameter.grid_delta;
                if (x==4) {
                    //if x has not yet been assigned
                    x=i;
                } else {
                    //x has been assigned, now assign y
                    y=i;
                }
            }
        }
        double alpha;
        //alpha is the angle the line makes with the horizontal direction
        if ((EndPosition[x]-StartPosition[x])==0) {
            if ((EndPosition[y]-StartPosition[y]) > 0) {
                alpha=my::math::Pi/2;
            } else {//if ((EndPosition[2]-StartPosition[2]) < 0) {
                alpha=-my::math::Pi/2;
            }
        } else {
            alpha=std::atan((EndPosition[y]-StartPosition[y])/(EndPosition[x]-StartPosition[x]));
            if ((EndPosition[x]-StartPosition[x])<0)
                alpha+=my::math::Pi;
        }
        double beta;
        //beta is the angle between start position and position();
        if ((position[x]-StartPosition[x])==0) {
            if ((position[y]-StartPosition[y]) > 0) {
                beta=my::math::Pi/2;
            } else if ((position[y]-StartPosition[y]) < 0) {
                beta=-my::math::Pi/2;
            } else {
                //(position[0]-StartPosition[0])==0 and (position[2]-StartPosition[2])==0
                beta=alpha;
            }
        } else {
            beta=std::atan((position[y]-StartPosition[y])/(position[x]-StartPosition[x]));
            if ((position[x]-StartPosition[x])<0)
                beta+=my::math::Pi;
        }
        double hypotenuse_sq;
        hypotenuse_sq = (position[y]-StartPosition[y])*(position[y]-StartPosition[y])+(position[x]-StartPosition[x])*(position[x]-StartPosition[x]);
        // rc: squared distance from the trial point to the nearest point of the segment.
        double rc;
        if (beta<(alpha-my::math::Pi/2) || beta>(alpha+my::math::Pi/2)) {
            rc=hypotenuse_sq;
        } else {
            double theta;
            theta=beta-alpha;
            double DirectionalLength;
            DirectionalLength=std::sqrt(hypotenuse_sq)*std::cos(theta);
            if (DirectionalLength > Length) {
                rc = (position[y]-EndPosition[y])*(position[y]-EndPosition[y])+(position[x]-EndPosition[x])*(position[x]-EndPosition[x]);
            } else {
                rc = hypotenuse_sq*std::sin(theta)*std::sin(theta);
            }
        }
        // Cauchy density at the trial point (rc is already a squared distance).
        double cauchy;
        cauchy=1/(my::math::Pi*sigma*(1+(rc/(sigma*sigma))));
        double randomnu;
        randomnu=(cauchy_max)*RandomNumber();
        keep=cauchy<randomnu;       // reject while the trial falls above the density
    } while (keep);
}

//using alternative Monte Carlo
// Direct (inverse-transform) variant of the nanowire Lorentz sampler: chooses the
// line region or the end caps by their relative weights, samples (X,Y) in the
// wire frame, then rotates by -Angle into the simulation frame.
template<class VecType1, class VecType2, class VecType3, class VecType4, class ValType, class ParameterType>
inline void NanowireLorentzDistribution2(const VecType1& StartPosition, const VecType2 EndPosition, const VecType3& dir, ValType FWHM, ValType Length, ValType Height, ValType Angle, VecType4& position, const ParameterType& Parameter) {
    // Relative weights of the end-cap region and of the line region.
    double volume_sides, volume_line;
    volume_sides = 2*Height*(FWHM/2)*(FWHM/2)*(my::math::Pi)*(my::math::Pi);
    volume_line = Height*(my::math::Pi)*(FWHM/2)*Length;
    double line_or_sides;
    line_or_sides=(volume_sides+volume_line)*RandomNumber();
    double X,Y;
    int x=4, y=4;               // indices of the two in-plane axes
    for (int i=0;i<3;i++) {
        if (Parameter.open_boundary!=i) {
            if (x==4) {
                x=i;
            } else {
                y=i;
            }
        }
    }
    if (line_or_sides <= volume_line) {
        // generate a particle in the line: Cauchy transverse profile via the
        // tangent inverse transform, uniform along the wire.
        double randx, randy;
        randx=RandomNumber()-0.5;
        double radius;
        radius=std::tan(my::math::Pi*randx);
        X = FWHM/2*radius;
        randy=RandomNumber();
        Y = Length*randy;
    } else {
        // generate a particle on the sides (around the wire ends)
        // double rannum;
        // rannum=RandomNumber();
        double radius;
        //radius=std::sqrt(exp(my::math::Pi*RandomNumber())-1);
        radius=FWHM/2*std::sqrt(std::abs(exp(2*my::math::Pi*RandomNumber()))-1);
        double theta;
        theta = 2*my::math::Pi*RandomNumber();
        X=radius*std::cos(theta);
        Y=radius*std::sin(theta);
        // Positive half of the circle is attached to the far end of the wire.
        Y += Y>0?Length:0;
    }
    // Rotate (X,Y) by -Angle and translate to StartPosition.
    for (int i=0;i<3;i++) {
        if (i==x) {
            position[i] = cos(-Angle)*(X) - sin(-Angle)*(Y) + StartPosition[i];
        } else if (i==y) {
            position[i] = sin(-Angle)*(X) + cos(-Angle)*(Y) + StartPosition[i];
        } else {
            position[i] = StartPosition[i];
        }
    }
}

// Electrospray deposition (ESD) droplet generator: samples a droplet radius and
// charge, then integrates its flight (gravity, Stokes drag, electric field, and a
// thermophoretic force in the heated zone) to produce the landing position,
// final radius `r`, charge `q`, and impact `Velocity`.
// (Body continues on the following chunk lines.)
template<class DropletType, class VecType1, class VecType2, class ParameterType, class PartitionType>
inline void ESDDistribution(const DropletType& d, const VecType1& StartPosition, VecType2& Position, double& r, double& q, long double* Velocity, const ParameterType& Parameter, const PartitionType& Partition){
    double d_test_v;            // vertical displacement reached at the heat-zone boundary
    double d_test_r;            // radial displacement reached at the heat-zone boundary
    //----------------Find the radius distribution----------------------------------
    // Inverse-transform sample of the droplet radius between 2.5 um and 55 um.
    // NOTE(review): volume_fraction spans [0.58, 1.0] here but [0.58, 0.80] in
    // EvenlyDistributed below — confirm which range is intended.
    double volume_fraction=0.42*RandomNumber()+0.58;
    double r_min_inv=1/2.5e-6;
    double r_max_inv=1/55e-6;
    double r_max_inv_third=exp(log(r_max_inv)/3);
    double r_min_inv_third=exp(log(r_min_inv)/3);
    double radius=1/pow(volume_fraction*(r_max_inv_third-r_min_inv_third)+r_min_inv_third,3.);
    double eta_a = 2.2e-5; //Ns/m2      (air viscosity)
    double rho_d = 789; // kg/m3        (droplet density)
    //------------------------------------------------------------------------------
    //----------------Calculate the charge given r----------------------------------
    // Fraction (0.58) of the Rayleigh charge limit for the sampled radius.
    double gamma_d = 0.022;
    double permittivity = 8.854187817e-12;
    double qd=0.58*8*my::math::Pi*sqrt(gamma_d*permittivity*radius*radius*radius);
    //------------------------------------------------------------------------------
    //----------------Find the initial droplet position (cylidrical)----------------
    long double theta_i=0.5*my::math::Pi*RandomNumber()*0.5; // 45 degree spray cone
    long double phi=2*my::math::Pi*RandomNumber();           // azimuthal emission angle
    double location_radius=0.5;
    double z_star1 = 1-location_radius*cos(theta_i); // height
    double r_star1 = location_radius*sin(theta_i); // radius
    double mass = 4*my::math::Pi*rho_d*radius*radius*radius/3;
    //---FIND THE INITIAL AND THERMALDROPLET ELECTRICAL FORCES AND ACCELLERATIONS---
    double H = StartPosition[Parameter.open_boundary]; //270 mm or 0.27 m
    double Phi_0 = 10e3; //V
    double R = 1e-3; //outer radius of the nozzle (guess)
    double K_V = 1-exp(-0.021*H/R); //non-dimensional related to H/R ratio
    double Phi_star = K_V/(log(4*H/R));
    double E_e = Phi_0*Phi_star/H;
    //-----------Find expected initial electric force when E-field acts alone--------
    double plusz1 = 1+z_star1;//1+z_star;
    double minusz1 = 1-z_star1;//1-z_star;
    double rootplusz1 = std::sqrt(r_star1*r_star1+plusz1*plusz1);
    double rootminusz1 = std::sqrt(r_star1*r_star1+minusz1*minusz1);
    double E_v1 = E_e*(1/rootminusz1+1/rootplusz1);
    double E_r1 = E_e*(plusz1/rootplusz1-minusz1/rootminusz1)/r_star1;
    double theta=atan(E_r1/E_v1);       // initial field direction w.r.t. vertical
    //-----------Find expected final electric force where E-field acts alone---------
    double t_heat = 10e-3;              // thickness of the heated zone
    double r_star2 = (E_r1*(H-t_heat))/E_v1;
    double z_star2 = 0;//t_heat/H;
    double plusz2 = 1+z_star2;//1+z_star;
    double minusz2 = 1-z_star2;//1-z_star;
    double rootplusz2 = std::sqrt(r_star2*r_star2+plusz2*plusz2);
    double rootminusz2 = std::sqrt(r_star2*r_star2+minusz2*minusz2);
    double Eth_v2 = E_e*(1/rootminusz2+1/rootplusz2);
    double Eth_r2 = E_e*(plusz2/rootplusz2-minusz2/rootminusz2)/r_star2;
    //-----------Use the first and second to come up with linear dependence----------
    double E_v = (E_v1-Eth_v2)/(H-t_heat);
    double E_r = (theta<1e-20)?0:(E_r1-Eth_r2)/((H-t_heat)*tan(theta));
    double Fe_v = qd*E_v;
    double Fe_r = qd*E_r;
    double ae_v = Fe_v/mass; // initial dependent component
    double ae_r = Fe_r/mass; // initial dependent component
    //------------Calculate the required constant component of electric force--------
    double Fe_v1 = qd*E_v1;
    double Fe_r1 = qd*E_r1;
    double ae_v1 = Fe_v1/mass; // Initial constant component
    double ae_r1 = (theta<1e-20)?0:Fe_r1/mass; // Initial constant component
    //-------------------------------------------------------------------------------
    //------------------------------------------------------------------------------
    //--------------Find all forces acting on the droplet---------------------------
    // Find the droplet mass - knowns: radius, rho_d
    // Gravity force component acceleration
    double g = 9.81; //m/s
    // Stokes force component acceleration - knowns: rho_d, eta_a
    double s_f = (4.5*eta_a)/(rho_d*radius*radius);
    // Electric force component acceleration - knowns: q_d, r_star, z_star;
    double d0_v = 0;
    double d0_r = 0;
    double v_0 = 0;
    double v0_v = v_0*cos(theta);
    double v0_r = v_0*sin(theta); // This is for both dimensions
    //------------------------------------------------------------------------------
    //--------------Find new position of droplet after t----------------------------
    //First, separate the forces due to velocity/displacement dependences
    double a_v = g+ae_v1;//+2*a_e; // independent acceleration (mainly gravity) - vertical only
    double b = s_f; // velocity dependent acceleration - Stokes force - vert and rad
    double c_v = ae_v; // displacement dependent acceleration - vertical E-force
    double a_r = ae_r1; //+2*a_e; // independent acceleration - a_e component
    double c_r = ae_r; // displacement dependent acceleration - radial E-force
    //--------------Start vertical onlgoings ---------------------------------------
    long double t_drop;
    double v0th_v;//=0;
    double v0th_r;//=0;
    if (b*b-4*c_v<0) {
        // Complex characteristic roots: fall back to a direct placement.
        t_drop=0;
        d_test_v = H-t_heat;
        d_test_r = r_star1*H;
        v0th_v=1;
        v0th_r=0;
    } else {
        // Closed-form solution of the linear ODE x'' + b x' + c x = a; the
        // A/B/C terms are the coefficients of the two exponential modes.
        double r1_v = (-b+std::sqrt(b*b-4*c_v))/(2);
        double r2_v = (-b-std::sqrt(b*b-4*c_v))/(2);
        double B1_v = d0_v*(r2_v)/(r2_v-r1_v);
        double A1_v = d0_v-B1_v;
        double B2_v = (v0_v+d0_v)/(r2_v-r1_v);
        double A2_v = -B2_v;;
        double C_v = a_v/(r1_v*r2_v);
        double B3_v = -a_v/(r2_v*(r2_v-r1_v));
        double A3_v = -(B3_v+C_v);
        // Bisection for the time at which the droplet reaches the heat zone.
        double iteration_stop=10000;
        double t_low = 0;
        double t_high = 1;
        double t_check = (t_low+t_high)/2;
        while (true){
            d_test_v = (A1_v+A2_v+A3_v)*exp(r1_v*t_check)+(B1_v+B2_v+B3_v)*exp(r2_v*t_check)+C_v;
            if (t_high-t_low<1e-100) break;
            if (d_test_v > (H-t_heat)) {
                t_high=t_check;
                t_check=(t_high+t_low)/2;
            } else if (d_test_v < (H-t_heat)) {
                t_low=t_check;
                t_check=(t_high+t_low)/2;
            }
            if (iteration_stop==0) break;
            iteration_stop--;
        }
        t_drop=t_check;
        v0th_v = (A1_v+A2_v+A3_v)*(exp(r1_v*t_drop)-1)/r1_v+(B1_v+B2_v+B3_v)*(exp(r2_v*t_drop)-1)/r2_v+C_v*t_drop+v0_v;
        //---------Now time required to reach heat zone is known------------------------
        //---------Can now calculate the radial displacement----------------------------
        if (b*b-4*c_r<0) {
            t_drop=0;
            d_test_r = 0;
            v0th_r = 0;
        } else {
            double r1_r = (-b+std::sqrt(b*b-4*c_r))/(2);
            double r2_r = (-b-std::sqrt(b*b-4*c_r))/(2);
            double B1_r = d0_r*(r2_r)/(r2_r-r1_r);
            double A1_r = d0_r-B1_r;
            double B2_r = (v0_r+d0_r)/(r2_r-r1_r);
            double A2_r = -B2_r;;
            double C_r = a_r/(r1_r*r2_r);
            double B3_r = -a_r/(r2_r*(r2_r-r1_r));
            double A3_r = -(B3_r+C_r);
            d_test_r = (A1_r+A2_r+A3_r)*exp(r1_r*t_drop)+(B1_r+B2_r+B3_r)*exp(r2_r*t_drop)+C_r;
            v0th_r = (A1_r+A2_r+A3_r)*(exp(r1_r*t_drop)-1)/r1_r+(B1_r+B2_r+B3_r)*(exp(r2_r*t_drop)-1)/r2_r+C_r*t_drop+v0_r;
        }
    }
    //------------------------------------------------------------------------------
    //------------------------------------------------------------------------------
    //------------------------------------------------------------------------------
    //------------------           THERMAL ZONE CALCULATIONS     -----------------------
    //------------------------------------------------------------------------------
    //------------------------------------------------------------------------------
    // //---------Now we know z and r on the cusp of the thermal zone------------------
    // //---------Reset parameters to include thermal components and re-run -----------
    //--------CALCULATE THERMAL EFFECTS IN DROPLET SIZE REDUCTION-------------------
    double dth_test_v;
    double dth_test_r;
    double q0 = 373e-12; // (373 um^2) for water: 88e-12 (m^2)
    double q1 = 89.1; // (8.91e-5 /um) for water: 4.3e3 (/m)
    double del_T = 100000;      // NOTE(review): magic temperature-difference scale — confirm units
    double dK = q0*del_T*(1+2*q1*radius);
    // Shrink the radius due to evaporation during the flight time.
    double r_new = radius-radius*t_drop*exp(log(dK)/3);
    r=r_new;
    double qd_new = 0.58*8*my::math::Pi*sqrt(gamma_d*permittivity*r_new*r_new*r_new);
    double mass_new = 4*my::math::Pi*rho_d*r_new*r_new*r_new/3;
    // //---------CALCULATE THE ENERGIES ASSUMING LINEAR REDUCTION TO FINAL------------
    // //---------Electric force F_e is constant in this region and small--------------
    double rth_star = d_test_r/H; //d_r/H;
    double zth_star = 1-d_test_v/H; //1-d_v/H;
    double d0th_v = 0;//d_test_v;
    double d0th_r = 0;//d_test_r;
    double tth_drop = t_drop;
    double thplusz = 1+zth_star;//1+z_star;
    double thminusz = 1-zth_star;//1-z_star;
    double throotplusz = std::sqrt(rth_star*rth_star+thplusz*thplusz);
    double throotminusz = std::sqrt(rth_star*rth_star+thminusz*thminusz);
    // Field components at the top of the thermal zone.
    double Eth_v = E_e*(1/throotminusz+1/throotplusz);
    double Eth_r = E_e*(thplusz/throotplusz-thminusz/throotminusz)/rth_star;
    double theta_th = atan(Eth_r/Eth_v);
    double Feth_v = qd_new*Eth_v;
    double Feth_r = qd_new*Eth_r;
    double aeth_v = Feth_v/mass_new;
    double aeth_r = Feth_r/mass_new;
    //--------------Find all other forces acting on the droplet---------------------
    // Find the droplet mass - knowns: radius, rho_d
    // Gravity force component acceleration
    // Stokes force component acceleration - knowns: rho_d, eta_a
    double sth_f = (4.5*eta_a)/(rho_d*r_new*r_new);
    // Thermophoretic force
    double kappa_a = 0.025;
    double kappa_d = 0.19;
    double grad_T = 100000;
    double T = 523;
    double rho_a = 1.29;
    double F_th = 3*my::math::Pi*eta_a*eta_a*r_new*3*kappa_a*grad_T/(rho_a*T*(2*kappa_a+kappa_d));
    double a_th = F_th/mass_new;
    // Electric force component acceleration - knowns: q_d, r_star, z_star;
    double ath_v = g+aeth_v-a_th;//+2*a_e; // independent acceleration (gravity, initial e-force, thermal force)
    double bth = sth_f; // velocity dependent acceleration - Stokes force
    double cth_v = aeth_v/t_heat; // displacement dependent acceleration - vertical E-force
    double ath_r = (theta_th<1e-20)?0:aeth_r; // independent acceleration - Initial electric force
    double cth_r = (theta_th<1e-20)?0:aeth_r/(t_heat*tan(theta_th)); // displacement dependent acceleration - Radial E-force
    double v_final_v=0;
    double v_final_r=0;
    if (bth*bth-4*cth_v<0) {
        tth_drop=0;
        dth_test_v = t_heat;
        dth_test_r = d_test_r;
        v_final_v = 1;
        v_final_r = 0;
    } else {
        // Same closed-form ODE solution as above, now inside the thermal zone.
        double r1th_v = (-bth+std::sqrt(bth*bth-4*cth_v))/(2);
        double r2th_v = (-bth-std::sqrt(bth*bth-4*cth_v))/(2);
        double B1th_v = d0th_v*(r2th_v)/(r2th_v-r1th_v);
        double A1th_v = d0th_v-B1th_v;
        double B2th_v = (v0th_v+d0th_v)/(r2th_v-r1th_v);
        double A2th_v = -B2th_v;;
        double Cth_v = ath_v/(r1th_v*r2th_v);
        double B3th_v = -ath_v/(r2th_v*(r2th_v-r1th_v));
        double A3th_v = -(B3th_v+Cth_v);
        // Bisection for the time to cross the thermal zone.
        double iteration_stopth =10000;
        double tth_low = 0;
        double tth_high = t_drop;
        double tth_check = (tth_low+tth_high)/2;
        while (true){
            dth_test_v = (A1th_v+A2th_v+A3th_v)*exp(r1th_v*tth_check)+(B1th_v+B2th_v+B3th_v)*exp(r2th_v*tth_check)+Cth_v;
            if (tth_high-tth_low<1e-10) break;
            if (dth_test_v > t_heat) {
                tth_high=tth_check;
                tth_check=(tth_high+tth_low)/2;
            } else if (dth_test_v < t_heat) {
                tth_low=tth_check;
                tth_check=(tth_high+tth_low)/2;
            }
            if (iteration_stopth==0) break;
            iteration_stopth--;
        }
        tth_drop = tth_check;
        v_final_v = (A1th_v+A2th_v+A3th_v)*(exp(r1th_v*tth_drop)-1)/r1th_v+(B1th_v+B2th_v+B3th_v)*(exp(r2th_v*tth_drop)-1)/r2th_v+Cth_v*tth_drop+v0th_v;
        //---------Now time required to reach surface is known--------------------------
        //---------Can now calculate the radial displacement----------------------------
        if (bth*bth-4*cth_r<0) {
            tth_drop=0;
            dth_test_r = 0;
            v_final_r = 0;
        } else {
            double r1th_r = (-bth+std::sqrt(bth*bth-4*cth_r))/(2);
            double r2th_r = (-bth-std::sqrt(bth*bth-4*cth_r))/(2);
            double B1th_r = d0th_r*(r2th_r)/(r2th_r-r1th_r);
            double A1th_r = d0th_r-B1th_r;
            double B2th_r = (v0th_r+d0th_r)/(r2th_r-r1th_r);
            double A2th_r = -B2th_r;;
            double Cth_r = ath_r/(r1th_r*r2th_r);
            double B3th_r = -ath_r/(r2th_r*(r2th_r-r1th_r));
            double A3th_r = -(B3th_r+Cth_r);
            dth_test_r = (A1th_r+A2th_r+A3th_r)*exp(r1th_r*tth_drop)+(B1th_r+B2th_r+B3th_r)*exp(r2th_r*tth_drop)+Cth_r;
            v_final_r = (A1th_r+A2th_r+A3th_r)*(exp(r1th_r*tth_drop)-1)/r1th_r+(B1th_r+B2th_r+B3th_r)*(exp(r2th_r*tth_drop)-1)/r2th_r+Cth_r*tth_drop+v0th_r;
        }
    }
    // Convert the cylindrical (radial, vertical) result back to Cartesian outputs.
    Velocity[0] = v_final_r*cos(phi);
    Velocity[1] = -v_final_v;
    Velocity[2] = v_final_r*sin(phi);
    // NOTE(review): sqrt of a sum of displacements (not of squares) — looks
    // intentional only if dth_test_r/d_test_r already hold squared values; confirm.
    Position[0] = std::sqrt(dth_test_r+d_test_r)*cos(phi)+StartPosition[0];
    Position[1] = 0;
    Position[2] = std::sqrt(dth_test_r+d_test_r)*sin(phi)+StartPosition[2];
    r = r_new;
    q = qd_new;
}

// Droplet variant of EvenlyDistributed: samples a droplet radius/charge and a
// uniform landing position over a 4x-extended domain; velocity is straight down.
// (Signature continues on the next chunk line.)
template<class DropletType, class VecType1, class VecType2, class ParameterType, class PartitionType>
inline void EvenlyDistributed(const DropletType& d, const VecType1& StartPosition, VecType2& Position, double& r, double& q, long double*
Velocity, const ParameterType& Parameter, const PartitionType& Partition){ Velocity[0] = 0; Velocity[1] = -1; Velocity[2] = 0; double volume_fraction=0.22*RandomNumber()+0.58; double r_min_inv=1/2.5e-6; double r_max_inv=1/55e-6; double r_max_inv_third=exp(log(r_max_inv)/3); double r_min_inv_third=exp(log(r_min_inv)/3); double radius=1/pow(volume_fraction*(r_max_inv_third-r_min_inv_third)+r_min_inv_third,3.); r = radius; Position[0]=RandomNumber()*4*(Partition.Max(0)*Parameter.grid_delta)-2*(Partition.Max(0)*Parameter.grid_delta); Position[1]=0; Position[2]=RandomNumber()*4*(Partition.Max(2)*Parameter.grid_delta)-2*(Partition.Max(2)*Parameter.grid_delta); double gamma_d = 0.022; double permittivity = 8.854187817e-12; double qd=0.58*8*my::math::Pi*sqrt(gamma_d*permittivity*radius*radius*radius); q = qd; } template<class VecType1, class VecType2, class ParameterType, class PartitionType> inline void EvenlyDistributed(const VecType1& StartPosition, VecType2& Position, const ParameterType& Parameter, const PartitionType& Partition){ Position[0]=RandomNumber()*(Partition.Max(0)*Parameter.grid_delta-Partition.Min(0)*Parameter.grid_delta)+Partition.Min(0)*Parameter.grid_delta; Position[1]=0; Position[2]=RandomNumber()*(Partition.Max(2)*Parameter.grid_delta-Partition.Min(2)*Parameter.grid_delta)+Partition.Min(2)*Parameter.grid_delta; } template<class DropletType, class VecType1> inline void DiskDistribution(const DropletType d, VecType1& position) { double v0, v1, rsq; do { v0=2.0*RandomNumber()-1.0; v1=2.0*RandomNumber()-1.0; rsq=v0*v0+v1*v1; } while (rsq>=1.0 || rsq<1e-20); position[0] = 2*d.Radius*v0+d.Position[0]; position[1] = 0;//d.Position[1]; position[2] = 2*d.Radius*v1+d.Position[2]; } template<class DataType> bool AnyElement(typename std::vector<DataType> vec, DataType check){ typename std::vector<DataType>::iterator first = vec.begin(), last = vec.end(); while(first!=last){ if(*first==check) return true; ++first; } return false; } } } #endif //DEF_STATISTICS
nodal_residualbased_block_builder_and_solver.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // // #if !defined(KRATOS_NODAL_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER ) #define KRATOS_NODAL_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER /* System includes */ #include <set> #ifdef _OPENMP #include <omp.h> #endif /* #include <unordered_set> */ /* #ifdef USE_GOOGLE_HASH */ /* #include "sparsehash/dense_hash_set" //included in external libraries */ /* #endif */ #ifdef USE_GOOGLE_HASH #include "sparsehash/dense_hash_set" //included in external libraries #else #include <unordered_set> #endif /* Project includes */ #include "utilities/timer.h" #include "includes/define.h" #include "includes/key_hash.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" #include "includes/model_part.h" #include "utilities/openmp_utils.h" #include "includes/kratos_flags.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class NodalResidualBasedBlockBuilderAndSolver * @ingroup KratosCore * @brief Current class provides an implementation for standard builder and solving operations. * @details The RHS is constituted by the unbalanced loads (residual) * Degrees of freedom are reordered putting the restrained degrees of freedom at * the end of the system ordered in reverse order with respect to the DofSet. * Imposition of the dirichlet conditions is naturally dealt with as the residual already contains * this information. 
* Calculation of the reactions involves a cost very similar to the calculation of the total residual
 * @author Riccardo Rossi
 */
template<class TSparseSpace,
         class TDenseSpace, //= DenseSpace<double>,
         class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
         >
class NodalResidualBasedBlockBuilderAndSolver
    : public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
    ///@name Type Definitions
    ///@{

    KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedBlockBuilderAndSolver);

    // Shorthands re-exported from the BuilderAndSolver base class.
    typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;

    typedef typename BaseType::TSchemeType TSchemeType;

    typedef typename BaseType::TDataType TDataType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

    typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;

    typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;

    typedef Node<3> NodeType;

    typedef typename BaseType::NodesArrayType NodesArrayType;

    typedef typename BaseType::ElementsArrayType ElementsArrayType;

    typedef typename BaseType::ConditionsArrayType ConditionsArrayType;

    typedef typename BaseType::ElementsContainerType ElementsContainerType;

    typedef Vector VectorType;

    ///@}
    ///@name Life Cycle
    ///@{

    /** Constructor.
     */
    NodalResidualBasedBlockBuilderAndSolver(
        typename TLinearSolver::Pointer pNewLinearSystemSolver)
        : BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >(pNewLinearSystemSolver)
    {
    }

    /** Destructor.
     */
    ~NodalResidualBasedBlockBuilderAndSolver() override
    {
    }

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief Function to perform the build of the RHS.
The vector could be sized as the total number
     * of dofs or as the number of unrestrained ones
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param A The LHS matrix
     * @param b The RHS vector
     */
    void Build(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& A,
        TSystemVectorType& b) override
    {
        KRATOS_TRY

        KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;

        // Getting the elements from the model
        const int nelements = static_cast<int>(rModelPart.Elements().size());

        // Getting the array of the conditions
        const int nconditions = static_cast<int>(rModelPart.Conditions().size());

        ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
        ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin();
        ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();

        //contributions to the system
        LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

        //vector containing the localization in the system of the different
        //terms
        Element::EquationIdVectorType EquationId;

        // assemble all elements
        double start_build = OpenMPUtils::GetCurrentTime();

        // Each thread gets private copies of the scratch contribution containers.
        #pragma omp parallel firstprivate(nelements,nconditions, LHS_Contribution, RHS_Contribution, EquationId )
        {
            # pragma omp for  schedule(guided, 512) nowait
            for (int k = 0; k < nelements; k++) {
                ModelPart::ElementsContainerType::iterator it = el_begin + k;

                //detect if the element is active or not. If the user did not make any choice the element
                //is active by default
                bool element_is_active = true;
                if ((it)->IsDefined(ACTIVE))
                    element_is_active = (it)->Is(ACTIVE);

                if (element_is_active) {
                    //calculate elemental contribution
                    pScheme->CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);

                    //assemble the elemental contribution
#ifdef USE_LOCKS_IN_ASSEMBLY
                    Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
                    Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
                    // clean local elemental memory
                    pScheme->CleanMemory(*(it.base()));
                }
            }

            //#pragma omp parallel for firstprivate(nconditions, LHS_Contribution, RHS_Contribution, EquationId ) schedule(dynamic, 1024)
            #pragma omp for  schedule(guided, 512)
            for (int k = 0; k < nconditions; k++) {
                ModelPart::ConditionsContainerType::iterator it = cond_begin + k;

                //detect if the element is active or not. If the user did not make any choice the element
                //is active by default
                bool condition_is_active = true;
                if ((it)->IsDefined(ACTIVE))
                    condition_is_active = (it)->Is(ACTIVE);

                if (condition_is_active) {
                    //calculate elemental contribution
                    pScheme->Condition_CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);

                    //assemble the elemental contribution
#ifdef USE_LOCKS_IN_ASSEMBLY
                    Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
                    Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif

                    // clean local elemental memory
                    pScheme->CleanMemory(*(it.base()));
                }
            }
        }

        const double stop_build = OpenMPUtils::GetCurrentTime();
        KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Build time: " << stop_build - start_build << std::endl;

        //for (int i = 0; i < A_size; i++)
        //    omp_destroy_lock(&lock_array[i]);
        KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building" << std::endl;

        KRATOS_CATCH("")
    }

    // Node-based assembly of the continuity equation: loops over nodes instead of
    // elements/conditions and builds each nodal row from the node's stored
    // finite-difference neighbourhood.  (Definition continues past this chunk.)
    void BuildNodally(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& A,
        TSystemVectorType& b)
    {
        KRATOS_TRY

        KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;

        /* std::cout<<"Build Nodally Continuity Equation"<<std::endl; */

        //contributions to the system
        LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

        //vector containing the localization in the system of the different terms
        Element::EquationIdVectorType EquationId;
        ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

        /* const double start_build = OpenMPUtils::GetCurrentTime(); */

        /* #pragma omp parallel */
        {
            ModelPart::NodeIterator NodesBegin;
            ModelPart::NodeIterator NodesEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);

            for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) {
                VectorType nodalSFDneighboursId=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
                const unsigned int neighSize = nodalSFDneighboursId.size();

                if(neighSize>1){
                    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
                    const double nodalVolume=itNode->FastGetSolutionStepValue(NODAL_VOLUME);
                    const double timeInterval = CurrentProcessInfo[DELTA_TIME];

                    LHS_Contribution= ZeroMatrix(neighSize,neighSize);
                    RHS_Contribution= ZeroVector(neighSize);

                    if (EquationId.size() != neighSize)
                        EquationId.resize(neighSize, false);

                    // Effective material coefficients: elastic for SOLID nodes,
                    // viscous for FLUID nodes.
                    double deviatoricCoeff=1.0;
                    double volumetricCoeff=1.0;
                    if(itNode->Is(SOLID)){
                        deviatoricCoeff = timeInterval*itNode->FastGetSolutionStepValue(YOUNG_MODULUS)/(1.0+itNode->FastGetSolutionStepValue(POISSON_RATIO))*0.5;
                        volumetricCoeff =
timeInterval*itNode->FastGetSolutionStepValue(POISSON_RATIO)*itNode->FastGetSolutionStepValue(YOUNG_MODULUS)/((1.0+itNode->FastGetSolutionStepValue(POISSON_RATIO))*(1.0-2.0*itNode->FastGetSolutionStepValue(POISSON_RATIO))) + 2.0*deviatoricCoeff/3.0; } else if(itNode->Is(FLUID)){ deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY); volumetricCoeff = timeInterval*itNode->FastGetSolutionStepValue(BULK_MODULUS); } const unsigned int xpos = itNode->GetDofPosition(VELOCITY_X); double deltaPressure=itNode->FastGetSolutionStepValue(PRESSURE,0)-itNode->FastGetSolutionStepValue(PRESSURE,1); double volumetricDefRate= itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE); /* std::cout<<"totalVolume "<<nodalVolume<<" VolumetricCoeff "<<volumetricCoeff<<std::endl; */ LHS_Contribution(0,0)+= nodalVolume/volumetricCoeff; RHS_Contribution[0] += (-deltaPressure/volumetricCoeff + volumetricDefRate)*nodalVolume; bool stabilizationNeeded=false; if((itNode->Is(FLUID) || (itNode->Is(SOLID) && itNode->FastGetSolutionStepValue(POISSON_RATIO)>0.49))){ stabilizationNeeded=true; }else{ for (unsigned int i = 0; i< neighSize; i++) { unsigned int idNode=nodalSFDneighboursId[i]; EquationId[i]=rModelPart.Nodes()[idNode].GetDof(PRESSURE,xpos).EquationId(); } } if(stabilizationNeeded==true){ /* Vector& rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); */ unsigned int firstRow=0; unsigned int firstCol=0; double meanMeshSize=itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE); double characteristicLength=2.0*meanMeshSize; /* double nodalFreesurfaceArea=itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA); */ double density=itNode->FastGetSolutionStepValue(DENSITY); /* double tauStab=1.0/(8.0*deviatoricCoeff/(meanMeshSize*meanMeshSize)+2.0*density/timeInterval); */ double nodalVelocity=0; if(dimension==2){ nodalVelocity= sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X)*itNode->FastGetSolutionStepValue(VELOCITY_X) + 
itNode->FastGetSolutionStepValue(VELOCITY_Y)*itNode->FastGetSolutionStepValue(VELOCITY_Y)); }else if(dimension==3){ nodalVelocity=sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X)*itNode->FastGetSolutionStepValue(VELOCITY_X) + itNode->FastGetSolutionStepValue(VELOCITY_Y)*itNode->FastGetSolutionStepValue(VELOCITY_Y) + itNode->FastGetSolutionStepValue(VELOCITY_Z)*itNode->FastGetSolutionStepValue(VELOCITY_Z)); } double tauStab= 1.0 * (characteristicLength * characteristicLength * timeInterval) / ( density * nodalVelocity * timeInterval * characteristicLength + density * characteristicLength * characteristicLength + 8.0 * deviatoricCoeff * timeInterval ); /* tauStab*=10.0; */ /* tauStab=0.0000001; */ /* tauStab=100.0; */ LHS_Contribution(0,0)+= +nodalVolume*tauStab*density/(volumetricCoeff*timeInterval); RHS_Contribution[0] += -nodalVolume*tauStab*density/(volumetricCoeff*timeInterval)*(deltaPressure-itNode->FastGetSolutionStepValue(PRESSURE_VELOCITY,0)*timeInterval); if(itNode->Is(FREE_SURFACE)){ /* LHS_Contribution(0,0) += + 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize; */ /* RHS_Contribution[0] += - 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize * itNode->FastGetSolutionStepValue(PRESSURE,0); */ /* double boundLHScontribution=4.0 * tauStab * nodalVolume /(meanMeshSize*meanMeshSize); */ /* std::cout<<"boundLHScontribution "<<boundLHScontribution<<std::endl; */ /* if(itNode->IsNot(RIGID)){ */ LHS_Contribution(0,0) += + 4.0*2.0 * tauStab * nodalVolume /(meanMeshSize*meanMeshSize); RHS_Contribution[0] += - 4.0*2.0 * tauStab * nodalVolume /(meanMeshSize*meanMeshSize) * itNode->FastGetSolutionStepValue(PRESSURE,0); /* } */ /* else { */ /* LHS_Contribution(0,0) += + 4.0/3.0 * tauStab * nodalVolume /(meanMeshSize*meanMeshSize); */ /* RHS_Contribution[0] += - 4.0/3.0 * tauStab * nodalVolume /(meanMeshSize*meanMeshSize) * itNode->FastGetSolutionStepValue(PRESSURE,0); */ /* } */ const array_1d<double, 3> &Normal = itNode->FastGetSolutionStepValue(NORMAL); Vector& 
SpatialDefRate=itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); array_1d<double, 3> nodalAcceleration= 0.5*(itNode->FastGetSolutionStepValue(VELOCITY,0)-itNode->FastGetSolutionStepValue(VELOCITY,1))/timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION,1); /* nodalAcceleration= (itNode->FastGetSolutionStepValue(VELOCITY,0)-itNode->FastGetSolutionStepValue(VELOCITY,1))/timeInterval; */ double nodalNormalAcceleration=0; double nodalNormalProjDefRate=0; if(dimension==2){ nodalNormalProjDefRate=Normal[0]*SpatialDefRate[0]*Normal[0] + Normal[1]*SpatialDefRate[1]*Normal[1] + 2*Normal[0]*SpatialDefRate[2]*Normal[1]; /* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X,1) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y,1); */ /* nodalNormalAcceleration=(itNode->FastGetSolutionStepValue(VELOCITY_X,0)-itNode->FastGetSolutionStepValue(VELOCITY_X,1))*Normal[0]/timeInterval + */ /* (itNode->FastGetSolutionStepValue(VELOCITY_Y,0)-itNode->FastGetSolutionStepValue(VELOCITY_Y,1))*Normal[1]/timeInterval; */ nodalNormalAcceleration=Normal[0]*nodalAcceleration[0] + Normal[1]*nodalAcceleration[1]; }else if(dimension==3){ nodalNormalProjDefRate=Normal[0]*SpatialDefRate[0]*Normal[0] + Normal[1]*SpatialDefRate[1]*Normal[1] + Normal[2]*SpatialDefRate[2]*Normal[2] + 2*Normal[0]*SpatialDefRate[3]*Normal[1] + 2*Normal[0]*SpatialDefRate[4]*Normal[2] + 2*Normal[1]*SpatialDefRate[5]*Normal[2]; /* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y) + Normal[2]*itNode->FastGetSolutionStepValue(ACCELERATION_Z); */ /* nodalNormalAcceleration=Normal[0]*nodalAcceleration[0] + Normal[1]*nodalAcceleration[1] + Normal[2]*nodalAcceleration[2]; */ } /* RHS_Contribution[0] += tauStab * (density*nodalNormalAcceleration - 4.0*deviatoricCoeff*nodalNormalProjDefRate/meanMeshSize) * nodalFreesurfaceArea; */ double 
accelerationContribution=2.0*density*nodalNormalAcceleration/meanMeshSize; double deviatoricContribution=8.0*deviatoricCoeff*nodalNormalProjDefRate/(meanMeshSize*meanMeshSize); /* std::cout<<"nodalNormalAcceleration= "<<nodalNormalAcceleration<<std::endl; */ /* std::cout<<"nodalNormalProjDefRate= "<<nodalNormalProjDefRate<<std::endl; */ /* std::cout<<"meanMeshSize "<<meanMeshSize<<std::endl; */ /* accelerationContribution=0; */ /* deviatoricContribution=0; */ /* if(itNode->IsNot(RIGID)){ */ RHS_Contribution[0] += 2.0* tauStab * (accelerationContribution + deviatoricContribution) * nodalVolume; /* }else{ */ /* RHS_Contribution[0] += 1.0/3.0* tauStab * (accelerationContribution - deviatoricContribution) * nodalVolume; */ /* } */ } for (unsigned int i = 0; i< neighSize; i++) { unsigned int idNode=nodalSFDneighboursId[i]; EquationId[i]=rModelPart.Nodes()[idNode].GetDof(PRESSURE,xpos).EquationId(); double Density= rModelPart.Nodes()[idNode].FastGetSolutionStepValue(DENSITY); array_1d<double, 3 >& VolumeAcceleration = rModelPart.Nodes()[idNode].FastGetSolutionStepValue(VOLUME_ACCELERATION); double dNdXi=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol]; double dNdYi=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol+1]; double dNdZi=0; if(dimension==2){ RHS_Contribution[i] += - tauStab * Density * (dNdXi* VolumeAcceleration[0] + dNdYi* VolumeAcceleration[1]) * nodalVolume; } else if(dimension==3){ dNdZi=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol+2]; RHS_Contribution[i] += - tauStab * Density * (dNdXi* VolumeAcceleration[0] + dNdYi* VolumeAcceleration[1] + dNdZi* VolumeAcceleration[2]) * nodalVolume; } firstRow=0; for (unsigned int j = 0; j< neighSize; j++) { unsigned int idNodeJ=nodalSFDneighboursId[j]; double dNdXj=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow]; double dNdYj=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow+1]; if(dimension==2){ ////////////////// Laplacian term for LHS 
LHS_Contribution(i,j)+= + tauStab * (dNdXi*dNdXj + dNdYi*dNdYj) * nodalVolume; ////////////////// Laplacian term L_ij*P_j for RHS RHS_Contribution[i] += - tauStab * (dNdXi*dNdXj + dNdYi*dNdYj) * nodalVolume * rModelPart.Nodes()[idNodeJ].FastGetSolutionStepValue(PRESSURE,0); } else if(dimension==3){ double dNdZj=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow+2]; ////////////////// Laplacian term for LHS LHS_Contribution(i,j) += + tauStab * (dNdXi*dNdXj + dNdYi*dNdYj + dNdZi*dNdZj) * nodalVolume; ////////////////// Laplacian term L_ij*P_j for RHS RHS_Contribution[i] += - tauStab * (dNdXi*dNdXj + dNdYi*dNdYj + dNdZi*dNdZj) * nodalVolume * rModelPart.Nodes()[idNodeJ].FastGetSolutionStepValue(PRESSURE,0); } /* std::cout << "dNdXi= " <<dNdXi<< "dNdYi= " <<dNdYi<< "dNdYj= " <<dNdYj<< "dNdXj= " <<dNdXj<< std::endl; */ firstRow+=dimension; } firstCol+=dimension; } } //assemble the elemental contribution #ifdef USE_LOCKS_IN_ASSEMBLY Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array); #else Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif /* AssembleLHS(A, LHS_Contribution, EquationId); */ /* AssembleRHS(b, RHS_Contribution, EquationId); */ } } } /* /\* std::cout<<".... 
Build Nodally Continuity Equation DONE!"<<std::endl; *\/ */ /* const double stop_build = OpenMPUtils::GetCurrentTime(); */ /* KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Build time: " << stop_build - start_build << std::endl; */ /* //for (int i = 0; i < A_size; i++) */ /* // omp_destroy_lock(&lock_array[i]); */ /* KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building" << std::endl; */ KRATOS_CATCH("") } /** * @brief Function to perform the building of the LHS * @details Depending on the implementation choosen the size of the matrix could * be equal to the total number of Dofs or to the number of unrestrained dofs * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix */ void BuildLHS( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A) override { KRATOS_TRY TSystemVectorType tmp(A.size1(), 0.0); this->Build(pScheme, rModelPart, A, tmp); KRATOS_CATCH("") } /** * @brief Build a rectangular matrix of size n*N where "n" is the number of unrestrained degrees of freedom * and "N" is the total number of degrees of freedom involved. * @details This matrix is obtained by building the total matrix without the lines corresponding to the fixed * degrees of freedom (but keeping the columns!!) 
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 */
void BuildLHS_CompleteOnFreeRows(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A) override
{
    KRATOS_TRY

    // NOTE(review): identical to BuildLHS — the free-rows-only variant is not specialized here.
    TSystemVectorType tmp(A.size1(), 0.0);
    this->Build(pScheme, rModelPart, A, tmp);

    KRATOS_CATCH("")
}

/**
 * @brief This is a call to the linear system solver
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void SystemSolve(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b
) override
{
    KRATOS_TRY

    double norm_b;
    if (TSparseSpace::Size(b) != 0)
        norm_b = TSparseSpace::TwoNorm(b);
    else
        norm_b = 0.00;

    // Skip the solver entirely for an exactly-zero RHS: the solution is zero.
    if (norm_b != 0.00)
    {
        //do solve
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    }
    else
        TSparseSpace::SetToZero(Dx);

    //prints information about the current time
    KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}

/**
 *@brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 * @param rModelPart The model part of the problem to solve
 */
void SystemSolveWithPhysics(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b,
    ModelPart& rModelPart
)
{
    KRATOS_TRY

    double norm_b;
    if (TSparseSpace::Size(b) != 0)
        norm_b = TSparseSpace::TwoNorm(b);
    else
        norm_b = 0.00;

    if (norm_b != 0.00)
    {
        //provide physical data as needed
        if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded() )
            BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);

        //do solve
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    }
    else
    {
        TSparseSpace::SetToZero(Dx);
        KRATOS_WARNING("NodalResidualBasedBlockBuilderAndSolver") << "ATTENTION! setting the RHS to zero!" << std::endl;
    }

    //prints information about the current time
    KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}

/**
 * @brief Function to perform the building and solving phase at the same time.
 * @details It is ideally the fastest and safer function to use when it is possible to solve
 * just after building. Uses the nodal build (BuildNodally) rather than the elemental Build.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void BuildAndSolve(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    KRATOS_TRY

    Timer::Start("Build");

    /* Build(pScheme, rModelPart, A, b); */
    boost::timer build_time;

    BuildNodally(pScheme, rModelPart, A, b);

    // NOTE(review): unconditional timing output to stdout — consider gating on echo level.
    std::cout << "CONTINUITY EQ: build_time : " << build_time.elapsed() << std::endl;

    Timer::Stop("Build");

    // Impose fixed DoFs on the assembled system before solving.
    ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);

    KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;

    const double start_solve = OpenMPUtils::GetCurrentTime();
    Timer::Start("Solve");

    boost::timer solve_time;

    SystemSolveWithPhysics(A, Dx, b, rModelPart);

    std::cout << "CONTINUITY EQ: solve_time : " << solve_time.elapsed() << std::endl;

    Timer::Stop("Solve");
    const double stop_solve = OpenMPUtils::GetCurrentTime();
    KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >=1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl;

    KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;

    KRATOS_CATCH("")
}

/**
 * @brief Corresponds to the previous, but the System's matrix is considered already built and only the RHS is built again
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void BuildRHSAndSolve(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    KRATOS_TRY

    BuildRHS(pScheme, rModelPart, b);
    SystemSolve(A, Dx, b);

    KRATOS_CATCH("")
}

/**
 * @brief Function to perform the build of the RHS.
 * @details The vector could be sized as the total number of dofs or as the number of unrestrained ones.
 * After the raw build, the entries of fixed DoFs are zeroed.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 */
void BuildRHS(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemVectorType& b) override
{
    KRATOS_TRY

    BuildRHSNoDirichlet(pScheme,rModelPart,b);

    const int ndofs = static_cast<int>(BaseType::mDofSet.size());

    //NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
    #pragma omp parallel for firstprivate(ndofs)
    for (int k = 0; k<ndofs; k++)
    {
        typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k;
        const std::size_t i = dof_iterator->EquationId();

        // Fixed DoFs carry no residual in the block approach.
        if (dof_iterator->IsFixed())
            b[i] = 0.0f;
    }

    KRATOS_CATCH("")
}

/**
 * @brief Builds the list of the DofSets involved in the problem by "asking" to each element
 * and condition its Dofs.
* @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the * way the matrix and RHS are built * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart ) override { KRATOS_TRY; KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Setting up the dofs" << std::endl; //Gets the array of elements from the modeler ElementsArrayType& pElements = rModelPart.Elements(); const int nelements = static_cast<int>(pElements.size()); Element::DofsVectorType ElementalDofList; ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); unsigned int nthreads = OpenMPUtils::GetNumThreads(); // typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type; // typedef std::unordered_set < NodeType::DofType::Pointer, // DofPointerHasher, // DofPointerComparor, // allocator_type > set_type; #ifdef USE_GOOGLE_HASH typedef google::dense_hash_set < NodeType::DofType::Pointer, DofPointerHasher> set_type; #else typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type; #endif // std::vector<set_type> dofs_aux_list(nthreads); // std::vector<allocator_type> allocators(nthreads); KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Number of threads" << nthreads << "\n" << std::endl; for (int i = 0; i < static_cast<int>(nthreads); i++) { #ifdef USE_GOOGLE_HASH dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer()); #else // dofs_aux_list[i] = set_type( allocators[i]); dofs_aux_list[i].reserve(nelements); #endif } KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing element loop" << std::endl; #pragma omp parallel firstprivate(nelements, ElementalDofList) { #pragma omp for schedule(guided, 512) nowait for 
(int i = 0; i < nelements; i++) { typename ElementsArrayType::iterator it = pElements.begin() + i; const unsigned int this_thread_id = OpenMPUtils::ThisThread(); // gets list of Dof involved on every element pScheme->GetElementalDofList(*(it.base()), ElementalDofList, CurrentProcessInfo); dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end()); } KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing condition loop" << std::endl; ConditionsArrayType& pConditions = rModelPart.Conditions(); const int nconditions = static_cast<int>(pConditions.size()); #pragma omp for schedule(guided, 512) for (int i = 0; i < nconditions; i++) { typename ConditionsArrayType::iterator it = pConditions.begin() + i; const unsigned int this_thread_id = OpenMPUtils::ThisThread(); // gets list of Dof involved on every element pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo); dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end()); } } KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing tree reduction\n" << std::endl; // Here we do a reduction in a tree so to have everything on thread 0 unsigned int old_max = nthreads; unsigned int new_max = ceil(0.5*static_cast<double>(old_max)); while (new_max>=1 && new_max != old_max) { if( this->GetEchoLevel() > 2) { //just for debugging std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl; for (int i = 0; i < static_cast<int>(new_max); i++) { if (i + new_max < old_max) { std::cout << i << " - " << i+new_max << std::endl; } } std::cout << "********************" << std::endl; } #pragma omp parallel for for (int i = 0; i < static_cast<int>(new_max); i++) { if (i + new_max < old_max) { dofs_aux_list[i].insert(dofs_aux_list[i+new_max].begin(), dofs_aux_list[i+new_max].end()); dofs_aux_list[i+new_max].clear(); } } old_max = new_max; new_max = 
ceil(0.5*static_cast<double>(old_max)); } KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n" << std::endl; DofsArrayType Doftemp; BaseType::mDofSet = DofsArrayType(); Doftemp.reserve(dofs_aux_list[0].size()); for (auto it= dofs_aux_list[0].begin(); it!= dofs_aux_list[0].end(); it++) { Doftemp.push_back( it->get() ); } Doftemp.Sort(); BaseType::mDofSet = Doftemp; //Throws an exception if there are no Degrees Of Freedom involved in the analysis KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl; KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << BaseType::mDofSet.size() << std::endl; BaseType::mDofSetIsInitialized = true; KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl; KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing lock array" << std::endl; #ifdef _OPENMP if (mlock_array.size() != 0) { for (int i = 0; i < static_cast<int>(mlock_array.size()); i++) { omp_destroy_lock(&mlock_array[i]); } } mlock_array.resize(BaseType::mDofSet.size()); for (int i = 0; i < static_cast<int>(mlock_array.size()); i++) { omp_init_lock(&mlock_array[i]); } #endif KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "End of setup dof set\n" << std::endl; // If reactions are to be calculated, we check if all the dofs have reactions defined // This is tobe done only in debug mode #ifdef KRATOS_DEBUG if(BaseType::GetCalculateReactionsFlag()) { for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) { KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl << "Node : 
"<<dof_iterator->Id()<< std::endl << "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl; } } #endif KRATOS_CATCH(""); } /** * @brief Organises the dofset in order to speed up the building phase * @param rModelPart The model part of the problem to solve */ void SetUpSystem( ModelPart& rModelPart ) override { //int free_id = 0; BaseType::mEquationSystemSize = BaseType::mDofSet.size(); int ndofs = static_cast<int>(BaseType::mDofSet.size()); #pragma omp parallel for firstprivate(ndofs) for (int i = 0; i < static_cast<int>(ndofs); i++) { typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + i; dof_iterator->SetEquationId(i); } //for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) // dof_iterator->SetEquationId(free_id++); } //************************************************************************** //************************************************************************** void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme, TSystemMatrixPointerType& pA, TSystemVectorPointerType& pDx, TSystemVectorPointerType& pb, ModelPart& rModelPart ) override { KRATOS_TRY boost::timer contruct_matrix; if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0)); pA.swap(pNewA); } if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0)); pDx.swap(pNewDx); } if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0)); pb.swap(pNewb); } TSystemMatrixType& A = *pA; TSystemVectorType& Dx = *pDx; TSystemVectorType& b = *pb; //resizing the system vectors and matrix if (A.size1() == 0 || 
BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
    {
        A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
        ConstructMatrixStructure(pScheme, A, rModelPart);
    }
    else
    {
        if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
        {
            KRATOS_WATCH(" it should not come here!!!!!!!! ... this is SLOW");
            KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
            // NOTE(review): the two lines below are unreachable — KRATOS_ERROR throws above.
            A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
            ConstructMatrixStructure(pScheme, A, rModelPart);
        }
    }
    if (Dx.size() != BaseType::mEquationSystemSize)
        Dx.resize(BaseType::mEquationSystemSize, false);
    if (b.size() != BaseType::mEquationSystemSize)
        b.resize(BaseType::mEquationSystemSize, false);

    std::cout << "CONTINUITY EQ: contruct_matrix : " << contruct_matrix.elapsed() << std::endl;

    KRATOS_CATCH("")
}

//**************************************************************************
//**************************************************************************

// No per-step initialization is required by this builder-and-solver.
void InitializeSolutionStep(
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    KRATOS_TRY
    KRATOS_CATCH("")
}

//**************************************************************************
//**************************************************************************

// No per-step finalization is required by this builder-and-solver.
void FinalizeSolutionStep(
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
}

//**************************************************************************
//**************************************************************************

/**
 * @brief Computes the reactions as minus the freshly rebuilt (no-Dirichlet) RHS.
 */
void CalculateReactions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    TSparseSpace::SetToZero(b);

    //refresh RHS to have the correct reactions
    BuildRHSNoDirichlet(pScheme, rModelPart, b);

    const int ndofs = static_cast<int>(BaseType::mDofSet.size());

    //NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
    #pragma omp parallel for firstprivate(ndofs)
    for (int k = 0; k<ndofs; k++)
    {
        typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k;

        // NOTE(review): EquationId() is narrowed to int here — assumed to fit; confirm for very large systems.
        const int i = (dof_iterator)->EquationId();
        (dof_iterator)->GetSolutionStepReactionValue() = -b[i];
    }

    //KRATOS_WATCH(__LINE__)
}

/**
 * @brief Applies the dirichlet conditions. This operation may be very heavy or completely
 * inexpensive depending on the implementation chosen and on how the System Matrix is built.
 * @details For explanation of how it works for a particular implementation the user
 * should refer to the particular Builder And Solver chosen. Here: rows of fixed DoFs are
 * zeroed except the diagonal, the matching columns are zeroed for symmetry, and fully empty
 * rows get a unit diagonal so the system stays invertible.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void ApplyDirichletConditions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    std::size_t system_size = A.size1();
    // 0 = fixed DoF (row/column must be eliminated), 1 = free DoF.
    std::vector<double> scaling_factors (system_size, 0.0f);

    const int ndofs = static_cast<int>(BaseType::mDofSet.size());

    //NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
    #pragma omp parallel for firstprivate(ndofs)
    for(int k = 0; k<ndofs; k++)
    {
        typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k;
        if(dof_iterator->IsFixed())
            scaling_factors[k] = 0.0f;
        else
            scaling_factors[k] = 1.0f;
    }

    // Direct access to the CSR arrays of the ublas compressed matrix.
    double* Avalues = A.value_data().begin();
    std::size_t* Arow_indices = A.index1_data().begin();
    std::size_t* Acol_indices = A.index2_data().begin();

    //detect if there is a line of all zeros and set the diagonal to a 1 if this happens
    #pragma omp parallel for firstprivate(system_size)
    for(int k = 0; k < static_cast<int>(system_size); ++k)
    {
        std::size_t col_begin = Arow_indices[k];
        std::size_t col_end = Arow_indices[k+1];
        bool empty = true;
        for (std::size_t j = col_begin; j < col_end; ++j)
        {
            if(Avalues[j] != 0.0)
            {
                empty = false;
                break;
            }
        }

        if(empty == true)
        {
            // NOTE(review): A(k,k) element-access inside a parallel loop — assumed the
            // diagonal already exists in the sparsity pattern so no insertion occurs; confirm.
            A(k,k) = 1.0;
            b[k] = 0.0;
        }
    }

    #pragma omp parallel for
    for (int k = 0; k < static_cast<int>(system_size); ++k)
    {
        std::size_t col_begin = Arow_indices[k];
        std::size_t col_end = Arow_indices[k+1];
        double k_factor = scaling_factors[k];
        if (k_factor == 0)
        {
            // zero out the whole row, except the diagonal
            for (std::size_t j = col_begin; j < col_end; ++j)
                if (static_cast<int>(Acol_indices[j]) != k )
                    Avalues[j] = 0.0;

            // zero out the RHS
            b[k] = 0.0;
        }
        else
        {
            // zero out the column which is associated with the zero'ed row
            for (std::size_t j = col_begin; j < col_end; ++j)
                if(scaling_factors[ Acol_indices[j] ] == 0 )
                    Avalues[j] = 0.0;
        }
    }
}

/**
 * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
 */
void Clear() override
{
#ifdef _OPENMP
    // Release the per-DoF assembly locks before clearing the base class storage.
    for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
        omp_destroy_lock(&mlock_array[i]);
    mlock_array.resize(0);
#endif

    BaseType::Clear();
}

/**
 * @brief This function is designed to be called once to perform all the checks needed
 * on the input provided. Checks can be "expensive" as the function is designed
 * to catch user's errors.
* @param rModelPart The model part of the problem to solve
* @return 0 all ok
*/
int Check(ModelPart& rModelPart) override
{
    KRATOS_TRY

    return 0;

    KRATOS_CATCH("");
}

///@}
///@name Access
///@{

///@}
///@name Inquiry
///@{

///@}
///@name Friends
///@{

///@}

protected:

///@name Protected static Member Variables
///@{

///@}
///@name Protected member Variables
///@{

#ifdef _OPENMP
    // One lock per global equation id; guards concurrent insertion into the
    // per-row column-index sets while the sparsity pattern is built.
    std::vector< omp_lock_t > mlock_array;
#endif

///@}
///@name Protected Operators
///@{

///@}
///@name Protected Operations
///@{

// Builds the sparsity pattern of the continuity-equation system matrix A:
// for every equation id it collects the set of coupled equation ids (from the
// nodal NODAL_SFD_NEIGHBOURS_ORDER lists and from the conditions), then
// allocates A as a ublas compressed (CSR-like) matrix with sorted columns and
// zeroed values.
virtual void ConstructMatrixStructure(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixType& A,
    ModelPart& rModelPart)
{
    std::cout<<" ConstructMatrixStructure for Continuity equation "<<std::endl;
    //filling with zero the matrix (creating the structure)
    Timer::Start("MatrixStructure");

    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

    // Getting the array of the conditions
    const int nconditions = static_cast<int>(rModelPart.Conditions().size());
    ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();

    const std::size_t equation_size = BaseType::mEquationSystemSize;

    // One hash set of column indices per matrix row
#ifdef USE_GOOGLE_HASH
    std::vector<google::dense_hash_set<std::size_t> > indices(equation_size);
    const std::size_t empty_key = 2*equation_size + 10;
#else
    std::vector<std::unordered_set<std::size_t> > indices(equation_size);
#endif

#pragma omp parallel for firstprivate(equation_size)
    for (int iii = 0; iii < static_cast<int>(equation_size); iii++)
    {
#ifdef USE_GOOGLE_HASH
        indices[iii].set_empty_key(empty_key);
#else
        indices[iii].reserve(40); // heuristic expected couplings per row
#endif
    }

    Element::EquationIdVectorType EquationId;

    /* #pragma omp parallel */
    /* { */
    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);

    // NOTE(review): the enclosing "#pragma omp parallel" is commented out, so
    // this node loop currently runs serially even though it still takes the
    // per-row locks below.
    for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
    {
        /* VectorType nodalSFDneighboursId=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); */
        /* const unsigned int neighSize = nodalSFDneighboursId.size(); */
        const unsigned int neighSize =itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER).size();

        if (EquationId.size() != neighSize)
            EquationId.resize(neighSize, false);

        /* const unsigned int xpos = itNode->GetDofPosition(VELOCITY_X); */
        const unsigned int xpos = itNode->GetDofPosition(PRESSURE);

        // Gather the PRESSURE equation ids of this node's neighbours
        for (unsigned int i = 0; i< neighSize; i++)
        {
            /* unsigned int idNode=nodalSFDneighboursId[i]; */
            unsigned int idNode=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER)[i];
            EquationId[i]=rModelPart.Nodes()[idNode].GetDof(PRESSURE,xpos).EquationId();
        }

        // Insert the whole neighbour set into every row it touches,
        // skipping ids beyond the free-equation range
        for (std::size_t i = 0; i < EquationId.size(); i++)
        {
            if (EquationId[i] < BaseType::mEquationSystemSize)
            {
#ifdef _OPENMP
                omp_set_lock(&mlock_array[EquationId[i]]);
#endif
                auto& row_indices = indices[EquationId[i]];
                for (auto it = EquationId.begin(); it != EquationId.end(); it++)
                {
                    if (*it < BaseType::mEquationSystemSize)
                        row_indices.insert(*it);
                }
#ifdef _OPENMP
                omp_unset_lock(&mlock_array[EquationId[i]]);
#endif
            }
        }

        /* for (std::size_t i = 0; i < EquationId.size(); i++) */
        /* { */
        /* #ifdef _OPENMP */
        /* omp_set_lock(&mlock_array[EquationId[i]]); */
        /* #endif */
        /* auto& row_indices = indices[EquationId[i]]; */
        /* row_indices.insert(EquationId.begin(), EquationId.end()); */
        /* #ifdef _OPENMP */
        /* omp_unset_lock(&mlock_array[EquationId[i]]); */
        /* #endif */
        /* } */
    }
    /* } */

    // Add the condition couplings to the graph (parallel, lock-protected)
    Element::EquationIdVectorType ids(3, 0);
#pragma omp parallel for firstprivate(nconditions, ids)
    for (int iii = 0; iii<nconditions; iii++)
    {
        typename ConditionsArrayType::iterator i_condition = cond_begin + iii;
        pScheme->Condition_EquationId( *(i_condition.base()), ids, CurrentProcessInfo);
        for (std::size_t i = 0; i < ids.size(); i++)
        {
#ifdef _OPENMP
            omp_set_lock(&mlock_array[ids[i]]);
#endif
            auto& row_indices = indices[ids[i]];
            row_indices.insert(ids.begin(), ids.end());
#ifdef _OPENMP
            omp_unset_lock(&mlock_array[ids[i]]);
#endif
        }
    }

    //count the row sizes
    unsigned int nnz = 0;
    for (unsigned int i = 0; i < indices.size(); i++)
        nnz += indices[i].size();

    A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz);

    double* Avalues = A.value_data().begin();
    std::size_t* Arow_indices = A.index1_data().begin();
    std::size_t* Acol_indices = A.index2_data().begin();

    //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
    // (each row offset depends on the previous one: a prefix sum)
    Arow_indices[0] = 0;
    for (int i = 0; i < static_cast<int>(A.size1()); i++)
        Arow_indices[i+1] = Arow_indices[i] + indices[i].size();

    // Fill column indices row by row (sorted, values zero-initialized)
#pragma omp parallel for
    for (int i = 0; i < static_cast<int>(A.size1()); i++)
    {
        const unsigned int row_begin = Arow_indices[i];
        const unsigned int row_end = Arow_indices[i+1];
        unsigned int k = row_begin;
        for (auto it = indices[i].begin(); it != indices[i].end(); it++)
        {
            Acol_indices[k] = *it;
            Avalues[k] = 0.0;
            k++;
        }

        indices[i].clear(); //deallocating the memory

        std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
    }

    A.set_filled(indices.size()+1, nnz);

    Timer::Stop("MatrixStructure");

    /* std::cout<<".....
ConstructMatrixStructure for Continuity equation DONE"<<std::endl; */
}

// NOTE(review): the block below is a retained, commented-out element-based
// variant of ConstructMatrixStructure (kept for reference; not compiled).
/* virtual void ConstructMatrixStructure( */
/*     typename TSchemeType::Pointer pScheme, */
/*     TSystemMatrixType& A, */
/*     ModelPart& rModelPart) */
/* { */
/*     //filling with zero the matrix (creating the structure) */
/*     Timer::Start("MatrixStructure"); */
/*     // Getting the elements from the model */
/*     const int nelements = static_cast<int>(rModelPart.Elements().size()); */
/*     // Getting the array of the conditions */
/*     const int nconditions = static_cast<int>(rModelPart.Conditions().size()); */
/*     ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); */
/*     ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin(); */
/*     ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin(); */
/*     const std::size_t equation_size = BaseType::mEquationSystemSize; */
/* #ifdef USE_GOOGLE_HASH */
/*     std::vector<google::dense_hash_set<std::size_t> > indices(equation_size); */
/*     const std::size_t empty_key = 2*equation_size + 10; */
/* #else */
/*     std::vector<std::unordered_set<std::size_t> > indices(equation_size); */
/* #endif */
/* #pragma omp parallel for firstprivate(equation_size) */
/*     for (int iii = 0; iii < static_cast<int>(equation_size); iii++) */
/*     { */
/* #ifdef USE_GOOGLE_HASH */
/*         indices[iii].set_empty_key(empty_key); */
/* #else */
/*         indices[iii].reserve(40); */
/* #endif */
/*     } */
/*     Element::EquationIdVectorType ids(3, 0); */
/* #pragma omp parallel for firstprivate(nelements, ids) */
/*     for(int iii=0; iii<nelements; iii++) */
/*     { */
/*         typename ElementsContainerType::iterator i_element = el_begin + iii; */
/*         pScheme->EquationId( *(i_element.base()) , ids, CurrentProcessInfo); */
/*         for (std::size_t i = 0; i < ids.size(); i++) */
/*         { */
/* #ifdef _OPENMP */
/*             omp_set_lock(&mlock_array[ids[i]]); */
/* #endif */
/*             auto& row_indices = indices[ids[i]]; */
/*             row_indices.insert(ids.begin(), ids.end()); */
/* #ifdef _OPENMP */
/*             omp_unset_lock(&mlock_array[ids[i]]); */
/* #endif */
/*         } */
/*     } */
/* #pragma omp parallel for firstprivate(nconditions, ids) */
/*     for (int iii = 0; iii<nconditions; iii++) */
/*     { */
/*         typename ConditionsArrayType::iterator i_condition = cond_begin + iii; */
/*         pScheme->Condition_EquationId( *(i_condition.base()), ids, CurrentProcessInfo); */
/*         for (std::size_t i = 0; i < ids.size(); i++) */
/*         { */
/* #ifdef _OPENMP */
/*             omp_set_lock(&mlock_array[ids[i]]); */
/* #endif */
/*             auto& row_indices = indices[ids[i]]; */
/*             row_indices.insert(ids.begin(), ids.end()); */
/* #ifdef _OPENMP */
/*             omp_unset_lock(&mlock_array[ids[i]]); */
/* #endif */
/*         } */
/*     } */
/*     //count the row sizes */
/*     unsigned int nnz = 0; */
/*     for (unsigned int i = 0; i < indices.size(); i++) */
/*         nnz += indices[i].size(); */
/*     A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz); */
/*     double* Avalues = A.value_data().begin(); */
/*     std::size_t* Arow_indices = A.index1_data().begin(); */
/*     std::size_t* Acol_indices = A.index2_data().begin(); */
/*     //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
*/
/*     Arow_indices[0] = 0; */
/*     for (int i = 0; i < static_cast<int>(A.size1()); i++) */
/*         Arow_indices[i+1] = Arow_indices[i] + indices[i].size(); */
/* #pragma omp parallel for */
/*     for (int i = 0; i < static_cast<int>(A.size1()); i++) */
/*     { */
/*         const unsigned int row_begin = Arow_indices[i]; */
/*         const unsigned int row_end = Arow_indices[i+1]; */
/*         unsigned int k = row_begin; */
/*         for (auto it = indices[i].begin(); it != indices[i].end(); it++) */
/*         { */
/*             Acol_indices[k] = *it; */
/*             Avalues[k] = 0.0; */
/*             k++; */
/*         } */
/*         indices[i].clear(); //deallocating the memory */
/*         std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]); */
/*     } */
/*     A.set_filled(indices.size()+1, nnz); */
/*     Timer::Stop("MatrixStructure"); */
/* } */

//**************************************************************************

// Adds a dense local LHS contribution into the global system matrix A.
// NOT thread-safe by itself: uses plain operator() accumulation, so the
// caller must serialize concurrent writes to the same rows.
void AssembleLHS(
    TSystemMatrixType& A,
    LocalSystemMatrixType& LHS_Contribution,
    Element::EquationIdVectorType& EquationId
)
{
    unsigned int local_size = LHS_Contribution.size1();

    for (unsigned int i_local = 0; i_local < local_size; i_local++)
    {
        unsigned int i_global = EquationId[i_local];

        for (unsigned int j_local = 0; j_local < local_size; j_local++)
        {
            unsigned int j_global = EquationId[j_local];

            A(i_global, j_global) += LHS_Contribution(i_local, j_local);
        }
    }
}

// Scatters a local LHS matrix and RHS vector into the global system.
// Thread-safety: either per-row OpenMP locks (USE_LOCKS_IN_ASSEMBLY) or
// atomic updates on each scalar accumulation.
void Assemble(
    TSystemMatrixType& A,
    TSystemVectorType& b,
    const LocalSystemMatrixType& LHS_Contribution,
    const LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId
#ifdef USE_LOCKS_IN_ASSEMBLY
    ,std::vector< omp_lock_t >& lock_array
#endif
)
{
    unsigned int local_size = LHS_Contribution.size1();

    for (unsigned int i_local = 0; i_local < local_size; i_local++)
    {
        unsigned int i_global = EquationId[i_local];

#ifdef USE_LOCKS_IN_ASSEMBLY
        // The lock covers both the RHS and the row assembly below
        omp_set_lock(&lock_array[i_global]);
        b[i_global] += RHS_Contribution(i_local);
#else
        double& r_a = b[i_global];
        const double& v_a = RHS_Contribution(i_local);
#pragma omp atomic
        r_a += v_a;
#endif
        AssembleRowContribution(A,
LHS_Contribution, i_global, i_local, EquationId);
#ifdef USE_LOCKS_IN_ASSEMBLY
        omp_unset_lock(&lock_array[i_global]);
#endif
        //note that computation of reactions is not performed here!
    }
}

//**************************************************************************

// Adds a local RHS contribution into the global vector b.
// Thread-safe: every scalar update is done with "#pragma omp atomic".
void AssembleRHS(
    TSystemVectorType& b,
    LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId
)
{
    unsigned int local_size = RHS_Contribution.size();

    for (unsigned int i_local = 0; i_local < local_size; i_local++)
    {
        unsigned int i_global = EquationId[i_local];

        // ASSEMBLING THE SYSTEM VECTOR
        double& b_value = b[i_global];
        const double& rhs_value = RHS_Contribution[i_local];

#pragma omp atomic
        b_value += rhs_value;
    }
}

///@}
///@name Protected Access
///@{

///@}
///@name Protected Inquiry
///@{

///@}
///@name Protected LifeCycle
///@{

///@}

private:

///@name Static Member Variables
///@{

///@}
///@name Member Variables
///@{

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

// Appends candidate to v only if it is not already present (linear scan).
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
    std::vector<std::size_t>::iterator i = v.begin();
    std::vector<std::size_t>::iterator endit = v.end();

    while (i != endit && (*i) != candidate)
    {
        i++;
    }
    if (i == endit)
    {
        v.push_back(candidate);
    }
}

// Assembles the global RHS vector b from all active elements and conditions,
// without applying Dirichlet (fixed-dof) corrections.  Element and condition
// loops run in one OpenMP parallel region; the scatter itself is made safe
// by the atomics inside AssembleRHS.
void BuildRHSNoDirichlet(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemVectorType& b)
{
    KRATOS_TRY

    //Getting the Elements
    ElementsArrayType& pElements = rModelPart.Elements();

    //getting the array of the conditions
    ConditionsArrayType& ConditionsArray = rModelPart.Conditions();

    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

    //contributions to the system
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
    LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

    //vector containing the localization in the system of the different
    //terms
    Element::EquationIdVectorType EquationId;

    // assemble all elements
    //for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
    const int nelements = static_cast<int>(pElements.size());
#pragma omp parallel firstprivate(nelements, RHS_Contribution, EquationId)
    {
#pragma omp for schedule(guided, 512) nowait
        for(int i=0; i<nelements; i++)
        {
            typename ElementsArrayType::iterator it = pElements.begin() + i;
            //detect if the element is active or not. If the user did not make any choice the element
            //is active by default
            bool element_is_active = true;
            if( (it)->IsDefined(ACTIVE) )
                element_is_active = (it)->Is(ACTIVE);

            if(element_is_active)
            {
                //calculate elemental Right Hand Side Contribution
                pScheme->Calculate_RHS_Contribution(*(it.base()), RHS_Contribution, EquationId, CurrentProcessInfo);

                //assemble the elemental contribution
                AssembleRHS(b, RHS_Contribution, EquationId);
            }
        }

        LHS_Contribution.resize(0, 0, false);
        RHS_Contribution.resize(0, false);

        // assemble all conditions
        //for (typename ConditionsArrayType::ptr_iterator it = ConditionsArray.ptr_begin(); it != ConditionsArray.ptr_end(); ++it)
        const int nconditions = static_cast<int>(ConditionsArray.size());
        //#pragma omp parallel for firstprivate(nconditions, RHS_Contribution, EquationId) schedule(dynamic, 1024)
#pragma omp for schedule(guided, 512)
        for (int i = 0; i<nconditions; i++)
        {
            auto it = ConditionsArray.begin() + i;
            //detect if the element is active or not. If the user did not make any choice the element
            //is active by default
            bool condition_is_active = true;
            if( (it)->IsDefined(ACTIVE) )
                condition_is_active = (it)->Is(ACTIVE);

            if(condition_is_active)
            {
                //calculate elemental contribution
                pScheme->Condition_Calculate_RHS_Contribution(*(it.base()), RHS_Contribution, EquationId, CurrentProcessInfo);

                //assemble the elemental contribution
                AssembleRHS(b, RHS_Contribution, EquationId);
            }
        }
    }

    KRATOS_CATCH("")
}

//******************************************************************************************
//******************************************************************************************

// Splits number_of_rows into number_of_threads contiguous ranges; any
// remainder rows fall into the last partition.
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, vector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads + 1);
    int partition_size = number_of_rows / number_of_threads;
    partitions[0] = 0;
    partitions[number_of_threads] = number_of_rows;
    for (unsigned int i = 1; i < number_of_threads; i++)
        partitions[i] = partitions[i - 1] + partition_size;
}

// Adds row i_local of the dense local matrix Alocal into row i of the CSR
// matrix A.  Exploits locality: searches for each column index forward or
// backward from the previously found position instead of from the row start.
// Precondition: every EquationId[j] exists as a column in row i of A
// (guaranteed by ConstructMatrixStructure), otherwise the find loops run
// out of bounds.
inline void AssembleRowContribution(TSystemMatrixType& A, const Matrix& Alocal, const unsigned int i, const unsigned int i_local, Element::EquationIdVectorType& EquationId)
{
    double* values_vector = A.value_data().begin();
    std::size_t* index1_vector = A.index1_data().begin();
    std::size_t* index2_vector = A.index2_data().begin();

    size_t left_limit = index1_vector[i];
    // size_t right_limit = index1_vector[i+1];

    //find the first entry
    size_t last_pos = ForwardFind(EquationId[0],left_limit,index2_vector);
    size_t last_found = EquationId[0];

#ifndef USE_LOCKS_IN_ASSEMBLY
    double& r_a = values_vector[last_pos];
    const double& v_a = Alocal(i_local,0);
#pragma omp atomic
    r_a += v_a;
#else
    values_vector[last_pos] += Alocal(i_local,0);
#endif

    //now find all of the other entries
    size_t pos = 0;
    for(unsigned int j=1; j<EquationId.size(); j++)
    {
        unsigned int id_to_find = EquationId[j];
        if(id_to_find > last_found)
            pos =
ForwardFind(id_to_find,last_pos+1,index2_vector);
        else
            pos = BackwardFind(id_to_find,last_pos-1,index2_vector);

#ifndef USE_LOCKS_IN_ASSEMBLY
        double& r = values_vector[pos];
        const double& v = Alocal(i_local,j);
#pragma omp atomic
        r += v;
#else
        values_vector[pos] += Alocal(i_local,j);
#endif

        last_found = id_to_find;
        last_pos = pos;
    }
}

// Linear scan to the right from "start" for id_to_find in the CSR column
// array; the caller guarantees the id exists within the current row.
inline unsigned int ForwardFind(const unsigned int id_to_find,
                                const unsigned int start,
                                const size_t* index_vector)
{
    unsigned int pos = start;
    while(id_to_find != index_vector[pos]) pos++;
    return pos;
}

// Linear scan to the left from "start"; same precondition as ForwardFind.
inline unsigned int BackwardFind(const unsigned int id_to_find,
                                 const unsigned int start,
                                 const size_t* index_vector)
{
    unsigned int pos = start;
    while(id_to_find != index_vector[pos]) pos--;
    return pos;
}

///@}
///@name Private Operations
///@{

///@}
///@name Private Access
///@{

///@}
///@name Private Inquiry
///@{

///@}
///@name Un accessible methods
///@{

///@}

}; /* Class NodalResidualBasedBlockBuilderAndSolver */

///@}

///@name Type Definitions
///@{

///@}

} /* namespace Kratos.*/

#endif /* KRATOS_NODAL_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER defined */
read_matrix.h
//------------------------------------------------------------------------------
// GraphBLAS/Demo/Include/demos.h: include file for all demo programs
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2018, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

#ifndef GRAPHBLAS_DEMOS_H
#define GRAPHBLAS_DEMOS_H

#include <stdbool.h>
#include "GraphBLAS.h"
// #include "simple_rand.h"
// #include "simple_timer.h"
// #include "usercomplex.h"

//------------------------------------------------------------------------------
// manage compiler warnings
//------------------------------------------------------------------------------

#if defined __INTEL_COMPILER
#pragma warning (disable: 58 167 144 177 181 186 188 589 593 869 981 1418 1419 1572 1599 2259 2282 2557 2547 3280 )
#elif defined __GNUC__
#pragma GCC diagnostic ignored "-Wunknown-pragmas"
//#pragma GCC diagnostic ignored "-Wunknown-warning-option"
#pragma GCC diagnostic ignored "-Wformat-truncation="
#pragma GCC diagnostic ignored "-Wunused-variable"
#pragma GCC diagnostic ignored "-Wunused-result"
#pragma GCC diagnostic ignored "-Wint-in-bool-context"
#pragma GCC diagnostic ignored "-Wunused-parameter"
// #pragma GCC diagnostic ignored "-Wsign-compare"
#pragma GCC diagnostic ignored "-Wtype-limits"
#pragma GCC diagnostic ignored "-Wincompatible-pointer-types"
// enable these warnings as errors
#pragma GCC diagnostic error "-Wmisleading-indentation"
#pragma GCC diagnostic error "-Wswitch-default"
#endif

#undef  MIN
#undef  MAX
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define MAX(a,b) (((a) > (b)) ? (a) : (b))

// read a double-precision matrix from a file of "i j x" tuples
GrB_Info read_matrix        // read a double-precision matrix
(
    GrB_Matrix *A,          // handle of matrix to create
    FILE *f,                // file to read the tuples from
    bool make_symmetric,    // if true, return A as symmetric
    bool no_self_edges,     // if true, then remove self edges from A
    bool one_based,         // if true, input matrix is 1-based
    bool boolean,           // if true, input is GrB_BOOL, otherwise GrB_FP64
    bool printstuff         // if true, print status to stdout
) ;

// per-thread recursion level (one private copy per OpenMP thread)
extern int32_t level ;
#pragma omp threadprivate(level)

// multiplicative scaling factor for ipagerank, ZSCALE = 2^30
#define ZSCALE ((uint64_t) 1073741824)

//------------------------------------------------------------------------------
// CHECK: expr must be true; if not, return an error condition
//------------------------------------------------------------------------------

// the #include'ing file must define the FREE_ALL macro

#define CHECK(expr,info)                                                \
{                                                                       \
    if (! (expr))                                                       \
    {                                                                   \
        /* free the result and all workspace, and return NULL */        \
        FREE_ALL ;                                                      \
        printf ("Failure: line %d file %s\n", __LINE__, __FILE__) ;     \
        return (info) ;                                                 \
    }                                                                   \
}

//------------------------------------------------------------------------------
// OK: call a GraphBLAS method and check the result
//------------------------------------------------------------------------------

// OK(method) is a macro that calls a GraphBLAS method and checks the status;
// if a failure occurs, it handles the error via the CHECK macro above, and
// returns the error status to the caller.

#define OK(method)                                                      \
{                                                                       \
    info = method ;                                                     \
    if (info != GrB_SUCCESS)                                            \
    {                                                                   \
        printf ("GraphBLAS error:\n%s\n", GrB_error ( )) ;              \
        CHECK (false, info) ;                                           \
    }                                                                   \
}

#endif

//------------------------------------------------------------------------------
// GraphBLAS/Demo/Source/read_matrix.c: read a matrix from stdin
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2018, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Reads a matrix from stdin.  For sample inputs, see the Matrix/* files.
// Each line has the form:
//
//      i j x
//
// where i and j are the row and column indices, and x is the value.
// The matrix is read in double precision.

// free all workspace; this used by the OK(...) macro if an error occurs
#define FREE_ALL                    \
    if (I  != NULL) free (I) ;      \
    if (J  != NULL) free (J) ;      \
    if (X  != NULL) free (X) ;      \
    if (I2 != NULL) free (I2) ;     \
    if (J2 != NULL) free (J2) ;     \
    if (X2 != NULL) free (X2) ;     \
    GrB_free (&scale2_op) ;         \
    GrB_free (&dt2) ;               \
    GrB_free (&dt1) ;               \
    GrB_free (&A) ;                 \
    GrB_free (&B) ;                 \
    GrB_free (&C) ;

//------------------------------------------------------------------------------
// unary operator to divide by 2
//------------------------------------------------------------------------------

// z = x/2, used to average A and A' when symmetrizing a GrB_FP64 matrix
void scale2 (double *z, const double *x)
{
    (*z) = (*x) / 2.0 ;
}

//------------------------------------------------------------------------------
// read a matrix from a file
//------------------------------------------------------------------------------

GrB_Info read_matrix        // read a double-precision or boolean matrix
(
    GrB_Matrix *A_output,   // handle of matrix to create
    FILE *f,                // file to read the tuples from
    bool make_symmetric,    // if true, return A as symmetric
    bool no_self_edges,     // if true, then remove self edges from A
    bool one_based,         // if true, input matrix is 1-based
    bool boolean,           // if true, input is GrB_BOOL, otherwise GrB_FP64
    bool pr                 // if true, print status to stdout
)
{
    int64_t len = 256 ;
    int64_t ntuples = 0 ;
    double x ;
    GrB_Index nvals ;

    //--------------------------------------------------------------------------
    // set all pointers to NULL so that FREE_ALL can free everything safely
    //--------------------------------------------------------------------------

    GrB_Matrix C = NULL, A = NULL, B = NULL ;
GrB_Descriptor dt1 = NULL, dt2 = NULL ; GrB_UnaryOp scale2_op = NULL ; //-------------------------------------------------------------------------- // allocate initial space for tuples //-------------------------------------------------------------------------- size_t xsize = ((boolean) ? sizeof (bool) : sizeof (double)) ; GrB_Index *I = malloc (len * sizeof (int64_t)), *I2 = NULL ; GrB_Index *J = malloc (len * sizeof (int64_t)), *J2 = NULL ; void *X = malloc (len * xsize) ; bool *Xbool ; double *Xdouble ; void *X2 = NULL ; if (I == NULL || J == NULL || X == NULL) { // out of memory if (pr) printf ("out of memory for initial tuples\n") ; FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } Xbool = (bool *) X ; Xdouble = (double *) X ; //-------------------------------------------------------------------------- // read in the tuples from stdin, one per line //-------------------------------------------------------------------------- // format warnings vary with compilers, so read in as double double i2, j2 ; while (fscanf (f, "%lg %lg %lg\n", &i2, &j2, &x) != EOF) { int64_t i = (int64_t) i2 ; int64_t j = (int64_t) j2 ; if (ntuples >= len) { I2 = realloc (I, 2 * len * sizeof (int64_t)) ; J2 = realloc (J, 2 * len * sizeof (int64_t)) ; X2 = realloc (X, 2 * len * xsize) ; if (I2 == NULL || J2 == NULL || X2 == NULL) { if (pr) printf ("out of memory for tuples\n") ; FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } I = I2 ; I2 = NULL ; J = J2 ; J2 = NULL ; X = X2 ; X2 = NULL ; len = len * 2 ; Xbool = (bool *) X ; Xdouble = (double *) X ; } if (one_based) { i-- ; j-- ; } I [ntuples] = i ; J [ntuples] = j ; if (boolean) { Xbool [ntuples] = (x != 0) ; } else { Xdouble [ntuples] = x ; } ntuples++ ; } //-------------------------------------------------------------------------- // find the dimensions //-------------------------------------------------------------------------- if (pr) printf ("ntuples: %.16g\n", (double) ntuples) ; int64_t nrows = 0 ; int64_t ncols = 0 ; for (int64_t k = 0 ; k < 
ntuples ; k++)
    {
        nrows = MAX (nrows, I [k]) ;
        ncols = MAX (ncols, J [k]) ;
    }
    nrows++ ;
    ncols++ ;
    if (pr) printf ("nrows %.16g ncols %.16g\n", (double) nrows, (double) ncols) ;

    //--------------------------------------------------------------------------
    // prune self edges
    //--------------------------------------------------------------------------

    // but not if creating the augmented system aka a bipartite graph
    // NOTE(review): tic/t1 are declared but never used here (timing code
    // appears to have been removed).
    double tic [2], t1 ;
    if (no_self_edges && ! (make_symmetric && nrows != ncols))
    {
        // compact the tuple arrays in place, dropping diagonal entries
        int64_t ntuples2 = 0 ;
        for (int64_t k = 0 ; k < ntuples ; k++)
        {
            if (I [k] != J [k])
            {
                // keep this off-diagonal edge
                I [ntuples2] = I [k] ;
                J [ntuples2] = J [k] ;
                if (boolean)
                {
                    Xbool [ntuples2] = Xbool [k] ;
                }
                else
                {
                    Xdouble [ntuples2] = Xdouble [k] ;
                }
                ntuples2++ ;
            }
        }
        ntuples = ntuples2 ;
    }

    //--------------------------------------------------------------------------
    // build the matrix, summing up duplicates, and then free the tuples
    //--------------------------------------------------------------------------

    // duplicate entries are combined: OR for boolean, + for double
    GrB_Type xtype ;
    GrB_BinaryOp xop, xop2 ;
    if (boolean)
    {
        xtype = GrB_BOOL ;
        xop   = GrB_LOR ;
        xop2  = GrB_FIRST_BOOL ;
    }
    else
    {
        xtype = GrB_FP64 ;
        xop   = GrB_PLUS_FP64 ;
        xop2  = GrB_FIRST_FP64 ;
    }

    GrB_Info info ;
    OK (GrB_Matrix_new (&C, xtype, nrows, ncols)) ;
    if (boolean)
    {
        OK (GrB_Matrix_build (C, I, J, Xbool, ntuples, xop)) ;
    }
    else
    {
        OK (GrB_Matrix_build (C, I, J, Xdouble, ntuples, xop)) ;
    }

    // tuples no longer needed once the matrix is built
    free (I) ; I = NULL ;
    free (J) ; J = NULL ;
    free (X) ; X = NULL ;

    //--------------------------------------------------------------------------
    // construct the descriptors
    //--------------------------------------------------------------------------

    // descriptor dt2: transpose the 2nd input
    OK (GrB_Descriptor_new (&dt2)) ;
    OK (GrB_Descriptor_set (dt2, GrB_INP1, GrB_TRAN)) ;

    // descriptor dt1: transpose the 1st input
    OK (GrB_Descriptor_new (&dt1)) ;
    OK (GrB_Descriptor_set (dt1, GrB_INP0, GrB_TRAN)) ;

    //--------------------------------------------------------------------------
    // create the output matrix
    //--------------------------------------------------------------------------

    if (make_symmetric)
    {
        //----------------------------------------------------------------------
        // ensure the matrix is symmetric
        //----------------------------------------------------------------------

        if (pr) printf ("make symmetric\n") ;
        if (nrows == ncols)
        {
            //------------------------------------------------------------------
            // A = (C+C')/2
            //------------------------------------------------------------------

            if (pr) printf ("A = (C+C')/2\n") ;
            double tic [2], t ;
            OK (GrB_Matrix_new (&A, xtype, nrows, nrows)) ;
            OK (GrB_eWiseAdd (A, NULL, NULL, xop, C, C, dt2)) ;
            OK (GrB_free (&C)) ;
            if (boolean)
            {
                // boolean: C OR C' is already symmetric; no halving needed
                *A_output = A ;
                A = NULL ;
            }
            else
            {
                // double: halve the sum with the scale2 unary op
                OK (GrB_Matrix_new (&C, xtype, nrows, nrows)) ;
                OK (GrB_UnaryOp_new (&scale2_op, scale2, xtype, xtype)) ;
                OK (GrB_apply (C, NULL, NULL, scale2_op, A, NULL)) ;
                OK (GrB_free (&A)) ;
                OK (GrB_free (&scale2_op)) ;
                *A_output = C ;
                C = NULL ;
            }
        }
        else
        {
            //------------------------------------------------------------------
            // A = [0 C ; C' 0], a bipartite graph
            //------------------------------------------------------------------

            // no self edges will exist
            if (pr) printf ("A = [0 C ; C' 0], a bipartite graph\n") ;
            double tic [2], t ;
            int64_t n = nrows + ncols ;
            OK (GrB_Matrix_new (&A, xtype, n, n)) ;
            GrB_Index I_range [3], J_range [3] ;
            I_range [GxB_BEGIN] = 0 ;
            I_range [GxB_END  ] = nrows-1 ;
            J_range [GxB_BEGIN] = nrows ;
            J_range [GxB_END  ] = ncols+nrows-1 ;

            // A (nrows:n-1, 0:nrows-1) += C'
            OK (GrB_assign (A, NULL, xop2, // or NULL,
                C, J_range, GxB_RANGE, I_range, GxB_RANGE, dt1)) ;

            // A (0:nrows-1, nrows:n-1) += C
            OK (GrB_assign (A, NULL, xop2, // or NULL,
                C, I_range, GxB_RANGE, J_range, GxB_RANGE, NULL)) ;

            // force completion; if this statement does not appear, the
            // timing will not account for the final build, which would be
            // postponed until A is used by the caller in another GraphBLAS
            // operation.
            GrB_Matrix_nvals (&nvals, A) ;

            *A_output = A ;
            // set A to NULL so the FREE_ALL macro does not free *A_output
            A = NULL ;
        }
    }
    else
    {
        //----------------------------------------------------------------------
        // return the matrix as-is
        //----------------------------------------------------------------------

        if (pr) printf ("leave A as-is\n") ;
        *A_output = C ;
        // set C to NULL so the FREE_ALL macro does not free *A_output
        C = NULL ;
    }

    //--------------------------------------------------------------------------
    // success: free everything except the result, and return it to the caller
    //--------------------------------------------------------------------------

    FREE_ALL ;
    if (pr) printf ("\nMatrix from file:\n") ;
    GxB_print (*A_output, pr ? GxB_SHORT : GxB_SILENT) ;
    return (GrB_SUCCESS) ;
}
model.h
#pragma once #include <util/common/geom/point.h> #include <util/common/math/vec.h> #include <util/common/plot/plot.h> #include <util/common/math/fuzzy.h> #include <util/common/math/complex.h> #include <cstdint> #include <vector> #include <map> #include <set> #include <array> #include <atomic> #include <omp.h> #ifndef M_PI #define M_PI 3.1415926535897932384626433832795 #endif // !M_PI namespace model { /*****************************************************/ /* params */ /*****************************************************/ struct parameters { double carrier; double sampling_rate; double bitrate; size_t N; double tau; // 0..1 double doppler; double snr; double dopp_from, dopp_to; size_t dopp_count; int num_of_tests; }; inline parameters make_default_parameters() { parameters p = { 0, 9600 * 3, 9600, 64, 0.5, 100.0, -10, 0.0, 200.0, 20, 10 }; return p; } /*****************************************************/ /* data */ /*****************************************************/ class cancellation_token { private: mutable std::shared_ptr < std::atomic_bool > _cancelled; public: cancellation_token() : _cancelled(std::make_shared < std::atomic_bool > ()) { } public: void cancel() const { *_cancelled = true; } bool is_cancelled() const { return *_cancelled; } public: operator bool() const { return is_cancelled(); } }; using ct_t = cancellation_token; enum modulation_t { AM, PM, FM }; using signal_t = std::vector < geom::point < double, math::complex<> > > ; template < typename T > struct sigtuple_t { T am, pm, fm; }; using signals_t = sigtuple_t < signal_t > ; struct signals2d_t { std::vector < std::vector < geom::point2d_t > > grid; sigtuple_t < std::vector < std::vector < double > > > mat; }; using stats_t = sigtuple_t < double > ; using dstats_t = sigtuple_t < std::pair < double, double > > ; struct signals_pair { signals_t base; signals_t recv; }; struct recv_params { double tau; double doppler; }; inline recv_params from_params(const parameters & p) { return { p.tau, 
p.doppler }; } template < typename T > struct slice { std::vector < T > & data; size_t offset; size_t len; slice(std::vector < T > & d, size_t o, size_t l) : data(d), offset(o), len(l) { } T operator[] (size_t i) { return data[i + offset]; } }; template < typename T > inline slice < T > make_slice(std::vector < T > & d, size_t o, size_t l) { return slice < T > (d, o, l); } inline size_t boole(bool b) { return b ? 1 : 0; } inline void modulate ( slice < bool > bits, signals_t & r, const parameters & p, double dopp ) { using math::_i; ASSERT((bits.len & 1) == 0); double ts = (double)bits.len / p.bitrate; size_t len = std::floor(ts * p.sampling_rate); r.am.resize(len); r.pm.resize(len); r.fm.resize(len); #pragma omp parallel for for (int i = 0; i < (int)len; ++i) { double t = i / p.sampling_rate; size_t bit_idx = std::round(t * p.bitrate); size_t bit = boole(bits[bit_idx]); math::complex<> val; math::complex<> doppler = std::exp(- 2 * M_PI * _i * dopp * t); math::complex<> base = std::exp(- 2 * M_PI * _i * p.carrier * t); { val = bit; r.am[i] = { t, base * val * doppler }; } { val = std::exp(M_PI * _i * bit); r.pm[i] = { t, base * val * doppler }; } { double df = (bit ? 
1 : -1) * p.bitrate / 2; double dphi = bit; val = std::exp(- (2 * M_PI * _i * df * t + M_PI * _i * dphi)); r.fm[i] = { t, base * val * doppler }; } } } inline math::complex<> noise() { using math::_i; auto _1 = 1 + _i; math::complex<> r; for (size_t i = 0; i < 12; ++i) { r = r + (rand() + _i * rand()) / RAND_MAX - 0.5 * _1; } return r; } inline void noisify(signals_t & r, double snr) { double es2en = std::exp(snr / 10.0); double e_n = 0, e_am = 0, e_pm = 0, e_fm = 0; std::vector < math::complex<> > n(r.am.size()); #pragma omp parallel for reduction(+:e_n,e_am,e_pm,e_fm) for (int i = 0; i < (int)r.am.size(); ++i) { auto n0 = noise(); n[i] = n0; e_n += math::sqnorm(n0); e_am += math::sqnorm(r.am[i].y); e_pm += math::sqnorm(r.pm[i].y); e_fm += math::sqnorm(r.fm[i].y); } #pragma omp parallel for for (int i = 0; i < (int)r.am.size(); ++i) { r.am[i].y = r.am[i].y + (e_am / es2en / e_n) * n[i]; r.pm[i].y = r.pm[i].y + (e_pm / es2en / e_n) * n[i]; r.fm[i].y = r.fm[i].y + (e_fm / es2en / e_n) * n[i]; } } inline void gen_signals ( signals_pair & r, const parameters & p, const recv_params & rp ) { ASSERT((p.N & 1) == 0); double base_ts = (double)p.N / p.bitrate; ASSERT(0 <= rp.tau && rp.tau < 1); size_t recv_off = std::floor(rp.tau * base_ts * p.bitrate); std::vector < bool > bits(2 * p.N + 1); for (size_t i = 0; i < bits.size(); ++i) bits[i] = (rand() < RAND_MAX / 2); auto base_bits = make_slice(bits, recv_off, p.N); auto recv_bits = make_slice(bits, 0, 2 * p.N); modulate(base_bits, r.base, p, 0); noisify(r.base, +10.0); modulate(recv_bits, r.recv, p, rp.doppler); noisify(r.recv, p.snr); } inline stats_t correlate ( const signals_pair & s, signals_t & r, double dopp, bool normalize, ct_t ct ) { r.am.clear(); r.pm.clear(); r.fm.clear(); r.am.resize(s.base.am.size()); r.pm.resize(s.base.am.size()); r.fm.resize(s.base.am.size()); double am_max = -1, pm_max = -1, fm_max = -1; double am_std = 0, pm_std = 0, fm_std = 0; double am_mean = 0, pm_mean = 0, fm_mean = 0; std::vector < 
math::complex<> > doppler(s.recv.am.size()); #pragma omp parallel for for (int i = 0; i < (int)s.recv.am.size(); ++i) { doppler[i] = std::exp(math::_i * 2 * M_PI * dopp * s.recv.am[i].x); } #pragma omp parallel for reduction(+:am_std,pm_std,fm_std,am_mean,pm_mean,fm_mean) for (int i = 0; i < (int)s.base.am.size(); ++i) { // cannot break due to parallel for, // just omit real calculations instead if (ct) continue; for (size_t j = 0; j < s.base.am.size(); ++j) { r.am[i] = { s.base.am[i].x, r.am[i].y + s.base.am[j].y * math::conjugate(s.recv.am[i + j].y * doppler[i + j]) }; r.pm[i] = { s.base.am[i].x, r.pm[i].y + s.base.pm[j].y * math::conjugate(s.recv.pm[i + j].y * doppler[i + j]) }; r.fm[i] = { s.base.am[i].x, r.fm[i].y + s.base.fm[j].y * math::conjugate(s.recv.fm[i + j].y * doppler[i + j]) }; } double am_cur = (r.am[i].y = math::norm(r.am[i].y)).re; double pm_cur = (r.pm[i].y = math::norm(r.pm[i].y)).re; double fm_cur = (r.fm[i].y = math::norm(r.fm[i].y)).re; am_std += am_cur * am_cur; pm_std += pm_cur * pm_cur; fm_std += fm_cur * fm_cur; am_mean += am_cur; pm_mean += pm_cur; fm_mean += fm_cur; #pragma omp critical { if (am_max < am_cur) am_max = am_cur; if (pm_max < pm_cur) pm_max = pm_cur; if (fm_max < fm_cur) fm_max = fm_cur; } } am_mean /= s.base.am.size(); pm_mean /= s.base.am.size(); fm_mean /= s.base.am.size(); am_std = std::sqrt(am_std / s.base.am.size() - am_mean * am_mean); pm_std = std::sqrt(pm_std / s.base.am.size() - pm_mean * pm_mean); fm_std = std::sqrt(fm_std / s.base.am.size() - fm_mean * fm_mean); if (normalize) { #pragma omp parallel for for (int i = 0; i < (int)s.base.am.size(); ++i) { r.am[i].y = r.am[i].y / am_max; r.pm[i].y = r.pm[i].y / pm_max; r.fm[i].y = r.fm[i].y / fm_max; } } return { (am_max - am_mean) / am_std, (pm_max - pm_mean) / pm_std, (fm_max - fm_mean) / fm_std }; } inline void quality ( signals_t & r, const parameters & p, ct_t ct, const std::function < void() > & cb ) { signals_t c; double ddopp = (p.dopp_to - p.dopp_from) / 
p.dopp_count; r.am.clear(); r.pm.clear(); r.fm.clear(); r.am.resize(p.dopp_count); r.pm.resize(p.dopp_count); r.fm.resize(p.dopp_count); for (size_t k = 0; k < p.num_of_tests; ++k) { if (ct) break; for (size_t i = 0; i < p.dopp_count; ++i) { if (ct) break; double dopp = p.dopp_from + ddopp * i; signals_pair s; recv_params rp = from_params(p); rp.doppler = dopp; gen_signals(s, p, rp); if (ct) break; auto stats = correlate(s, c, 0, false, ct); r.am[i] = { dopp, r.am[i].y * k / (k + 1) + stats.am / (k + 1) }; r.pm[i] = { dopp, r.pm[i].y * k / (k + 1) + stats.pm / (k + 1) }; r.fm[i] = { dopp, r.fm[i].y * k / (k + 1) + stats.fm / (k + 1) }; } if (ct) break; cb(); } } inline dstats_t abmigfun ( const signals_pair & s, signals2d_t & r, const parameters & p, ct_t ct, const std::function < void() > & cb ) { signals_t c; double ddopp = (p.dopp_to - p.dopp_from) / p.dopp_count; r.grid.resize(p.dopp_count); r.mat.am.resize(p.dopp_count); r.mat.pm.resize(p.dopp_count); r.mat.fm.resize(p.dopp_count); for (size_t i = 0; i < p.dopp_count; ++i) { r.grid[i].clear(); r.grid[i].resize(s.base.am.size()); r.mat.am[i].clear(); r.mat.am[i].resize(s.base.am.size()); r.mat.pm[i].clear(); r.mat.pm[i].resize(s.base.am.size()); r.mat.fm[i].clear(); r.mat.fm[i].resize(s.base.am.size()); } double am_max = 0, pm_max = 0, fm_max = 0; double am_dopp = 0, pm_dopp = 0, fm_dopp = 0; double am_tau = 0, pm_tau = 0, fm_tau = 0; double tau_max = 0; for (size_t i = 0; i < p.dopp_count; ++i) { if (ct) break; double dopp = p.dopp_from + ddopp * i; if (ct) break; correlate(s, c, dopp, false, ct); for (int j = 0; j < (int)s.base.am.size(); ++j) { r.grid[i][j] = { dopp, c.am[j].x }; r.mat.am[i][j] = c.am[j].y.re; r.mat.pm[i][j] = c.pm[j].y.re; r.mat.fm[i][j] = c.fm[j].y.re; tau_max = std::fmax(tau_max, c.am[j].x); if (am_max < r.mat.am[i][j]) { am_max = r.mat.am[i][j]; am_dopp = dopp; am_tau = c.am[j].x; } if (pm_max < r.mat.pm[i][j]) { pm_max = r.mat.pm[i][j]; pm_dopp = dopp; pm_tau = c.pm[j].x; } if (fm_max < 
r.mat.fm[i][j]) { fm_max = r.mat.fm[i][j]; fm_dopp = dopp; fm_tau = c.fm[j].x; } } if (ct) break; cb(); } return { { am_tau / tau_max, am_dopp }, { pm_tau / tau_max, pm_dopp }, { fm_tau / tau_max, fm_dopp } }; } /*****************************************************/ /* drawing */ /*****************************************************/ using points_t = std::vector < geom::point2d_t > ; using grid_t = std::vector < std::vector < geom::point < double > > > ; using mat_t = std::vector < std::vector < double > > ; struct plot_data { util::ptr_t < points_t > data; plot::list_drawable < points_t > :: ptr_t plot; }; struct complex_plot_data { plot_data re, im; }; struct plot2d_group_data { std::shared_ptr < grid_t > grid; sigtuple_t < std::shared_ptr < mat_t > > mat; }; struct complex_plot_group_data { plot::auto_viewport < points_t > ::ptr_t autoworld; complex_plot_data am, pm, fm; }; struct single_plot_group_data { plot::auto_viewport < points_t > ::ptr_t autoworld; plot_data am, pm, fm; }; struct model_data { util::ptr_t < parameters > params; complex_plot_group_data signals; complex_plot_group_data signals_shifted; single_plot_group_data correlation; single_plot_group_data quality; plot2d_group_data ambigfun; }; inline static plot_data make_plot_data ( plot::palette::pen_ptr pen = plot::palette::pen(0xffffff), plot::list_data_format data_format = plot::list_data_format::chain ) { plot_data pd; pd.data = util::create < points_t > (); pd.plot = plot::list_drawable < points_t > :: create ( plot::make_data_source(pd.data), nullptr, // no point painter pen ); pd.plot->data_format = data_format; return pd; } inline static complex_plot_group_data make_plot_group_data ( plot::palette::pen_ptr am_re = plot::palette::pen(0x5555ff, 2), plot::palette::pen_ptr pm_re = plot::palette::pen(0x55aa55, 2), plot::palette::pen_ptr fm_re = plot::palette::pen(0xff0000, 2), plot::palette::pen_ptr am_im = plot::palette::pen(0x111166), plot::palette::pen_ptr pm_im = 
plot::palette::pen(0x114411), plot::palette::pen_ptr fm_im = plot::palette::pen(0x660000), plot::list_data_format data_format = plot::list_data_format::chain ) { complex_plot_group_data pd; pd.autoworld = plot::min_max_auto_viewport < points_t > ::create(); pd.am.re = make_plot_data(am_re, data_format); pd.pm.re = make_plot_data(pm_re, data_format); pd.fm.re = make_plot_data(fm_re, data_format); pd.am.im = make_plot_data(am_im, data_format); pd.pm.im = make_plot_data(pm_im, data_format); pd.fm.im = make_plot_data(fm_im, data_format); return pd; } inline static single_plot_group_data make_single_plot_group_data ( plot::palette::pen_ptr am_re = plot::palette::pen(0x5555ff, 2), plot::palette::pen_ptr pm_re = plot::palette::pen(0x55aa55, 2), plot::palette::pen_ptr fm_re = plot::palette::pen(0xff0000, 2), plot::list_data_format data_format = plot::list_data_format::chain ) { single_plot_group_data pd; pd.autoworld = plot::min_max_auto_viewport < points_t > ::create(); pd.am = make_plot_data(am_re, data_format); pd.pm = make_plot_data(pm_re, data_format); pd.fm = make_plot_data(fm_re, data_format); return pd; } inline static plot2d_group_data make_plot2d_group_data() { plot2d_group_data pd; pd.grid = std::make_shared < grid_t > (); pd.mat.am = std::make_shared < mat_t > (); pd.mat.pm = std::make_shared < mat_t > (); pd.mat.fm = std::make_shared < mat_t > (); return pd; } template < typename PlotGroupData > inline static plot::drawable::ptr_t make_root_drawable ( const PlotGroupData & p, std::vector < plot::drawable::ptr_t > layers ) { using namespace plot; return viewporter::create( tick_drawable::create( layer_drawable::create(layers), const_n_tick_factory<axe::x>::create( make_simple_tick_formatter(6, 8), 0, 5 ), const_n_tick_factory<axe::y>::create( make_simple_tick_formatter(6, 8), 0, 5 ), palette::pen(RGB(80, 80, 80)), RGB(200, 200, 200) ), make_viewport_mapper(make_world_mapper < points_t > (p.autoworld)) ); } inline model_data make_model_data(const parameters & p 
= make_default_parameters()) { model_data md; md.params = util::create < parameters > (p); md.signals = make_plot_group_data(); md.signals_shifted = make_plot_group_data(); md.correlation = make_single_plot_group_data(); md.quality = make_single_plot_group_data(); md.ambigfun = make_plot2d_group_data(); return md; } inline void fill_complex(complex_plot_group_data & pg, complex_plot_data & pd, const signal_t & s) { pd.re.data->resize(s.size()); pd.im.data->resize(s.size()); for (size_t i = 0; i < s.size(); ++i) { pd.re.data->at(i) = { s[i].x, s[i].y.re }; pd.im.data->at(i) = { s[i].x, s[i].y.im }; } pg.autoworld->adjust(*pd.re.data); pg.autoworld->adjust(*pd.im.data); } inline void fill_re(single_plot_group_data & pg, plot_data & pd, const signal_t & s) { pd.data->resize(s.size()); pd.data->resize(s.size()); for (size_t i = 0; i < s.size(); ++i) { pd.data->at(i) = { s[i].x, s[i].y.re }; } pg.autoworld->adjust(*pd.data); } inline void fill_signals(model_data & md, const signals_pair & p) { md.signals.autoworld->clear(); md.signals_shifted.autoworld->clear(); fill_complex(md.signals, md.signals.am, p.base.am); fill_complex(md.signals, md.signals.pm, p.base.pm); fill_complex(md.signals, md.signals.fm, p.base.fm); fill_complex(md.signals_shifted, md.signals_shifted.am, p.recv.am); fill_complex(md.signals_shifted, md.signals_shifted.pm, p.recv.pm); fill_complex(md.signals_shifted, md.signals_shifted.fm, p.recv.fm); md.signals.autoworld->flush(); md.signals_shifted.autoworld->flush(); } inline void fill_corr(model_data & md, const signals_t & p) { md.correlation.autoworld->clear(); fill_re(md.correlation, md.correlation.am, p.am); fill_re(md.correlation, md.correlation.pm, p.pm); fill_re(md.correlation, md.correlation.fm, p.fm); md.correlation.autoworld->flush(); } inline void fill_qual(model_data & md, const signals_t & p) { md.quality.autoworld->clear(); fill_re(md.quality, md.quality.am, p.am); fill_re(md.quality, md.quality.pm, p.pm); fill_re(md.quality, 
md.quality.fm, p.fm); md.quality.autoworld->flush(); } inline void fill_af(model_data & md, const signals2d_t & p) { double am_max = 0, pm_max = 0, fm_max = 0; double x_min = (std::numeric_limits < double > :: max) () , x_max = (std::numeric_limits < double > :: lowest) () , y_min = x_min , y_max = x_max; md.ambigfun.grid->resize(p.grid.size()); md.ambigfun.mat.am->resize(p.grid.size()); md.ambigfun.mat.pm->resize(p.grid.size()); md.ambigfun.mat.fm->resize(p.grid.size()); for (size_t i = 0; i < p.grid.size(); ++i) { md.ambigfun.grid->at(i).resize(p.grid[i].size()); md.ambigfun.mat.am->at(i).resize(p.grid[i].size()); md.ambigfun.mat.pm->at(i).resize(p.grid[i].size()); md.ambigfun.mat.fm->at(i).resize(p.grid[i].size()); #pragma omp parallel for firstprivate(i) for (int j = 0; j < (int)p.grid[i].size(); ++j) { am_max = std::fmax(am_max, p.mat.am[i][j]); pm_max = std::fmax(pm_max, p.mat.pm[i][j]); fm_max = std::fmax(fm_max, p.mat.fm[i][j]); x_max = std::fmax(x_max, p.grid[i][j].x); y_max = std::fmax(y_max, p.grid[i][j].y); x_min = std::fmin(x_min, p.grid[i][j].x); y_min = std::fmin(y_min, p.grid[i][j].y); } } for (size_t i = 0; i < p.grid.size(); ++i) #pragma omp parallel for firstprivate(i) for (int j = 0; j < (int)p.grid[i].size(); ++j) { md.ambigfun.grid->at(i).at(j) = { 2 * (p.grid[i][j].x - x_min) / (x_max - x_min) - 1, 2 * (p.grid[i][j].y - y_min) / (y_max - y_min) - 1 }; md.ambigfun.mat.am->at(i).at(j) = p.mat.am[i][j] / am_max; md.ambigfun.mat.pm->at(i).at(j) = p.mat.pm[i][j] / pm_max; md.ambigfun.mat.fm->at(i).at(j) = p.mat.fm[i][j] / fm_max; } } }
convolution_pack1ton.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Naive RVV convolution: input channels are unpacked (1 float per channel),
// output channels are packed `packn` at a time into one vector register.
// Each output pixel accumulates maxk * channels fused multiply-adds and is
// finished with the requested activation before being stored.
static void convolution_pack1ton_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack1ton, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // packn = vector-register length in bytes / sizeof(float)
    // = number of fp32 lanes processed per output store
    const int packn = csrr_vlenb() / 4;
    const word_type vl = vsetvl_e32m1(packn);

    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: flat offsets (in floats) from the top-left input
    // sample to each kernel tap, accounting for dilation; `gap` skips to
    // the next kernel row inside the input image
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    // num_output: one thread per packed output channel group
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // start from the bias (or zero) for this channel group
                vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);

                if (bias_data_ptr)
                {
                    _sum = vle32_v_f32m1(bias_data_ptr + p * packn, vl);
                }

                // weights are laid out [outch/packn][channels][maxk][packn]
                const float* kptr = (const float*)weight_data_pack1ton + maxk * channels * p * packn;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const float* sptr = m.row(i * stride_h) + j * stride_w;

                    for (int k = 0; k < maxk; k++) // 29.23
                    {
                        // broadcast one input scalar against packn weights
                        float val = sptr[space_ofs[k]];
                        vfloat32m1_t _w = vle32_v_f32m1(kptr, vl);
                        _sum = vfmacc_vf_f32m1(_sum, val, _w, vl);

                        kptr += packn;
                    }
                }

                _sum = activation_ps(_sum, activation_type, activation_params, vl);

                vse32_v_f32m1(outptr + j * packn, _sum, vl);
            }

            outptr += outw * packn;
        }
    }
}
9672.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop #pragma omp parallel private (j, k) num_threads(2) { /* E := A*B */ #pragma omp for schedule(dynamic, 1) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } /* F := C*D */ #pragma omp for schedule(dynamic, 1) for (i = 0; i < _PB_NJ; i++) for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } /* G := E*F */ #pragma omp for schedule(dynamic, 1) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. 
*/ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
haval_fmt_plug.c
/* HAVAL cracker patch for JtR. Hacked together during April of 2013 by Dhiru
 * Kholia <dhiru at openwall.com>.
 *
 * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
 * it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted. */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_haval_256_3;
extern struct fmt_main fmt_haval_128_4;
#elif FMT_REGISTERS_H
john_register_one(&fmt_haval_256_3);
john_register_one(&fmt_haval_128_4);
#else

#include <string.h>

#include "arch.h"
#include "sph_haval.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"

#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif

#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
// Tuned on core i7 quad HT
//       256-3  128-4
//   1   227k   228k
//  64  6359k  5489k
// 128  7953k  6654k
// 256  8923k  7618k
// 512  9804k  8223k
//  1k 10307k  8569k  ** set to this value
//  2k 10081k  8427k
//  4k 10551k  8893k
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 64
#else
#define OMP_SCALE 1024
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP

#include "memdbg.h"

#define FORMAT_TAG              "$haval$"
#define TAG_LENGTH              7
#define ALGORITHM_NAME          "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        125
#define BINARY_SIZE256          32
#define BINARY_SIZE128          16
#define SALT_SIZE               0
#define BINARY_ALIGN            4
#define SALT_ALIGN              1
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1

/* Known-answer test vectors; each hash appears both bare and with the
   "$haval$" tag so that both accepted input forms are exercised. */
static struct fmt_tests haval_256_3_tests[] = {
	{"91850C6487C9829E791FC5B58E98E372F3063256BB7D313A93F1F83B426AEDCC", "HAVAL"},
	{"$haval$91850C6487C9829E791FC5B58E98E372F3063256BB7D313A93F1F83B426AEDCC", "HAVAL"},
	// john.pot uses lower case hex, so repeat that hash with lower case hex
	{"$haval$91850c6487c9829e791fc5b58e98e372f3063256bb7d313a93f1f83b426aedcc", "HAVAL"},
	{"8699f1e3384d05b2a84b032693e2b6f46df85a13a50d93808d6874bb8fb9e86c", "abc"},
	{"$haval$8699f1e3384d05b2a84b032693e2b6f46df85a13a50d93808d6874bb8fb9e86c", "abc"},
	{"cd43bec91c50e5f781fc50a78a3e9c8c48b407fa35a20c972178d63867dbe158", "john"},
	{"$haval$cd43bec91c50e5f781fc50a78a3e9c8c48b407fa35a20c972178d63867dbe158", "john"},
	{"5aa9c913463f82260071629c8ac2c54d73b3af016ffd8e8ce128558d909fab06", "passweird"},
	{"$haval$5aa9c913463f82260071629c8ac2c54d73b3af016ffd8e8ce128558d909fab06", "passweird"},
	{NULL}
};

static struct fmt_tests haval_128_4_tests[] = {
	{"EE6BBF4D6A46A679B3A856C88538BB98", ""},
	{"$haval$ee6bbf4d6a46a679b3a856c88538bb98", ""},
	{"6f2132867c9648419adcd5013e532fa2", "abc"},
	{"$haval$6f2132867c9648419adcd5013e532fa2", "abc"},
	{"c98232b4ae6e7ef3235e838387111f23", "john"},
	{"$haval$c98232b4ae6e7ef3235e838387111f23", "john"},
	{"50683b38df349781b2ef29e7720eb730", "passweird"},
	{"$haval$50683b38df349781b2ef29e7720eb730", "passweird"},
	{NULL}
};

/* Per-candidate plaintexts and their computed digests; crypt_out is sized
   for the larger (256-bit) digest and shared by both formats. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE256 / sizeof(ARCH_WORD_32)];

/* Allocate key/digest buffers; under OpenMP the key count is scaled by
   thread count times OMP_SCALE so each thread gets a large batch. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	if (!saved_key) {
		saved_key = mem_calloc(self->params.max_keys_per_crypt,
		                       sizeof(*saved_key));
		crypt_out = mem_calloc(self->params.max_keys_per_crypt,
		                       sizeof(*crypt_out));
	}
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Shared validity check: optional "$haval$" tag followed by exactly
   `len` hex digits. */
static int valid(char *ciphertext, struct fmt_main *self, int len)
{
	char *p;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	if (strlen(p) != len)
		return 0;
	while(*p)
		if(atoi16[ARCH_INDEX(*p++)]==0x7f)
			return 0;
	return 1;
}

/* we need independent valids, since the $haval$ signature is the same */
/* otherwise, if we have input with a mix of both types, then ALL of them */
/* will validate, even though only the ones of the proper type will actually */
/* be tested. If we had a singleton crypt function (which both 128-4 and */
/* 256-3 used, then a single valid would also work. But since each have */
/* their own crypt, and they are NOT compatible, then we need separate valids */
static int valid3(char *ciphertext, struct fmt_main *self)
{
	return valid(ciphertext, self, 64);
}
static int valid4(char *ciphertext, struct fmt_main *self)
{
	return valid(ciphertext, self, 32);
}

/* Decode the 64-hex-digit (256-bit) hash into a static binary buffer. */
static void *get_binary_256(char *ciphertext)
{
	static union {
		unsigned char c[32];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p = strrchr(ciphertext, '$') + 1;
	else
		p = ciphertext;

	for (i = 0; i < 32; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

/* Decode the 32-hex-digit (128-bit) hash into a static binary buffer. */
static void *get_binary_128(char *ciphertext)
{
	static union {
		unsigned char c[16];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p = strrchr(ciphertext, '$') + 1;
	else
		p = ciphertext;

	for (i = 0; i < 16; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

/* Hash-table lookups: successively wider masks of the first digest word. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/* Compute HAVAL-256/3 for every queued key (one OpenMP task per key). */
static int crypt_256_3(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		sph_haval256_3_context ctx;

		sph_haval256_3_init(&ctx);
		sph_haval256_3(&ctx, saved_key[index], strlen(saved_key[index]));
		sph_haval256_3_close(&ctx, (unsigned char*)crypt_out[index]);
	}
	return count;
}

/* Compute HAVAL-128/4 for every queued key. */
static int crypt_128_4(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		sph_haval128_4_context ctx;

		sph_haval128_4_init(&ctx);
		sph_haval128_4(&ctx, saved_key[index], strlen(saved_key[index]));
		sph_haval128_4_close(&ctx, (unsigned char*)crypt_out[index]);
	}
	return count;
}

/* Quick scan: compares only the first ARCH_SIZE bytes of each digest;
   cmp_one*() then confirms on the full length.
   Without OpenMP only index 0 is checked (single-key batches). */
static int cmp_all(void *binary, int count)
{
	int index = 0;

#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one256(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE256);
}

static int cmp_one128(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE128);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store a candidate key, truncated to PLAINTEXT_LENGTH. */
static void haval_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Canonicalize a hash for john.pot: force the "$haval$" tag and
   lower-case hex (FMT_SPLIT_UNIFIES_CASE). */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + 2 * BINARY_SIZE256 + 1];

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;

	strcpy(out, FORMAT_TAG);
	strcpy(&out[TAG_LENGTH], ciphertext);
	strlwr(&out[TAG_LENGTH]);
	return out;
}

struct fmt_main fmt_haval_256_3 = {
	{
		"HAVAL-256-3",
		"",
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE256,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		haval_256_3_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid3,
		split,
		get_binary_256,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		haval_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_256_3,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one256,
		cmp_exact
	}
};

struct fmt_main fmt_haval_128_4 = {
	{
		"HAVAL-128-4",
		"",
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE128,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		haval_128_4_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid4,
		split,
		get_binary_128,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		haval_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_128_4,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one128,
		cmp_exact
	}
};

#endif /* plugin stanza */
GxB_BinaryOp_xtype.c
//------------------------------------------------------------------------------ // GxB_BinaryOp_xtype: return the type of x for z=f(x,y) //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // NOTE: this function is historical. Use GxB_BinaryOp_xtype_name instead. #include "GB.h" GrB_Info GxB_BinaryOp_xtype // type of x ( GrB_Type *xtype, // return type of input x GrB_BinaryOp binaryop // binary operator to query ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE1 ("GxB_BinaryOp_xtype (&xtype, binaryop)") ; GB_RETURN_IF_NULL (xtype) ; GB_RETURN_IF_NULL_OR_FAULTY (binaryop) ; ASSERT_BINARYOP_OK (binaryop, "binaryop for xtype", GB0) ; //-------------------------------------------------------------------------- // return the xtype //-------------------------------------------------------------------------- (*xtype) = binaryop->xtype ; #pragma omp flush return (GrB_SUCCESS) ; }
DenseMatrix.h
//================================================================================================= /*! // \file blaze/math/smp/openmp/DenseMatrix.h // \brief Header file for the OpenMP-based dense matrix SMP implementation // // Copyright (C) 2012-2018 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= #ifndef _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_ #define _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_ //************************************************************************************************* // Includes //************************************************************************************************* #include <omp.h> #include "../../../math/Aliases.h" #include "../../../math/AlignmentFlag.h" #include "../../../math/constraints/SMPAssignable.h" #include "../../../math/expressions/DenseMatrix.h" #include "../../../math/expressions/SparseMatrix.h" #include "../../../math/functors/AddAssign.h" #include "../../../math/functors/Assign.h" #include "../../../math/functors/MultAssign.h" #include "../../../math/functors/SchurAssign.h" #include "../../../math/functors/SubAssign.h" #include "../../../math/simd/SIMDTrait.h" #include "../../../math/smp/ParallelSection.h" #include "../../../math/smp/SerialSection.h" #include "../../../math/smp/ThreadMapping.h" #include "../../../math/StorageOrder.h" #include "../../../math/typetraits/IsDenseMatrix.h" #include "../../../math/typetraits/IsSIMDCombinable.h" #include "../../../math/typetraits/IsSMPAssignable.h" #include "../../../math/views/Submatrix.h" #include "../../../system/SMP.h" #include "../../../util/algorithms/Min.h" #include "../../../util/Assert.h" #include "../../../util/EnableIf.h" #include "../../../util/FunctionTrace.h" #include "../../../util/StaticAssert.h" #include "../../../util/Types.h" namespace blaze { //================================================================================================= // // OPENMP-BASED ASSIGNMENT KERNELS // //================================================================================================= //************************************************************************************************* /*! 
 \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP (compound) assignment of a dense matrix to a dense
//        matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a dense
// matrix to a dense matrix. It executes INSIDE an already-open OpenMP parallel region (the
// caller opens '#pragma omp parallel'); the '#pragma omp for' below hands one submatrix tile
// to each loop iteration.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1   // Type of the left-hand side dense matrix
        , bool SO1       // Storage order of the left-hand side dense matrix
        , typename MT2   // Type of the right-hand side dense matrix
        , bool SO2       // Storage order of the right-hand side dense matrix
        , typename OP >  // Type of the assignment operation
void openmpAssign( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs, OP op )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   using ET1 = ElementType_t<MT1>;
   using ET2 = ElementType_t<MT2>;

   // SIMD kernels are only usable when both element types combine in a SIMD register.
   constexpr bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable_v<ET1,ET2> );
   constexpr size_t SIMDSIZE( SIMDTrait< ElementType_t<MT1> >::size );

   const bool lhsAligned( (~lhs).isAligned() );
   const bool rhsAligned( (~rhs).isAligned() );

   // The team size determines the 2-D tiling (threadmap.first x threadmap.second).
   const int threads( omp_get_num_threads() );
   const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );

   // Tile extents: rounded up so every row/column is covered and, when SIMD is enabled,
   // padded to a multiple of SIMDSIZE so interior tile boundaries preserve alignment.
   const size_t addon1     ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
   const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
   const size_t rest1      ( equalShare1 & ( SIMDSIZE - 1UL ) );
   const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );

   const size_t addon2     ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
   const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
   const size_t rest2      ( equalShare2 & ( SIMDSIZE - 1UL ) );
   const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0; i<threads; ++i )
   {
      const size_t row   ( ( i / threadmap.second ) * rowsPerThread );
      const size_t column( ( i % threadmap.second ) * colsPerThread );

      // SIMD padding can push a tile origin past the matrix; such tiles carry no work.
      if( row >= (~rhs).rows() || column >= (~rhs).columns() )
         continue;

      const size_t m( min( rowsPerThread, (~rhs).rows()    - row    ) );
      const size_t n( min( colsPerThread, (~rhs).columns() - column ) );

      // Select the submatrix views matching the runtime alignment of both operands.
      if( simdEnabled && lhsAligned && rhsAligned ) {
         auto       target( submatrix<aligned>( ~lhs, row, column, m, n ) );
         const auto source( submatrix<aligned>( ~rhs, row, column, m, n ) );
         op( target, source );
      }
      else if( simdEnabled && lhsAligned ) {
         auto       target( submatrix<aligned>( ~lhs, row, column, m, n ) );
         const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) );
         op( target, source );
      }
      else if( simdEnabled && rhsAligned ) {
         auto       target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
         const auto source( submatrix<aligned>( ~rhs, row, column, m, n ) );
         op( target, source );
      }
      else {
         auto       target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
         const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) );
         op( target, source );
      }
   }
}
/*! \endcond */
//*************************************************************************************************


//*************************************************************************************************
/*!
 \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP (compound) assignment of a sparse matrix to a dense
//        matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a sparse
// matrix to a dense matrix. Like the dense overload above, it runs inside an already-open
// OpenMP parallel region; no SIMD padding is applied since the source is sparse.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1   // Type of the left-hand side dense matrix
        , bool SO1       // Storage order of the left-hand side dense matrix
        , typename MT2   // Type of the right-hand side sparse matrix
        , bool SO2       // Storage order of the right-hand side sparse matrix
        , typename OP >  // Type of the assignment operation
void openmpAssign( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs, OP op )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   const size_t threads( omp_get_num_threads() );
   const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );

   // Tile extents rounded up so all rows/columns are covered.
   const size_t addon1( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
   const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );

   const size_t addon2( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
   const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );

   #pragma omp for schedule(dynamic,1) nowait
   for( size_t i=0; i<threads; ++i )
   {
      const size_t row   ( ( i / threadmap.second ) * rowsPerThread );
      const size_t column( ( i % threadmap.second ) * colsPerThread );

      if( row >= (~rhs).rows() || column >= (~rhs).columns() )
         continue;

      // NOTE(review): tile sizes are clamped against the lhs extents here, while the
      // dense overload clamps against rhs. Equivalent as long as callers assert equal
      // dimensions (the smp*Assign front ends do) — confirm if reused elsewhere.
      const size_t m( min( rowsPerThread, (~lhs).rows()    - row    ) );
      const size_t n( min( colsPerThread, (~lhs).columns() - column ) );

      auto       target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
      const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) );
      op( target, source );
   }
}
/*! \endcond */
//*************************************************************************************************




//=================================================================================================
//
//  PLAIN ASSIGNMENT
//
//=================================================================================================

//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be assigned.
// \return void
//
// This function implements the default OpenMP-based SMP assignment to a dense matrix. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_t< IsDenseMatrix_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) > smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); assign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP assignment to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be assigned. // \return void // // This function implements the OpenMP-based SMP assignment to a dense matrix. Due to the // explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_t< IsDenseMatrix_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> > smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { assign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, Assign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // ADDITION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be added. // \return void // // This function implements the default OpenMP-based SMP addition assignment to a dense matrix. 
// Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_t< IsDenseMatrix_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) > smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); addAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be added. // \return void // // This function implements the OpenMP-based SMP addition assignment to a dense matrix. 
Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_t< IsDenseMatrix_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> > smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { addAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, AddAssign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // SUBTRACTION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! 
\cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP subtracction assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be subtracted. // \return void // // This function implements the default OpenMP-based SMP subtraction assignment to a dense matrix. // Due to the explicit application of the SFINAE principle, this function can only be selected by // the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_t< IsDenseMatrix_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) > smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); subAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP subtracction assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. 
// \param rhs The right-hand side matrix to be subtracted. // \return void // // This function implements the default OpenMP-based SMP subtraction assignment of a matrix to a // dense matrix. Due to the explicit application of the SFINAE principle, this function can only // be selected by the compiler in case both operands are SMP-assignable and the element types of // both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_t< IsDenseMatrix_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> > smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { subAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, SubAssign() ); } } } /*! 
\endcond */ //************************************************************************************************* //================================================================================================= // // SCHUR PRODUCT ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP Schur product assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix for the Schur product. // \return void // // This function implements the default OpenMP-based SMP Schur product assignment to a dense // matrix. Due to the explicit application of the SFINAE principle, this function can only be // selected by the compiler in case both operands are SMP-assignable and the element types of // both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_t< IsDenseMatrix_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) > smpSchurAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); schurAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP Schur product assignment to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix for the Schur product. // \return void // // This function implements the OpenMP-based SMP Schur product assignment to a dense matrix. Due // to the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_t< IsDenseMatrix_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> > smpSchurAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { schurAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, SchurAssign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // MULTIPLICATION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be multiplied. // \return void // // This function implements the default OpenMP-based SMP multiplication assignment to a dense // matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. 
Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_t< IsDenseMatrix_v<MT1> > smpMultAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); multAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // COMPILE TIME CONSTRAINT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ namespace { BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE ); } /*! \endcond */ //************************************************************************************************* } // namespace blaze #endif
main.c
#include <stdio.h> #include <omp.h> int main() { int addition, threadID; addition = 0; printf("Using critical\n"); #pragma omp parallel shared(addition) private(threadID) { threadID = omp_get_thread_num(); #pragma omp critical { addition = addition + 1; printf("Thread %d is accessing value %d\n", threadID, addition); } } printf("Final value of the addition is %d\n", addition); addition = 0; printf("Not using critical\n"); #pragma omp parallel shared(addition) private(threadID) { threadID = omp_get_thread_num(); //#pragma omp critical { addition = addition + 1; printf("Thread %d is accessing value %d\n", threadID, addition); } } printf("Final value of the addition is %d\n", addition); }
mandelbrot-parallel.c
//
//  mandelbrot.c
//
//  The Mandelbrot calculation is to iterate the equation
//  z = z*z + c, where z and c are complex numbers, z is initially
//  zero, and c is the coordinate of the point being tested. If
//  the magnitude of z remains less than 2 for ever, then the point
//  c is in the Mandelbrot set. Each pixel is colored from the number of
//  iterations before the magnitude of z exceeds 2 (or maxIterations).
//  The image is written as a binary PPM (P6) to "sortida.ppm"; rows are
//  computed in parallel with OpenMP, each pixel being independent.
//
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

int main(int argc, char *argv[])
{
    // Validate the command line before touching argv[1..3] (the original
    // dereferenced them unconditionally and crashed on missing arguments).
    if (argc < 4) {
        fprintf(stderr, "usage: %s width height maxIterations\n", argv[0]);
        return 1;
    }

    int w = atoi(argv[1]), h = atoi(argv[2]), x, y;
    //each iteration calculates: newz = oldz*oldz + p, where p is the current pixel and oldz starts at the origin
    double pr, pi;                     //real and imaginary part of the pixel p
    double newRe, newIm, oldRe, oldIm; //real and imaginary parts of new and old z
    double zoom = 1, moveX = -0.5, moveY = 0; //change these to zoom and change position
    int maxIterations = atoi(argv[3]); //after how many iterations the inner loop gives up

    if (w <= 0 || h <= 0 || maxIterations <= 0) {
        fprintf(stderr, "width, height and maxIterations must be positive integers\n");
        return 1;
    }

    typedef unsigned char pixelType[3]; // one RGB triple per pixel

    pixelType *pixels = malloc(sizeof(pixelType) * (size_t)h * (size_t)w);
    if (pixels == NULL) {
        fprintf(stderr, "out of memory\n");
        return 1;
    }

    FILE *sortida;

    // Header also echoed to stdout (legacy of the original stdout-writing
    // version); the actual image goes to sortida.ppm below.
    printf("P6\n# CREATOR: Eric R. Weeks / mandel program\n");
    printf("%d %d\n255\n", w, h);

    // Wall-clock timing: clock() would sum CPU time over all OpenMP threads,
    // so omp_get_wtime() is the right measure here.
    double timeBegin = omp_get_wtime();

    //loop through every pixel; rows are distributed statically over the threads
    #pragma omp parallel for shared(pixels,moveX,moveY,zoom) private(x,y,pr,pi,newRe,newIm,oldRe,oldIm) schedule(static)
    for (y = 0; y < h; y++)
        for (x = 0; x < w; x++) {
            //initial real and imaginary part of z, from the pixel location, zoom and position
            pr = 1.5 * (x - w / 2) / (0.5 * zoom * w) + moveX;
            pi = (y - h / 2) / (0.5 * zoom * h) + moveY;
            newRe = newIm = oldRe = oldIm = 0; //these should start at 0,0

            int i; // number of iterations actually performed
            for (i = 0; i < maxIterations; i++) {
                oldRe = newRe;
                oldIm = newIm;
                //the actual iteration: z = z*z + c
                newRe = oldRe * oldRe - oldIm * oldIm + pr;
                newIm = 2 * oldRe * oldIm + pi;
                //if the point left the circle of radius 2: stop
                if ((newRe * newRe + newIm * newIm) > 4)
                    break;
            }

            if (i == maxIterations) {
                //inside the set: black
                pixels[y*w+x][0] = (char)0;
                pixels[y*w+x][1] = (char)0;
                pixels[y*w+x][2] = (char)0;
            }
            else {
                //smooth coloring from the escape iteration count
                double z = sqrt(newRe * newRe + newIm * newIm);
                int brightness = 256 * log2(1.75 + i - log2(log2(z))) / log2((double)maxIterations);
                pixels[y*w+x][0] = (char)brightness;
                pixels[y*w+x][1] = (char)brightness;
                pixels[y*w+x][2] = (char)255;
            }
        }

    fprintf(stderr, "Elapsed time: %.2lf seconds.\n", omp_get_wtime() - timeBegin);

    sortida = fopen("sortida.ppm", "wb");
    if (sortida == NULL) {
        perror("sortida.ppm");
        free(pixels);
        return 1;
    }
    fprintf(sortida, "P6\n# CREATOR: Eric R. Weeks / mandel program\n");
    fprintf(sortida, "%d %d\n255\n", w, h);
    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            fwrite(pixels[y*w+x], 1, sizeof(pixelType), sortida);
        }
    }
    fclose(sortida);

    free(pixels);
    return 0;
}
GB_unaryop__abs_int8_int8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int8_int8
// op(A') function:  GB_tran__abs_int8_int8

// C type:   int8_t
// A type:   int8_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = GB_IABS (aij)

// type of the entries of the input matrix A
#define GB_ATYPE \
    int8_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    int8_t

// aij = Ax [pA]: fetch one entry of A into a local variable
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

// access to the pC-th entry of C
#define GB_CX(p) Cx [p]

// unary operator: integer absolute value
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting from the A type to the C type (identity cast here)
#define GB_CASTING(z, aij) \
    int8_t z = (int8_t) aij ;

// cij = op (cast (aij)): the complete per-entry action
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply GB_IABS to each of the anz entries of Ax, writing into Cx.
// Cx and Ax may be aliased; the operation is purely element-wise, so the
// loop is embarrassingly parallel and is split statically over nthreads
// OpenMP threads.  Returns GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB_unop__abs_int8_int8
(
    int8_t *Cx,       // Cx and Ax may be aliased
    int8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel itself lives in GB_unaryop_transpose.c, which is
// textually included below and specialized by the macros defined above.
GrB_Info GB_tran__abs_int8_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pi-v4.c
/* * Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x) * between 0 and 1. * * parallel version using OpenMP */ #include <stdio.h> #include <stdlib.h> #include <omp.h> /* OpenMP */ #if _DEBUG_ #define _DEBUG_ 1 #else #define _DEBUG_ 0 #include "extrae_user_events.h" #define PROGRAM 1000 #define PI_COMPUTATION 1 #define END 0 #endif int main(int argc, char *argv[]) { double x, sum=0.0, pi=0.0; #if _DEBUG_ double start,end; #endif int i; const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n"; if (argc < 2) { fprintf(stderr, Usage); exit(1); } int num_steps = atoi(argv[1]); double step = 1.0/(double) num_steps; #if _DEBUG_ start= omp_get_wtime(); #else Extrae_event (PROGRAM, PI_COMPUTATION); #endif /* do computation -- using all available threads */ // WARNING : correct code #pragma omp parallel private(i, x) { int id = omp_get_thread_num(); int num_threads = omp_get_num_threads(); // interleaved execution of iterations among threads for (i=id; i < num_steps; i=i+num_threads) { x = (i+0.5)*step; #pragma omp critical sum += 4.0/(1.0+x*x); #if _DEBUG_ printf("thread id:%d it:%d\n",id,i); #endif } } pi = step * sum; #if _DEBUG_ end = omp_get_wtime(); printf("Wall clock execution time = %.9f seconds\n", end-start); #else Extrae_event (PROGRAM, END); #endif /* print results */ printf("Value of pi = %12.10f\n", pi); return EXIT_SUCCESS; }
multi-align.c
/**
 * Coral: short reads error correction with multiple alignments
 * Copyright (C) 2011 Leena Salmela <leena.salmela@cs.helsinki.fi>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <math.h>

#include "define.h"
#include "multi-align.h"
#include "reverse.h"

/* Counters for the three alignment strategies, reported by print_stats(). */
int quick = 0;
int banded = 0;
int full = 0;

/* qsort comparator: order read_pos structures by read id, ties broken by occ. */
int compare_read(const void *s1, const void *s2) {
    read_pos *r1, *r2;
    r1 = (read_pos *)s1;
    r2 = (read_pos *)s2;
    if (r1->read - r2->read == 0) {
        return r1->occ - r2->occ;
    } else {
        return r1->read - r2->read;
    }
}

/* qsort comparator: order read_pos structures by position (ascending). */
int compare_pos(const void *s1, const void *s2) {
    read_pos *r1, *r2;
    r1 = (read_pos *)s1;
    r2 = (read_pos *)s2;
    return r1->pos - r2->pos;
}

/* qsort comparator: order read_pos structures by count (descending). */
int compare_count(const void *s1, const void *s2) {
    read_pos *r1, *r2;
    r1 = (read_pos *)s1;
    r2 = (read_pos *)s2;
    return r2->count - r1->count;
}

/* Update the consensus sequence of an alignment on columns [start, end).
   Each column gets the majority base among the per-base counters; a column
   whose counts are all zero, or where N/gap dominates, becomes '-'.
   Exits the program if end exceeds MAX_CONTIG_LEN. */
void update_consensus(align *alignment, int start, int end) {
    int i;
    if (end >= MAX_CONTIG_LEN) {
        printf("multi-align: max contig len too short.\n");
        exit(1);
    }
#ifdef DEBUG_ALIGN
    printf("Updating consensus: %d -> %d\n", start, end);
#endif
    for(i = start; i < end; i++) {
        if ((alignment->contigA[i] == 0 && alignment->contigC[i] == 0 &&
             alignment->contigG[i] == 0 && alignment->contigT[i] == 0) ||
            (alignment->contigN[i] > alignment->contigA[i] &&
             alignment->contigN[i] > alignment->contigC[i] &&
             alignment->contigN[i] > alignment->contigG[i] &&
             alignment->contigN[i] > alignment->contigT[i])) {
            alignment->consensus[i] = '-';
        } else if (alignment->contigA[i] > alignment->contigC[i] &&
                   alignment->contigA[i] > alignment->contigG[i] &&
                   alignment->contigA[i] > alignment->contigT[i]) {
            alignment->consensus[i] = 'A';
        } else if (alignment->contigC[i] > alignment->contigG[i] &&
                   alignment->contigC[i] > alignment->contigT[i]) {
            alignment->consensus[i] = 'C';
        } else if (alignment->contigG[i] > alignment->contigT[i]) {
            alignment->consensus[i] = 'G';
        } else {
            alignment->consensus[i] = 'T';
        }
    }
}

/**
 * Align one more read against the alignment.
 * - alignment: The alignment computed so far
 * - reads: The set of reads
 * - quals: Per-read quality strings, or NULL when qualities are unavailable
 * - pos: The set of reads for which the alignment is computed
 * - end: Length of alignment
 * - p: The id to pos of the new read to align (the reads with smaller ids
 *      have already been aligned)
 * Returns the length of the alignment after the new read has been added.
 *
 * Strategy: first try a gapless "quick" alignment at the k-mer-derived
 * position; if that has too many mismatches, fall back to dynamic
 * programming — banded when the seeding k-mer is unique (occ == 1), full
 * otherwise.  On success pos[p].edited_read/edited_qual hold the gapped
 * read (in original orientation) and pos[p].aligned is set.
 */
int align_read(align *alignment, char **reads, char **quals, read_pos *pos,
               int end, int p, double max_error_rate, int match_reward,
               int mm_penalty, int gap_penalty) {
    int i,j;
    int m;
    char *read = reads[pos[p].read];
    char *qual = (quals == NULL) ? NULL: quals[pos[p].read];
    int len;
    char r[MAX_READ_LENGTH];    /* working copy of the read (possibly RC'd) */
    char rq[MAX_READ_LENGTH];   /* working copy of its qualities */
    char er[MAX_READ_LENGTH];   /* gapped ("edited") read built by traceback */
    char erq[MAX_READ_LENGTH];  /* qualities matching er */
    int k;
    int kk;
    int m2;
    int edits;
    int old_end;

    /* NOTE(review): the critical section only guards reading the shared
       read/qual arrays into local buffers; presumably callers run this
       from parallel regions — confirm against the call sites. */
#pragma omp critical
    {
        m = strlen(read);
        if (pos[p].ori == 'C') {
            /* Form the reverse complement before aligning */
            for(i = 0; i < (signed int)m; i++) {
                switch(read[i]) {
                case 'a': case 'A': r[m-i-1] = 'T'; break;
                case 'c': case 'C': r[m-i-1] = 'G'; break;
                case 'g': case 'G': r[m-i-1] = 'C'; break;
                case 't': case 'T': r[m-i-1] = 'A'; break;
                case 'N': case 'n': r[m-i-1] = 'N'; break;
                }
                if (quals != NULL) rq[m-i-1] = qual[i];
            }
            r[m] = '\0';
            read = r;
            if (quals != NULL) {
                rq[m] = '\0';
                qual = rq;
            }
        } else {
            /* Make a local copy */
            for(i = 0; i < (signed int)m; i++) {
                r[i] = read[i];
                if (quals != NULL) rq[i] = qual[i];
            }
            r[m] = '\0';
            read = r;
            if (quals != NULL) {
                rq[m] = '\0';
                qual = rq;
            }
        }
    }

    if (end <= 0) {
        /* length of consensus is 0: this read seeds the alignment */
        /* update base counts */
        for(i = 0; i < (signed int)m; i++) {
            alignment->contigA[i] = 0;
            alignment->contigC[i] = 0;
            alignment->contigG[i] = 0;
            alignment->contigT[i] = 0;
            alignment->contigN[i] = 0;
            switch(read[i]) {
            case 'a': case 'A': alignment->contigA[i]++; break;
            case 'c': case 'C': alignment->contigC[i]++; break;
            case 'g': case 'G': alignment->contigG[i]++; break;
            case 't': case 'T': alignment->contigT[i]++; break;
            case '-': case 'N': case 'n': alignment->contigN[i]++; break;
            }
        }
        alignment->offset = pos[p].pos;
        pos[p].pos = 0;
        strcpy(pos[p].edited_read, reads[pos[p].read]);
        if (quals != NULL) {
            memcpy(pos[p].edited_qual, quals[pos[p].read], m);
            pos[p].edited_qual[m] = '\0';
        }
        pos[p].aligned = 1;
        return m;
    }

    update_consensus(alignment, 0, end);
    alignment->consensus[end] = '\0';
    /* translate the read position into alignment coordinates */
    pos[p].pos = pos[p].pos - alignment->offset;

#ifdef DEBUG_ALIGN
    printf("Aligning read %s (%d, %c, %d, %d)\n", read, pos[p].read, pos[p].ori, pos[p].pos, pos[p].count);
    printf("Against conse %s\n", alignment->consensus);
#endif

    if (gap_penalty > 100*mm_penalty) {
        /* There won't be gaps... count mismatches of the plain overlap */
        for(i = 0, j = pos[p].pos, edits = 0; i < m && j < end; i++, j++) {
            if (j >= 0) {
                if (alignment->consensus[j] != toupper(read[i])) edits++;
            }
        }
        if (edits > m*max_error_rate) {
#ifdef DEBUG_ALIGN
            printf("Too many errors in read %d %c\n", pos[p].read, pos[p].ori);
#endif
            /* alignment->ok = 0; */
            return end;
        }
    } else {
        /* Try a quick gapless alignment positioned according to the k-mer */
        if (pos[p].pos > 0) {
            /* skip consensus gap columns when converting the position */
            for(k = 0, j = 0; k < pos[p].pos; j++) {
                if (alignment->consensus[j] != '-') k++;
            }
            while(alignment->consensus[j] == '-') j++;
            pos[p].pos = j;
        }
        /* bail out at the first mismatch (edits < 1 below means "none") */
        for(i = 0, j = pos[p].pos, edits = 0; i < m && j < end && edits < 1; i++, j++) {
            if (j >= 0) {
                if (alignment->consensus[j] == '-') {
                    i--;    /* gap column: consume consensus only */
                } else {
                    if (alignment->consensus[j] != toupper(read[i])) edits++;
                }
            }
        }
    }

    if (edits < 1 || gap_penalty > 100*mm_penalty) {
        quick++;
        /* Quick alignment found! */
#ifdef DEBUG_ALIGN
        printf("Quick alignment found for read %d (%c): %d\n", pos[p].read, pos[p].ori, pos[p].pos);
#endif
        if (pos[p].pos < 0) {
            /* Insert columns to the beginning of the alignment */
            for (k = end; k >= 0; k--) {
                alignment->contigA[k-pos[p].pos] = alignment->contigA[k];
                alignment->contigC[k-pos[p].pos] = alignment->contigC[k];
                alignment->contigG[k-pos[p].pos] = alignment->contigG[k];
                alignment->contigT[k-pos[p].pos] = alignment->contigT[k];
                alignment->contigN[k-pos[p].pos] = alignment->contigN[k];
                alignment->consensus[k-pos[p].pos] = alignment->consensus[k];
            }
            for(k = 0; k < -pos[p].pos; k++) {
                alignment->contigA[k] = 0;
                alignment->contigC[k] = 0;
                alignment->contigG[k] = 0;
                alignment->contigT[k] = 0;
                alignment->contigN[k] = 0;
                alignment->consensus[k] = 'A';
            }
            end += -pos[p].pos;
            alignment->offset += pos[p].pos;
            /* shift every previously aligned read accordingly */
            for(k = 0; k < p; k++) {
                if (pos[k].aligned)
                    pos[k].pos -= pos[p].pos;
            }
            pos[p].pos -= pos[p].pos;
        }

        /* copy the read into er[], inserting '-' at consensus gap columns
           and updating the per-column base counters */
        for(i = 0, j = pos[p].pos, k=0; i < m && j < end; i++, j++, k++) {
            if (alignment->consensus[j] == '-' && gap_penalty <= 100*mm_penalty) {
                er[k] = '-';
                if (quals != NULL) {
                    if (i > 0 ) {
                        erq[k] = (qual[i-1] + qual[i])/2;
                    } else {
                        erq[k] = qual[i];
                    }
                }
                i--;
                alignment->contigN[j]++;
            } else {
                er[k] = read[i];
                if (quals != NULL) erq[k] = qual[i];
                switch(read[i]) {
                case 'A': case 'a': alignment->contigA[j]++; break;
                case 'C': case 'c': alignment->contigC[j]++; break;
                case 'G': case 'g': alignment->contigG[j]++; break;
                case 'T': case 't': alignment->contigT[j]++; break;
                case 'N': case 'n': case '-': alignment->contigN[j]++; break;
                }
            }
        }
        /* read extends past the current consensus: grow the alignment */
        for(; i < m; i++, j++, k++) {
            end++;
            alignment->contigA[j] = 0;
            alignment->contigC[j] = 0;
            alignment->contigG[j] = 0;
            alignment->contigT[j] = 0;
            alignment->contigN[j] = 0;
            er[k] = read[i];
            if (quals != NULL) erq[k] = qual[i];
            switch(read[i]) {
            case 'A': case 'a': alignment->contigA[j]++; break;
            case 'C': case 'c': alignment->contigC[j]++; break;
            case 'G': case 'g': alignment->contigG[j]++; break;
            case 'T': case 't': alignment->contigT[j]++; break;
            case 'N': case 'n': case '-': alignment->contigN[j]++; break;
            }
        }
        er[k] = '\0';
#ifdef DEBUG_ALIGN
        printf("ER: %s\n", er);
#endif
        if (quals != NULL) erq[k] = '\0';
        /* store the edited read back in its original orientation */
        if (pos[p].ori == 'U') {
            for(i = 0; i < k; i++){
                pos[p].edited_read[i] = er[i];
                if (quals != NULL) pos[p].edited_qual[i] = erq[i];
            }
            pos[p].edited_read[i] = '\0';
            if (quals != NULL) pos[p].edited_qual[i] = '\0';
        } else {
            for(i = 0; i < k; i++) {
                if (quals != NULL) pos[p].edited_qual[k-i-1] = erq[i];
                switch(er[i]) {
                case 'A': case 'a': pos[p].edited_read[k-i-1] = 'T'; break;
                case 'C': case 'c': pos[p].edited_read[k-i-1] = 'G'; break;
                case 'G': case 'g': pos[p].edited_read[k-i-1] = 'C'; break;
                case 'T': case 't': pos[p].edited_read[k-i-1] = 'A'; break;
                default: pos[p].edited_read[k-i-1] = read[i];
                }
            }
            pos[p].edited_read[i] = '\0';
            if (quals != NULL) pos[p].edited_qual[i] = '\0';
        }
        pos[p].aligned = 1;
        return end;
    }

    if (pos[p].occ == 1) {
        banded++;
        /* The k-mer occurs only once -> use banded alignment.
           [s, e] is the diagonal band, widened by max_error_rate*m + 1. */
        int s, e;
        s = pos[p].pos - (max_error_rate * m + 1);
        e = pos[p].pos + (max_error_rate * m + 1);
#ifdef DEBUG_ALIGN
        printf("Band: %d -> %d\n", s, e);
#endif
        /* widen the band by one column per consensus gap inside it */
        for(i = s; i <= e; i++) {
            if (i >= 0) {
                if (alignment->consensus[i] == '-') {
                    e++;
                }
                if (e >= end) {
                    break;
                }
            }
        }
        for(j = 0; j < m; j++) {
            if (s < end) {
                int gapi;
                int gapj;
                /* band lower edge: no move from dp[s][...] downward */
                if (s >= 0) {
                    gapi = (s == end-1)? 0 : gap_penalty;
                    if (alignment->consensus[s] == toupper(read[j])) {
                        if (alignment->dp[s][j] + match_reward >= alignment->dp[s+1][j] - gapi) {
                            alignment->dp[s+1][j+1] = alignment->dp[s][j] + match_reward;
                            alignment->dp_trace[s+1][j+1] = 'M';
                        } else {
                            alignment->dp[s+1][j+1] = alignment->dp[s+1][j] - gapi;
                            alignment->dp_trace[s+1][j+1] = 'I';
                        }
                    } else {
                        if (alignment->dp[s][j] - mm_penalty >= alignment->dp[s+1][j] - gapi) {
                            alignment->dp[s+1][j+1] = alignment->dp[s][j] - mm_penalty;
                            alignment->dp_trace[s+1][j+1] = 'D';
                        } else {
                            alignment->dp[s+1][j+1] = alignment->dp[s+1][j] - gapi;
                            alignment->dp_trace[s+1][j+1] = 'I';
                        }
                    }
                }
                /* band interior: full three-way recurrence
                   (M/D: diagonal, J: consensus gap, I: read gap) */
                for(i = (s < 0) ? 0 : s+1; i < ((e > end) ? end : e); i++) {
                    gapi = (i == end-1)? 0 : gap_penalty;
                    gapj = (j == m-1 || alignment->consensus[i] == '-')? 0 : gap_penalty;
                    if (alignment->consensus[i] == toupper(read[j])) {
                        if (alignment->dp[i][j] + match_reward >= alignment->dp[i][j+1] - gapj &&
                            alignment->dp[i][j] + match_reward >= alignment->dp[i+1][j] - gapi) {
                            alignment->dp[i+1][j+1] = alignment->dp[i][j] + match_reward;
                            alignment->dp_trace[i+1][j+1] = 'M';
                        } else if (alignment->dp[i][j+1] - gapj >= alignment->dp[i+1][j] - gapi) {
                            alignment->dp[i+1][j+1] = alignment->dp[i][j+1] - gapj;
                            alignment->dp_trace[i+1][j+1] = 'J';
                        } else {
                            alignment->dp[i+1][j+1] = alignment->dp[i+1][j] - gapi;
                            alignment->dp_trace[i+1][j+1] = 'I';
                        }
                    } else {
                        if (alignment->dp[i][j] - mm_penalty >= alignment->dp[i][j+1] - gapj &&
                            alignment->dp[i][j] - mm_penalty >= alignment->dp[i+1][j] - gapi) {
                            alignment->dp[i+1][j+1] = alignment->dp[i][j] - mm_penalty;
                            alignment->dp_trace[i+1][j+1] = 'D';
                        } else if (alignment->dp[i][j+1] - gapj >= alignment->dp[i+1][j] - gapi) {
                            alignment->dp[i+1][j+1] = alignment->dp[i][j+1] - gapj;
                            alignment->dp_trace[i+1][j+1] = 'J';
                        } else {
                            alignment->dp[i+1][j+1] = alignment->dp[i+1][j] - gapi;
                            alignment->dp_trace[i+1][j+1] = 'I';
                        }
                    }
                }
                /* band upper edge: no move from dp[...][j] rightward */
                if (e < end && e >= 0) {
                    gapj = (j == m-1 || alignment->consensus[e] == '-')? 0 : gap_penalty;
                    if (alignment->consensus[e] == toupper(read[j])) {
                        if (alignment->dp[e][j] + match_reward >= alignment->dp[e][j+1] - gapj) {
                            alignment->dp[e+1][j+1] = alignment->dp[e][j] + match_reward;
                            alignment->dp_trace[e+1][j+1] = 'M';
                        } else {
                            alignment->dp[e+1][j+1] = alignment->dp[e][j+1] - gapj;
                            alignment->dp_trace[e+1][j+1] = 'J';
                        }
                    } else {
                        if (alignment->dp[e][j] - mm_penalty >= alignment->dp[e][j+1] - gapj) {
                            alignment->dp[e+1][j+1] = alignment->dp[e][j] - mm_penalty;
                            alignment->dp_trace[e+1][j+1] = 'D';
                        } else {
                            alignment->dp[e+1][j+1] = alignment->dp[e][j+1] - gapj;
                            alignment->dp_trace[e+1][j+1] = 'J';
                        }
                    }
                }
            } else {
                alignment->dp[end][j+1] = alignment->dp[end][j];
                alignment->dp_trace[end][j+1] = 'I';
            }
            /* slide the band one row down, skipping consensus gap columns */
            s++;
            e++;
            while(s >= 0 && s < end-1 && alignment->consensus[s] == '-') {
                s++;
                /* alignment->dp[s][j+1] = alignment->dp[s-1][j+1]; */
                /* alignment->dp_trace[s][j+1] = 'J'; */
            }
            while(e >= 0 && e < end-1 && alignment->consensus[e] == '-') {
                e++;
                alignment->dp[e][j+1] = alignment->dp[e-1][j+1];
                alignment->dp_trace[e][j+1] = 'J';
            }
        }
        /* fill the final column below the band so traceback can start at
           dp[end][m] */
        for(i = e-1; i < end; i++) {
            alignment->dp[i+1][m] = alignment->dp[i][m];
            alignment->dp_trace[i+1][m] = 'J';
        }
    } else {
        full++;
        /* The k-mer occurs several times -> compute full alignment */
        /* Align the read against the consensus */
        /*
        for(i = 0; i <= end; i++) {
            alignment->dp[i][0] = 0;
            alignment->dp_trace[i][0] = 'J';
        }
        for(j = 0; j <= m; j++) {
            alignment->dp[0][j] = 0;
            alignment->dp_trace[0][j] = 'I';
        }
        */
        for(i = 0; i < end; i++) {
            int gapi = (i == end-1)? 0 : gap_penalty;
            for(j = 0; j < m; j++) {
                int gapj = (j == m-1 || alignment->consensus[i] == '-')? 0 : gap_penalty;
                if (alignment->consensus[i] == toupper(read[j])) {
                    if (alignment->dp[i][j] + match_reward >= alignment->dp[i][j+1] - gapj &&
                        alignment->dp[i][j] + match_reward >= alignment->dp[i+1][j] - gapi) {
                        alignment->dp[i+1][j+1] = alignment->dp[i][j] + match_reward;
                        alignment->dp_trace[i+1][j+1] = 'M';
                    } else if (alignment->dp[i][j+1] - gapj >= alignment->dp[i+1][j] - gapi) {
                        alignment->dp[i+1][j+1] = alignment->dp[i][j+1] - gapj;
                        alignment->dp_trace[i+1][j+1] = 'J';
                    } else {
                        alignment->dp[i+1][j+1] = alignment->dp[i+1][j] - gapi;
                        alignment->dp_trace[i+1][j+1] = 'I';
                    }
                } else {
                    if (alignment->dp[i][j] - mm_penalty >= alignment->dp[i][j+1] - gapj &&
                        alignment->dp[i][j] - mm_penalty >= alignment->dp[i+1][j] - gapi) {
                        alignment->dp[i+1][j+1] = alignment->dp[i][j] - mm_penalty;
                        alignment->dp_trace[i+1][j+1] = 'D';
                    } else if (alignment->dp[i][j+1] - gapj >= alignment->dp[i+1][j] - gapi) {
                        alignment->dp[i+1][j+1] = alignment->dp[i][j+1] - gapj;
                        alignment->dp_trace[i+1][j+1] = 'J';
                    } else {
                        alignment->dp[i+1][j+1] = alignment->dp[i+1][j] - gapi;
                        alignment->dp_trace[i+1][j+1] = 'I';
                    }
                }
            }
        }
    }

    /* Traceback in the dp array - first pass only counts edits so we can
       reject the read before mutating the alignment */
    i = end;
    j = m;
    len = 0;
    edits = 0;
    while(i >= 0 && alignment->dp_trace[i][j] == 'J') {
#ifdef DEBUG_ALIGN
        printf("%c(%d)", alignment->dp_trace[i][j], alignment->dp[i][j]);
#endif
        i--;
    }
    while(j > 0) {
        if (alignment->dp_trace[i][j] == 'D' || alignment->dp_trace[i][j] == 'M') {
#ifdef DEBUG_ALIGN
            printf("%c(%d)", alignment->dp_trace[i][j], alignment->dp[i][j]);
#endif
            len++;
            if (alignment->dp_trace[i][j] == 'D') edits++;
            i--;
            j--;
        } else if (alignment->dp_trace[i][j] == 'J') {
#ifdef DEBUG_ALIGN
            printf("%c(%d)", alignment->dp_trace[i][j], alignment->dp[i][j]);
#endif
            if (j > 0 && j < m && alignment->consensus[i-1] != '-') {
                len++;
                edits++;
            }
            i--;
        } else if (alignment->dp_trace[i][j] == 'I') {
#ifdef DEBUG_ALIGN
            printf("%c(%d)", alignment->dp_trace[i][j], alignment->dp[i][j]);
#endif
            if (i > 0 && i < end) {
                len++;
                edits++;
            }
            j--;
        } else {
            printf("Undefined dp_trace %d %d\n", i, j);
            alignment->ok = 0;
            return end;
        }
    }
#ifdef DEBUG_ALIGN
    printf("\nTracing done. %d %d\n", edits, len);
#endif
    if (edits > len*max_error_rate) {
#ifdef DEBUG_ALIGN
        printf("Too many errors in read %d %c\n", pos[p].read, pos[p].ori);
#endif
        alignment->ok = 0;
        return end;
    }

    /* Traceback in the dp array - second pass builds the gapped read and
       applies insertions to the consensus and to already-aligned reads */
    i = end;
    j = m;
    len = 0;
    edits = 0;
    old_end = end;
    while(i >= 0 && alignment->dp_trace[i][j] == 'J') {
#ifdef DEBUG_ALIGN
        printf("%c", alignment->dp_trace[i][j]);
#endif
        i--;
    }
    while(j > 0) {
        if (alignment->dp_trace[i][j] == 'D' || alignment->dp_trace[i][j] == 'M') {
#ifdef DEBUG_ALIGN
            printf("%c", alignment->dp_trace[i][j]);
#endif
            er[len] = read[j-1];
            if (read[j-1] == 0) {
                printf("Whooot! %d %d\n", j-1, m);
            }
            if (quals != NULL) erq[len] = qual[j-1];
            len++;
            if (alignment->dp_trace[i][j] == 'D') edits++;
            i--;
            j--;
        } else if (alignment->dp_trace[i][j] == 'J') {
#ifdef DEBUG_ALIGN
            printf("%c", alignment->dp_trace[i][j]);
#endif
            er[len] = '-';
            if (quals != NULL) {
                erq[len] = (qual[j] + qual[j-1])/2;
            }
            len++;
            if (j > 0 && j < m && alignment->consensus[i-1] != '-') {
                edits++;
            }
            i--;
        } else if (alignment->dp_trace[i][j] == 'I') {
#ifdef DEBUG_ALIGN
            printf("%c", alignment->dp_trace[i][j]);
#endif
            er[len] = read[j-1];
            if (read[j-1] == 0) {
                printf("Whooot? %d %d\n", j-1, m);
            }
            if (quals != NULL) erq[len] = qual[j-1];
            len++;
            if (i > 0 && i < old_end) {
                edits++;
            }
            if (i == 0) {
                alignment->offset--;
            }
            j--;
            /* Insertion in the read. Make space for the insertion in
               consensus */
            for(k = end; k >= i; k--) {
                alignment->contigA[k+1] = alignment->contigA[k];
                alignment->contigC[k+1] = alignment->contigC[k];
                alignment->contigG[k+1] = alignment->contigG[k];
                alignment->contigT[k+1] = alignment->contigT[k];
                alignment->contigN[k+1] = alignment->contigN[k];
            }
            alignment->contigA[i] = 0;
            alignment->contigC[i] = 0;
            alignment->contigG[i] = 0;
            alignment->contigT[i] = 0;
            alignment->contigN[i] = 0;
            end++;
            /* add the insertion to all aligned reads */
            for(k = 0; k < p; k++) {
                if (pos[k].aligned) {
                    if (pos[k].pos >= i) {
                        pos[k].pos++;
                    } else if (pos[k].pos < i &&
                               pos[k].pos + (signed) strlen(pos[k].edited_read) > i) {
                        alignment->contigN[i]++;
                        m2 = strlen(pos[k].edited_read);
                        if (pos[k].ori == 'C') {
                            /* edited_read is stored reversed for 'C' reads */
                            for(kk = m2+1; kk >= 0; kk--) {
                                if (pos[k].pos + m2 - kk < i) {
                                    pos[k].edited_read[kk] = pos[k].edited_read[kk-1];
                                    if (quals != NULL) pos[k].edited_qual[kk] = pos[k].edited_qual[kk-1];
                                } else if (pos[k].pos + m2 - kk == i) {
                                    pos[k].edited_read[kk] = '-';
                                    if (quals != NULL) {
                                        pos[k].edited_qual[kk] = (pos[k].edited_qual[kk-1] + pos[k].edited_qual[kk+1])/2;
                                    }
                                }
                            }
                        } else {
                            for(kk = m2+1; kk >= 0; kk--) {
                                if (pos[k].pos + kk > i) {
                                    pos[k].edited_read[kk] = pos[k].edited_read[kk-1];
                                    if (quals != NULL) pos[k].edited_qual[kk] = pos[k].edited_qual[kk-1];
                                } else if (pos[k].pos + kk == i) {
                                    pos[k].edited_read[kk] = '-';
                                    if (quals != NULL) {
                                        pos[k].edited_qual[kk] = (pos[k].edited_qual[kk-1] + pos[k].edited_qual[kk+1])/2;
                                    }
                                }
                            }
                        }
                    }
                }
            }
        } else {
            printf("Undefined dp_trace %d %d\n", i, j);
            alignment->ok = 0;
            return end;
        }
    }
#ifdef DEBUG_ALIGN
    printf("\nTracing done.\n");
#endif
    pos[p].pos = i;
    i--;
#ifdef DEBUG_ALIGN
    printf("Read position: %d -> %d\n", i, i+len);
    er[len] = '\0';
    printf("ER: %s\n", er);
#endif
    /* Update alignment counts (er[] was built back-to-front) */
    for(j = 0; j < len; j++) {
        switch(er[j]) {
        case 'A': case 'a': alignment->contigA[i+len-j]++; break;
        case 'C': case 'c': alignment->contigC[i+len-j]++; break;
        case 'G': case 'g': alignment->contigG[i+len-j]++; break;
        case 'T': case 't': alignment->contigT[i+len-j]++; break;
        case 'N': case 'n': case '-': alignment->contigN[i+len-j]++; break;
        default: printf("Trash in er %d %d/%d\n", er[j], j, len);
        }
    }
    /* store the edited read back in its original orientation */
    if (pos[p].ori == 'C') {
        for(j = 0; j < len; j++) {
            if (quals != NULL) pos[p].edited_qual[j] = erq[j];
            switch(er[j]) {
            case 'A': case 'a': pos[p].edited_read[j] = 'T'; break;
            case 'C': case 'c': pos[p].edited_read[j] = 'G'; break;
            case 'G': case 'g': pos[p].edited_read[j] = 'C'; break;
            case 'T': case 't': pos[p].edited_read[j] = 'A'; break;
            default: pos[p].edited_read[j] = er[j]; break;
            }
        }
        pos[p].edited_read[len] = '\0';
        if (quals != NULL) pos[p].edited_qual[len] = '\0';
    } else {
        for(j = 0; j < len; j++) {
            pos[p].edited_read[j] = er[len-j-1];
            if (quals != NULL) pos[p].edited_qual[j] = erq[len-j-1];
        }
        pos[p].edited_read[len] = '\0';
        if (quals != NULL) pos[p].edited_qual[len] = '\0';
    }
    pos[p].aligned = 1;
    return end;
}

/**
 * Compute a multiple alignment.
 * - alignment: The alignment data structure.
 * - reads: Array of reads
 * - subset: input: read ids, orientations and approximate positions for alignment
 *           output: read ids, orientations, positions, and read with inserted gaps
 * - size: the number of reads to align
 * Returns the number of reads aligned. This may be smaller than size if reads are specified
 * multiple times in the input data.
 */
int multi_align(align *alignment, char **reads, char **qual, read_pos *subset,
                int size, double max_error_rate, int max_aligned_reads,
                int match_reward, int mm_penalty, int gap_penalty) {
    int i,j;
    int end;
    int offset;

    /* Initialize alignment */
    alignment->len = 0;
    alignment->offset = 0;
    alignment->ok = 1;

    /* Sort the reads according to read ids */
    qsort(subset, size, sizeof(read_pos), compare_read);

    /* Remove any read that appears twice (compact in place, counting
       duplicates into .count) */
    i = 0;
    subset[0].count = 1;
    subset[0].aligned = 0;
    for(j = 1; j < size; j++) {
        if (subset[i].read != subset[j].read) {
            i++;
            if (i != j) {
                subset[i].read = subset[j].read;
                subset[i].pos = subset[j].pos;
                subset[i].ori = subset[j].ori;
                subset[i].occ = subset[j].occ;
                subset[i].count = 1;
            }
            subset[i].aligned = 0;
        } else {
            subset[i].count++;
        }
    }
    size = i+1;

    if (size > max_aligned_reads) {
#ifdef DEBUG_ALIGN
        printf("Too many reads to align %d\n", size);
#endif
        alignment->ok = 0;
        return 0;
    }

    /* Sort the reads according to counts, so the most frequent read seeds
       the alignment */
    qsort(subset, size, sizeof(read_pos), compare_count);

    /* shift positions so the seed read starts at 0 */
    offset = -subset[0].pos;
    for(i = 0; i < size; i++) {
        subset[i].pos = subset[i].pos+offset;
    }

    /* Form the alignment by adding one read at a time */
    end = 0;
    for(i = 0; i < size && alignment->ok; i++) {
        j = align_read(alignment, reads, qual, subset, end, i, max_error_rate,
                       match_reward, mm_penalty, gap_penalty);
        if (j > 0) end = j;
        if (!alignment->ok) return 0;
#ifdef DEBUG_ALIGN
        printf("Consensus ends at %d\n", end);
        printf("Consensus offset is %d\n", alignment->offset);
#endif
    }

    alignment->len = end;
    update_consensus(alignment, 0, end);

#ifdef DEBUG_ALIGN
    printf("Consensus ends at %d\n", alignment->len);
#endif

    /* compact the subset so only successfully aligned reads remain */
    i = 0;
    for( j = 0; j < size; j++) {
        if (subset[j].aligned) {
            if (i != j) {
                subset[i].read = subset[j].read;
                subset[i].pos = subset[j].pos;
                subset[i].ori = subset[j].ori;
                subset[i].occ = subset[j].occ;
                subset[i].count = subset[j].count;
                subset[i].aligned = subset[j].aligned;
                strcpy(subset[i].edited_read, subset[j].edited_read);
                if (qual != NULL) {
                    memcpy(subset[i].edited_qual, subset[j].edited_qual, strlen(subset[i].edited_read));
                }
                subset[i].edits = subset[j].edits;
            }
            i++;
        }
    }
    return i;
}

/* Remove such reads from read_align that do not share a k-length alignment
   overlap with read r; the survivors are compacted to the front of the
   array.  Returns the number of reads kept. */
int kalign_share(align *alignment, read_pos *read_align, int size, int k, int r, char **reads) {
    int s, e;
    int i;
    int s2, e2;
    int sc, ec;
    int count = 0;

    if (!alignment->ok) return size;

    s = read_align[r].pos;
    e = s + strlen(read_align[r].edited_read);

    for(i = 0; i < size; i++) {
        s2 = read_align[i].pos;
        e2 = s2 + strlen(read_align[i].edited_read);
        /* [sc, ec) is the overlap of read r and read i in alignment
           coordinates */
        sc = s < s2 ? s2:s;
        ec = e < e2 ? e:e2;
        if (ec - sc >= k) {
            if (count != i) {
                read_align[count].read = read_align[i].read;
                read_align[count].pos = read_align[i].pos;
                read_align[count].ori = read_align[i].ori;
                read_align[count].edits = read_align[i].edits;
                strcpy(read_align[count].edited_read, read_align[i].edited_read);
            }
            count++;
        }
    }
    return count;
}

/* Compute the width of the part of the alignment that is shared by all
   reads.  A negative number is returned if no such area is found. */
int common_width(align *alignment, read_pos *read_align, int size) {
    int s,e;
    int i;
    int s2, e2;

    s = 0;
    e = MAX_READ_LENGTH;
    for(i = 0; i < size; i++) {
        s2 = read_align[i].pos;
        e2 = s2 + strlen(read_align[i].edited_read);
        if (s2 > s) s = s2;
        if (e2 < e) e = e2;
    }
    return e-s;
}

/* Compute the quality of an alignment after the alignment has been
   calculated: the minimum, over all reads, of the fraction of columns in
   which the read agrees with the consensus.  Also fills in .edits for each
   read.  Returns 0.0 if the alignment is not ok. */
double align_quality(align *alignment, read_pos *read_align, int size) {
    int i,j;
    int len;
    int c;
    uchar buf[MAX_READ_LENGTH];
    double min = 1.0;

    if (!alignment->ok) return 0.0;

    for(i = 0; i < size; i++) {
        /* bring the edited read into alignment orientation */
        if (read_align[i].ori == 'U') {
            strcpy((char *)buf, read_align[i].edited_read);
        } else {
            reverse(read_align[i].edited_read, (char *)buf);
        }
        len = 0;
        c = 0;
        for(j = 0; j < (signed int)strlen((char *)buf); j++) {
            /* columns where both read and consensus have a gap don't count */
            if (alignment->consensus[read_align[i].pos+j] != '-' || buf[j] != '-') {
                len++;
                if (buf[j] == alignment->consensus[read_align[i].pos+j]) {
                    c++;
                }
            }
        }
        read_align[i].edits = len-c;
        if ((double)c / (double)len < min) min = (double)c / (double)len;
    }
    return min;
}

/* Get the consensus sequence of an alignment; returns its length. */
int get_consensus(align *alignment, char **cons) {
    *cons = alignment->consensus;
    return alignment->len;
}

/* Report how often each of the three alignment strategies was used. */
void print_stats() {
    printf("Quick alignments: %d, Banded alignments: %d, Full alignments: %d\n", quick, banded, full);
}
imd_main_mpi_3d.c
/******************************************************************************
*
* IMD -- The ITAP Molecular Dynamics Program
*
* Copyright 1996-2011 Institute for Theoretical and Applied Physics,
* University of Stuttgart, D-70550 Stuttgart
*
******************************************************************************/

/******************************************************************************
*
* imd_main_mpi_3d.c -- main loop, mpi specific part, three dimensions
*
******************************************************************************/

/******************************************************************************
* $Revision$
* $Date$
******************************************************************************/

#include "imd.h"

/******************************************************************************
*
*  calc_forces
*
*  The forces of the atoms are calculated here.  To achieve this, atoms on
*  the surface of a cpu are exchanged with the neighbours.
*
*  If AR is defined, we use actio=reactio even across CPUs, otherwise we don't
*
*  The force calculation is split into those steps:
*
*  i)   send atom positions of cells on surface to neighbours,
*       receive atom positions from neighbours
*  ii)  zero forces on all cells (local and buffer)
*  iii) calculate forces in local cells, use lower half of neighbours
*       for each cell and use actio==reactio
*  iv)  calculate forces also for upper half of neighbours for all cells
*       that are on the upper surface
*  iv)  or send forces back and add them
*
******************************************************************************/

void calc_forces(int steps)
{
  int  n, k;
  real tmpvec1[8], tmpvec2[8] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};

  /* fill the buffer cells */
  if ((steps == steps_min) || (0 == steps % BUFSTEP)) setup_buffers();
  {
    send_cells(copy_cell,pack_cell,unpack_cell);
  }

  /* clear global accumulation variables */
  tot_pot_energy = 0.0;
  virial = 0.0;
  vir_xx = 0.0;
  vir_yy = 0.0;
  vir_zz = 0.0;
  vir_yz = 0.0;
  vir_zx = 0.0;
  vir_xy = 0.0;
  nfc++;

  /* clear per atom accumulation variables (each cell is independent, so
     the loop may run in parallel) */
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (k=0; k<nallcells; ++k) {
    int  i;
    cell *p;
    p = cell_array + k;
    for (i=0; i<p->n; ++i) {
      KRAFT(p,i,X) = 0.0;
      KRAFT(p,i,Y) = 0.0;
      KRAFT(p,i,Z) = 0.0;
//MYMOD
#ifdef TTM
//#ifndef NBL  // with NBL, NUMNEIGHS is cleared in imd_forces_nbl.c instead
      NUMNEIGHS(p,i)=0;
//#endif
#endif
#ifdef LOD
      LODP(p,i)=0;
#endif
//ENDOF MYMOD
#ifdef UNIAX
      DREH_MOMENT(p,i,X) = 0.0;
      DREH_MOMENT(p,i,Y) = 0.0;
      DREH_MOMENT(p,i,Z) = 0.0;
#endif
#if defined(STRESS_TENS)
      PRESSTENS(p,i,xx) = 0.0;
      PRESSTENS(p,i,yy) = 0.0;
      PRESSTENS(p,i,zz) = 0.0;
      PRESSTENS(p,i,yz) = 0.0;
      PRESSTENS(p,i,zx) = 0.0;
      PRESSTENS(p,i,xy) = 0.0;
#endif
#ifndef MONOLJ
      POTENG(p,i) = 0.0;
#endif
#ifdef NNBR
      NBANZ(p,i) = 0;
#endif
#ifdef CNA
      if (cna) MARK(p,i) = 0;
#endif
#ifdef COVALENT
      NEIGH(p,i)->n = 0;
#endif
#ifdef EAM2
      EAM_RHO(p,i) = 0.0; /* zero host electron density at atom site */
#ifdef EEAM
      EAM_P(p,i) = 0.0; /* zero host electron density at atom site */
#endif
#endif
    }
  }

#ifdef RIGID
  /* clear total forces */
  if ( nsuperatoms>0 )
    for(k=0; k<nsuperatoms; k++) {
      superforce[k].x = 0.0;
      superforce[k].y = 0.0;
      superforce[k].z = 0.0;
    }
#endif

  /* What follows is the standard one-cpu force loop acting on our local
     data cells */

  /* compute forces for all pairs of cells; per-thread contributions to the
     global accumulators are combined via the reduction clause */
  for (n=0; n<nlists; ++n) {
#ifdef _OPENMP
#pragma omp parallel for schedule(runtime) \
  reduction(+:tot_pot_energy,virial,vir_xx,vir_yy,vir_zz,vir_yz,vir_zx,vir_xy)
#endif
    for (k=0; k<npairs[n]; ++k) {
      vektor pbc;
      pair *P;
      P = pairs[n] + k;
      /* periodic-boundary shift vector for this cell pair */
      pbc.x = P->ipbc[0]*box_x.x + P->ipbc[1]*box_y.x + P->ipbc[2]*box_z.x;
      pbc.y = P->ipbc[0]*box_x.y + P->ipbc[1]*box_y.y + P->ipbc[2]*box_z.y;
      pbc.z = P->ipbc[0]*box_x.z + P->ipbc[1]*box_y.z + P->ipbc[2]*box_z.z;
      do_forces(cell_array + P->np, cell_array + P->nq, pbc,
                &tot_pot_energy, &virial, &vir_xx, &vir_yy, &vir_zz,
                &vir_yz, &vir_zx, &vir_xy);
    }
  }

//MYMOD
#ifdef NRB
#ifndef NBL
  nrb_forces();   // This must happen BEFORE send_forces.
                  // In the NBL case this is done directly at the end of the
                  // force loop, because send_forces is called there, not here
  //nrb_test_forces(); // for testing only
#endif
#endif
#ifdef FILTER
  if(steps>0)
    if(steps % filter_int ==0)
    {
      filter_atoms();
    }
#endif
//ENDOF MYMOD

#ifdef COVALENT
  /* complete neighbor tables for remaining pairs of cells */
  for (n=0; n<nlists; ++n) {
#ifdef _OPENMP
#pragma omp parallel for schedule(runtime)
#endif
    for (k=npairs[n]; k<npairs2[n]; ++k) {
      vektor pbc;
      pair *P;
      P = pairs[n] + k;
      pbc.x = P->ipbc[0]*box_x.x + P->ipbc[1]*box_y.x + P->ipbc[2]*box_z.x;
      pbc.y = P->ipbc[0]*box_x.y + P->ipbc[1]*box_y.y + P->ipbc[2]*box_z.y;
      pbc.z = P->ipbc[0]*box_x.z + P->ipbc[1]*box_y.z + P->ipbc[2]*box_z.z;
      do_neightab(cell_array + P->np, cell_array + P->nq, pbc);
    }
  }

#ifndef CNA
  /* second force loop for covalent systems */
  /* does not work correctly - different threads may write to same variables
#ifdef _OPENMP
#pragma omp parallel for schedule(runtime) \
  reduction(+:tot_pot_energy,virial,vir_xx,vir_yy,vir_zz,vir_yz,vir_zx,vir_xy)
#endif
  */
  for (k=0; k<ncells; ++k) {
    do_forces2(cell_array + CELLS(k),
               &tot_pot_energy, &virial, &vir_xx, &vir_yy, &vir_zz,
               &vir_yz, &vir_zx, &vir_xy);
  }
#endif
#endif /* COVALENT */

#ifndef AR
  /* If we don't use actio=reactio accross the cpus, we have do do the
     force loop also on the other half of the neighbours for the cells
     on the surface of the CPU */

  /* compute forces for remaining pairs of cells */
  for (n=0; n<nlists; ++n) {
    /* does not work correctly - different threads may write to same variables
#ifdef _OPENMP
#pragma omp parallel for schedule(runtime)
#endif
    */
    for (k=npairs[n]; k<npairs2[n]; ++k) {
      vektor pbc;
      pair *P;
      P = pairs[n] + k;
      pbc.x = P->ipbc[0]*box_x.x + P->ipbc[1]*box_y.x + P->ipbc[2]*box_z.x;
      pbc.y = P->ipbc[0]*box_x.y + P->ipbc[1]*box_y.y + P->ipbc[2]*box_z.y;
      pbc.z = P->ipbc[0]*box_x.z + P->ipbc[1]*box_y.z + P->ipbc[2]*box_z.z;
      /* potential energy and virial are already complete; */
      /* to avoid double counting, we update only the dummy tmpvec2 */
      do_forces(cell_array + P->np, cell_array + P->nq, pbc,
                tmpvec2, tmpvec2+1, tmpvec2+2, tmpvec2+3,
                tmpvec2+4, tmpvec2+5, tmpvec2+6, tmpvec2+7);
    }
  }
#endif /* not AR */

#ifdef EAM2
#ifdef AR
  /* collect host electron density */
  send_forces(add_rho,pack_rho,unpack_add_rho);
#endif
  /* compute embedding energy and its derivative */
  do_embedding_energy();
  /* distribute derivative of embedding energy */
  send_cells(copy_dF,pack_dF,unpack_dF);

  /* second EAM2 loop over all cells pairs */
  for (n=0; n<nlists; ++n) {
#ifdef _OPENMP
#pragma omp parallel for schedule(runtime) \
  reduction(+:virial,vir_xx,vir_yy,vir_zz,vir_yz,vir_zx,vir_xy)
#endif
    for (k=0; k<npairs[n]; ++k) {
      vektor pbc;
      pair *P;
      P = pairs[n]+k;
      pbc.x = P->ipbc[0]*box_x.x + P->ipbc[1]*box_y.x + P->ipbc[2]*box_z.x;
      pbc.y = P->ipbc[0]*box_x.y + P->ipbc[1]*box_y.y + P->ipbc[2]*box_z.y;
      pbc.z = P->ipbc[0]*box_x.z + P->ipbc[1]*box_y.z + P->ipbc[2]*box_z.z;
      do_forces_eam2(cell_array + P->np, cell_array + P->nq, pbc,
                     &virial, &vir_xx, &vir_yy, &vir_zz,
                     &vir_yz, &vir_zx, &vir_xy);
    }
  }

#ifndef AR
  /* If we don't use actio=reactio accross the cpus, we have do do the
     force loop also on the other half of the neighbours for the cells
     on the surface of the CPU */

  /* compute forces for remaining pairs of cells */
  for (n=0; n<nlists; ++n) {
#ifdef _OPENMP
#pragma omp parallel for schedule(runtime)
#endif
    for (k=npairs[n]; k<npairs2[n]; ++k) {
      vektor pbc;
      pair *P;
      P = pairs[n]+k;
      pbc.x = P->ipbc[0]*box_x.x + P->ipbc[1]*box_y.x + P->ipbc[2]*box_z.x;
      pbc.y = P->ipbc[0]*box_x.y + P->ipbc[1]*box_y.y + P->ipbc[2]*box_z.y;
      pbc.z = P->ipbc[0]*box_x.z + P->ipbc[1]*box_y.z + P->ipbc[2]*box_z.z;
      /* potential energy and virial are already complete; */
      /* to avoid double counting, we update only the dummy tmpvec2 */
      //MY MOD: A bug had crept in here by copying from the second
      // do_forces(..): do_forces_eam2 takes one parameter fewer, since
      // tmpvec2 would correspond to tot_pot_eng, which does not exist here!
      /*
      do_forces_eam2(cell_array + P->np, cell_array + P->nq, pbc,
                     tmpvec2, tmpvec2+1, tmpvec2+2, tmpvec2+3,
                     tmpvec2+4, tmpvec2+5, tmpvec2+6, tmpvec2+7);
      */
      do_forces_eam2(cell_array + P->np, cell_array + P->nq, pbc,
                     tmpvec2+1, tmpvec2+2, tmpvec2+3,
                     tmpvec2+4, tmpvec2+5, tmpvec2+6, tmpvec2+7);
    }
  }
#endif /* not AR */
#endif /* EAM2 */

  /* sum up results of different CPUs */
  tmpvec1[0] = tot_pot_energy;
  tmpvec1[1] = virial;
  tmpvec1[2] = vir_xx;
  tmpvec1[3] = vir_yy;
  tmpvec1[4] = vir_zz;
  tmpvec1[5] = vir_yz;
  tmpvec1[6] = vir_zx;
  tmpvec1[7] = vir_xy;
  MPI_Allreduce( tmpvec1, tmpvec2, 8, REAL, MPI_SUM, cpugrid);
  tot_pot_energy = tmpvec2[0];
  virial = tmpvec2[1];
  vir_xx = tmpvec2[2];
  vir_yy = tmpvec2[3];
  vir_zz = tmpvec2[4];
  vir_yz = tmpvec2[5];
  vir_zx = tmpvec2[6];
  vir_xy = tmpvec2[7];

#ifdef AR
  /* add the forces computed for buffer cells back to their owner CPUs */
  send_forces(add_forces,pack_forces,unpack_forces);
#endif
}
stats.c
//----------------------------------------------------------------------------- // stats.c // // Project: EPA SWMM5 // Version: 5.1 // Date: 03/20/14 (Build 5.1.001) // 09/15/14 (Build 5.1.007) // 03/19/15 (Build 5.1.008) // 08/01/16 (Build 5.1.011) // 03/14/17 (Build 5.1.012) // 05/10/18 (Build 5.1.013) // Author: L. Rossman (EPA) // R. Dickinson (CDM) // // Simulation statistics functions. // // Build 5.1.007: // - Exfiltration losses added to storage node statistics. // // Build 5.1.008: // - Support for updating groundwater statistics added. // - Support for updating maximum reported nodal depths added. // - OpenMP parallelization applied to updating node and link flow statistics. // - Updating of time that conduit is upstrm/dnstrm full was modified. // // Build 5.1.011: // - Surcharging is now evaluated only under dynamic wave flow routing and // storage nodes cannot be classified as surcharged. // // Build 5.1.012: // - Time step statistics now evaluated only in non-steady state periods. // - Check for full conduit flow now accounts for number of barrels. // // Build 5.1.013: // - Include omp.h protected against lack of compiler support for OpenMP. // - Statistics on impervious and pervious runoff totals added. // - Storage nodes with a non-zero surcharge depth (e.g. enclosed tanks) // can now be classified as being surcharged. 
//----------------------------------------------------------------------------- #define _CRT_SECURE_NO_DEPRECATE #include <stdlib.h> #include <string.h> #include <math.h> #include "headers.h" #include "swmm5.h" #if defined(_OPENMP) //(5.1.013) #include <omp.h> #endif //----------------------------------------------------------------------------- // Shared variables //----------------------------------------------------------------------------- #define MAX_STATS 5 static TSysStats SysStats; static TMaxStats MaxMassBalErrs[MAX_STATS]; static TMaxStats MaxCourantCrit[MAX_STATS]; static TMaxStats MaxFlowTurns[MAX_STATS]; static double SysOutfallFlow; //----------------------------------------------------------------------------- // Exportable variables (shared with statsrpt.c) //----------------------------------------------------------------------------- TSubcatchStats* SubcatchStats; TNodeStats* NodeStats; TLinkStats* LinkStats; TStorageStats* StorageStats; TOutfallStats* OutfallStats; TPumpStats* PumpStats; double MaxOutfallFlow; double MaxRunoffFlow; //----------------------------------------------------------------------------- // Imported variables //----------------------------------------------------------------------------- extern double* NodeInflow; // defined in massbal.c extern double* NodeOutflow; // defined in massbal.c //----------------------------------------------------------------------------- // External functions (declared in funcs.h) //----------------------------------------------------------------------------- // stats_open (called from swmm_start in swmm5.c) // stats_close (called from swmm_end in swmm5.c) // stats_report (called from swmm_end in swmm5.c) // stats_updateSubcatchStats (called from subcatch_getRunoff) // stats_updateGwaterStats (called from gwater_getGroundwater) // stats_updateFlowStats (called from routing_execute) // stats_updateCriticalTimeCount (called from getVariableStep in dynwave.c) // stats_updateMaxNodeDepth (called 
from output_saveNodeResults)
//-----------------------------------------------------------------------------
//  Local functions
//-----------------------------------------------------------------------------
static void stats_updateNodeStats(int node, double tStep, DateTime aDate);
static void stats_updateLinkStats(int link, double tStep, DateTime aDate);
static void stats_findMaxStats(void);
static void stats_updateMaxStats(TMaxStats maxStats[], int i, int j, double x);

//=============================================================================

int stats_open()
//
//  Input:   none
//  Output:  returns an error code (0 on success)
//  Purpose: opens the simulation statistics system by allocating and
//           zero-initializing the per-object statistics arrays that are
//           shared with statsrpt.c.
//
{
    int j, k;

    // --- set all pointers to NULL so a later stats_close() is safe even
    //     if one of the allocations below fails part-way through
    NodeStats = NULL;
    LinkStats = NULL;
    StorageStats = NULL;
    OutfallStats = NULL;
    PumpStats = NULL;

    // --- allocate memory for & initialize subcatchment statistics
    SubcatchStats = NULL;
    if ( Nobjects[SUBCATCH] > 0 )
    {
        SubcatchStats = (TSubcatchStats *) calloc(Nobjects[SUBCATCH],
                                                  sizeof(TSubcatchStats));
        if ( !SubcatchStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
        for (j=0; j<Nobjects[SUBCATCH]; j++)
        {
            SubcatchStats[j].precip = 0.0;
            SubcatchStats[j].runon = 0.0;
            SubcatchStats[j].evap = 0.0;
            SubcatchStats[j].infil = 0.0;
            SubcatchStats[j].runoff = 0.0;
            SubcatchStats[j].maxFlow = 0.0;
            SubcatchStats[j].impervRunoff = 0.0;                               //(5.1.013)
            SubcatchStats[j].pervRunoff = 0.0;                                 //
        }

        // --- also reset groundwater statistics for subcatchments that
        //     have a groundwater compartment
        for (j=0; j<Nobjects[SUBCATCH]; j++)
        {
            if ( Subcatch[j].groundwater == NULL ) continue;
            Subcatch[j].groundwater->stats.avgUpperMoist = 0.0;
            Subcatch[j].groundwater->stats.avgWaterTable = 0.0;
            Subcatch[j].groundwater->stats.infil = 0.0;
            Subcatch[j].groundwater->stats.latFlow = 0.0;
            Subcatch[j].groundwater->stats.deepFlow = 0.0;
            Subcatch[j].groundwater->stats.evap = 0.0;
            Subcatch[j].groundwater->stats.maxFlow = 0.0;
        }
    }

    // --- allocate memory for node & link stats
    //     NOTE(review): both arrays are only allocated when links exist, so
    //     NodeStats stays NULL in a link-free model -- stats_report() guards
    //     on Nobjects[LINK] > 0, and the loops below re-check the pointers.
    if ( Nobjects[LINK] > 0 )
    {
        NodeStats = (TNodeStats *) calloc(Nobjects[NODE], sizeof(TNodeStats));
        LinkStats = (TLinkStats *) calloc(Nobjects[LINK], sizeof(TLinkStats));
        if ( !NodeStats || !LinkStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
    }

    // --- initialize node stats
    if ( NodeStats ) for ( j = 0; j < Nobjects[NODE]; j++ )
    {
        NodeStats[j].avgDepth = 0.0;
        NodeStats[j].maxDepth = 0.0;
        NodeStats[j].maxDepthDate = StartDateTime;
        NodeStats[j].maxRptDepth = 0.0;
        NodeStats[j].volFlooded = 0.0;
        NodeStats[j].timeFlooded = 0.0;
        NodeStats[j].timeSurcharged = 0.0;
        NodeStats[j].timeCourantCritical = 0.0;
        NodeStats[j].totLatFlow = 0.0;
        NodeStats[j].maxLatFlow = 0.0;
        NodeStats[j].maxInflow = 0.0;
        NodeStats[j].maxOverflow = 0.0;
        NodeStats[j].maxPondedVol = 0.0;
        NodeStats[j].maxInflowDate = StartDateTime;
        NodeStats[j].maxOverflowDate = StartDateTime;
    }

    // --- initialize link stats
    if ( LinkStats ) for ( j = 0; j < Nobjects[LINK]; j++ )
    {
        LinkStats[j].maxFlow = 0.0;
        LinkStats[j].maxVeloc = 0.0;
        LinkStats[j].maxDepth = 0.0;
        LinkStats[j].timeSurcharged = 0.0;
        LinkStats[j].timeFullUpstream = 0.0;
        LinkStats[j].timeFullDnstream = 0.0;
        LinkStats[j].timeFullFlow = 0.0;
        LinkStats[j].timeCapacityLimited = 0.0;
        LinkStats[j].timeCourantCritical = 0.0;
        for (k=0; k<MAX_FLOW_CLASSES; k++)
            LinkStats[j].timeInFlowClass[k] = 0.0;
        LinkStats[j].flowTurns = 0;
        LinkStats[j].flowTurnSign = 0;
    }

    // --- allocate memory for & initialize storage unit statistics
    if ( Nnodes[STORAGE] > 0 )
    {
        StorageStats = (TStorageStats *) calloc(Nnodes[STORAGE],
                                                sizeof(TStorageStats));
        if ( !StorageStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
        else for ( k = 0; k < Nobjects[NODE]; k++ )
        {
            // storage stats are indexed by the node's sub-index, not by k
            if ( Node[k].type != STORAGE ) continue;
            j = Node[k].subIndex;
            StorageStats[j].initVol = Node[k].newVolume;
            StorageStats[j].avgVol = 0.0;
            StorageStats[j].maxVol = 0.0;
            StorageStats[j].maxFlow = 0.0;
            StorageStats[j].evapLosses = 0.0;
            StorageStats[j].exfilLosses = 0.0;
            StorageStats[j].maxVolDate = StartDateTime;
        }
    }

    // --- allocate memory for & initialize outfall statistics
    if ( Nnodes[OUTFALL] > 0 )
    {
        OutfallStats = (TOutfallStats *) calloc(Nnodes[OUTFALL],
                                                sizeof(TOutfallStats));
        if ( !OutfallStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
        else for ( j = 0; j < Nnodes[OUTFALL]; j++ )
        {
            OutfallStats[j].avgFlow = 0.0;
            OutfallStats[j].maxFlow = 0.0;
            OutfallStats[j].totalPeriods = 0;

            // each outfall also accumulates a total load per pollutant
            if ( Nobjects[POLLUT] > 0 )
            {
                OutfallStats[j].totalLoad =
                    (double *) calloc(Nobjects[POLLUT], sizeof(double));
                if ( !OutfallStats[j].totalLoad )
                {
                    report_writeErrorMsg(ERR_MEMORY, "");
                    return ErrorCode;
                }
                for (k=0; k<Nobjects[POLLUT]; k++)
                    OutfallStats[j].totalLoad[k] = 0.0;
            }
            else OutfallStats[j].totalLoad = NULL;
        }
    }

    // --- allocate memory & initialize pumping statistics
    if ( Nlinks[PUMP] > 0 )
    {
        PumpStats = (TPumpStats *) calloc(Nlinks[PUMP], sizeof(TPumpStats));
        if ( !PumpStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
        else for ( j = 0; j < Nlinks[PUMP]; j++ )
        {
            PumpStats[j].utilized = 0.0;
            PumpStats[j].minFlow = 0.0;
            PumpStats[j].avgFlow = 0.0;
            PumpStats[j].maxFlow = 0.0;
            PumpStats[j].volume = 0.0;
            PumpStats[j].energy = 0.0;
            PumpStats[j].startUps = 0;
            PumpStats[j].offCurveLow = 0.0;
            PumpStats[j].offCurveHigh = 0.0;
        }
    }

    // --- initialize system stats
    MaxRunoffFlow = 0.0;
    MaxOutfallFlow = 0.0;
    SysStats.maxTimeStep = 0.0;
    SysStats.minTimeStep = RouteStep;    // min. starts at the full routing step
    SysStats.avgTimeStep = 0.0;
    SysStats.avgStepCount = 0.0;
    SysStats.steadyStateCount = 0.0;
    return 0;
}

//=============================================================================

void stats_close()
//
//  Input:   none
//  Output:  none
//  Purpose: closes the simulation statistics system, releasing all arrays
//           allocated in stats_open().
//
{
    int j;

    FREE(SubcatchStats);
    FREE(NodeStats);
    FREE(LinkStats);
    FREE(StorageStats);
    if ( OutfallStats )
    {
        // free each outfall's per-pollutant load array before the array itself
        for ( j=0; j<Nnodes[OUTFALL]; j++ )
            FREE(OutfallStats[j].totalLoad);
        FREE(OutfallStats);
    }
    FREE(PumpStats);
}

//=============================================================================

void stats_report()
//
//  Input:   none
//  Output:  none
//  Purpose: reports simulation statistics.
// { // --- report flow routing accuracy statistics if ( Nobjects[LINK] > 0 && RouteModel != NO_ROUTING ) { stats_findMaxStats(); report_writeMaxStats(MaxMassBalErrs, MaxCourantCrit, MAX_STATS); report_writeMaxFlowTurns(MaxFlowTurns, MAX_STATS); report_writeSysStats(&SysStats); } // --- report summary statistics statsrpt_writeReport(); } //============================================================================= void stats_updateSubcatchStats(int j, double rainVol, double runonVol, double evapVol, double infilVol, double impervVol, double pervVol, double runoffVol, double runoff) // // Input: j = subcatchment index // rainVol = rainfall + snowfall volume (ft3) // runonVol = runon volume from other subcatchments (ft3) // evapVol = evaporation volume (ft3) // infilVol = infiltration volume (ft3) // impervVol = impervious runoff volume (ft3) // pervVol = pervious runoff volume (ft3) // runoffVol = runoff volume (ft3) // runoff = runoff rate (cfs) // Output: none // Purpose: updates totals of runoff components for a specific subcatchment. 
// { SubcatchStats[j].precip += rainVol; SubcatchStats[j].runon += runonVol; SubcatchStats[j].evap += evapVol; SubcatchStats[j].infil += infilVol; SubcatchStats[j].runoff += runoffVol; SubcatchStats[j].maxFlow = MAX(SubcatchStats[j].maxFlow, runoff); SubcatchStats[j].impervRunoff += impervVol; //(5.1.013) SubcatchStats[j].pervRunoff += pervVol; // } //============================================================================= void stats_updateGwaterStats(int j, double infil, double evap, double latFlow, double deepFlow, double theta, double waterTable, double tStep) { Subcatch[j].groundwater->stats.infil += infil * tStep; Subcatch[j].groundwater->stats.evap += evap * tStep; Subcatch[j].groundwater->stats.latFlow += latFlow * tStep; Subcatch[j].groundwater->stats.deepFlow += deepFlow * tStep; Subcatch[j].groundwater->stats.avgUpperMoist += theta * tStep; Subcatch[j].groundwater->stats.avgWaterTable += waterTable * tStep; Subcatch[j].groundwater->stats.finalUpperMoist = theta; Subcatch[j].groundwater->stats.finalWaterTable = waterTable; if ( fabs(latFlow) > fabs(Subcatch[j].groundwater->stats.maxFlow) ) { Subcatch[j].groundwater->stats.maxFlow = latFlow; } } //============================================================================= void stats_updateMaxRunoff() // // Input: none // Output: updates global variable MaxRunoffFlow // Purpose: updates value of maximum system runoff rate. // { int j; double sysRunoff = 0.0; for (j=0; j<Nobjects[SUBCATCH]; j++) sysRunoff += Subcatch[j].newRunoff; MaxRunoffFlow = MAX(MaxRunoffFlow, sysRunoff); } //============================================================================= void stats_updateMaxNodeDepth(int j, double depth) // // Input: j = node index // depth = water depth at node at current reporting time (ft) // Output: none // Purpose: updates a node's maximum depth recorded at reporting times. 
// { if ( NodeStats != NULL ) NodeStats[j].maxRptDepth = MAX(NodeStats[j].maxRptDepth, depth); } //============================================================================= void stats_updateFlowStats(double tStep, DateTime aDate, int stepCount, int steadyState) // // Input: tStep = routing time step (sec) // aDate = current date/time // stepCount = # steps required to solve routing at current time period // steadyState = TRUE if steady flow conditions exist // Output: none // Purpose: updates various flow routing statistics at current time period. // { int j; // --- update stats only after reporting period begins if ( aDate < ReportStart ) return; SysOutfallFlow = 0.0; // --- update node & link stats #pragma omp parallel num_threads(NumThreads) { #pragma omp for for ( j=0; j<Nobjects[NODE]; j++ ) stats_updateNodeStats(j, tStep, aDate); #pragma omp for for ( j=0; j<Nobjects[LINK]; j++ ) stats_updateLinkStats(j, tStep, aDate); } // --- update count of times in steady state SysStats.steadyStateCount += steadyState; // --- update time step stats if not in steady state if ( steadyState == FALSE ) { // --- skip initial time step for min. value) if ( OldRoutingTime > 0 ) { SysStats.minTimeStep = MIN(SysStats.minTimeStep, tStep); } SysStats.avgTimeStep += tStep; SysStats.maxTimeStep = MAX(SysStats.maxTimeStep, tStep); // --- update iteration step count stats SysStats.avgStepCount += stepCount; } // --- update max. system outfall flow MaxOutfallFlow = MAX(MaxOutfallFlow, SysOutfallFlow); } //============================================================================= void stats_updateCriticalTimeCount(int node, int link) // // Input: node = node index // link = link index // Output: none // Purpose: updates count of times a node or link was time step-critical. 
// { if ( node >= 0 ) NodeStats[node].timeCourantCritical += 1.0; else if ( link >= 0 ) LinkStats[link].timeCourantCritical += 1.0; } //============================================================================= void stats_updateNodeStats(int j, double tStep, DateTime aDate) // // Input: j = node index // tStep = routing time step (sec) // aDate = current date/time // Output: none // Purpose: updates flow statistics for a node. // { int k, p; double newVolume = Node[j].newVolume; double newDepth = Node[j].newDepth; double yCrown = Node[j].crownElev - Node[j].invertElev; int canPond = (AllowPonding && Node[j].pondedArea > 0.0); // --- update depth statistics NodeStats[j].avgDepth += newDepth; if ( newDepth > NodeStats[j].maxDepth ) { NodeStats[j].maxDepth = newDepth; NodeStats[j].maxDepthDate = aDate; } // --- update flooding, ponding, and surcharge statistics if ( Node[j].type != OUTFALL ) { if ( newVolume > Node[j].fullVolume || Node[j].overflow > 0.0 ) { NodeStats[j].timeFlooded += tStep; NodeStats[j].volFlooded += Node[j].overflow * tStep; if ( canPond ) NodeStats[j].maxPondedVol = MAX(NodeStats[j].maxPondedVol, (newVolume - Node[j].fullVolume)); } // --- for dynamic wave routing, classify a node as //(5.1.013) // surcharged if its water level exceeds its crown elev. 
if (RouteModel == DW) //(5.1.013) { if ((Node[j].type != STORAGE || Node[j].surDepth > 0.0) && //(5.1.013) newDepth + Node[j].invertElev + FUDGE >= Node[j].crownElev) { NodeStats[j].timeSurcharged += tStep; } } } // --- update storage statistics if ( Node[j].type == STORAGE ) { k = Node[j].subIndex; StorageStats[k].avgVol += newVolume; StorageStats[k].evapLosses += Storage[Node[j].subIndex].evapLoss; StorageStats[k].exfilLosses += Storage[Node[j].subIndex].exfilLoss; newVolume = MIN(newVolume, Node[j].fullVolume); if ( newVolume > StorageStats[k].maxVol ) { StorageStats[k].maxVol = newVolume; StorageStats[k].maxVolDate = aDate; } StorageStats[k].maxFlow = MAX(StorageStats[k].maxFlow, Node[j].outflow); } // --- update outfall statistics if ( Node[j].type == OUTFALL ) { k = Node[j].subIndex; if ( Node[j].inflow >= MIN_RUNOFF_FLOW ) { OutfallStats[k].avgFlow += Node[j].inflow; OutfallStats[k].maxFlow = MAX(OutfallStats[k].maxFlow, Node[j].inflow); OutfallStats[k].totalPeriods++; } for (p=0; p<Nobjects[POLLUT]; p++) { OutfallStats[k].totalLoad[p] += Node[j].inflow * Node[j].newQual[p] * tStep; } SysOutfallFlow += Node[j].inflow; } // --- update inflow statistics NodeStats[j].totLatFlow += ( (Node[j].oldLatFlow + Node[j].newLatFlow) * 0.5 * tStep ); if ( fabs(Node[j].newLatFlow) > fabs(NodeStats[j].maxLatFlow) ) NodeStats[j].maxLatFlow = Node[j].newLatFlow; if ( Node[j].inflow > NodeStats[j].maxInflow ) { NodeStats[j].maxInflow = Node[j].inflow; NodeStats[j].maxInflowDate = aDate; } // --- update overflow statistics if ( Node[j].overflow > NodeStats[j].maxOverflow ) { NodeStats[j].maxOverflow = Node[j].overflow; NodeStats[j].maxOverflowDate = aDate; } } //============================================================================= void stats_updateLinkStats(int j, double tStep, DateTime aDate) // // Input: j = link index // tStep = routing time step (sec) // aDate = current date/time // Output: none // Purpose: updates flow statistics for a link. 
//
{
    int    k;
    double q, v;
    double dq;

    // --- update max. flow (magnitude of the new flow rate)
    dq = Link[j].newFlow - Link[j].oldFlow;
    q = fabs(Link[j].newFlow);
    if ( q > LinkStats[j].maxFlow )
    {
        LinkStats[j].maxFlow = q;
        LinkStats[j].maxFlowDate = aDate;
    }

    // --- update max. velocity
    v = link_getVelocity(j, q, Link[j].newDepth);
    if ( v > LinkStats[j].maxVeloc )
    {
        LinkStats[j].maxVeloc = v;
    }

    // --- update max. depth
    if ( Link[j].newDepth > LinkStats[j].maxDepth )
    {
        LinkStats[j].maxDepth = Link[j].newDepth;
    }

    if ( Link[j].type == PUMP )
    {
        if ( q >= Link[j].qFull ) LinkStats[j].timeFullFlow += tStep;

        // --- accumulate pump operating statistics while it is pumping
        if ( q > MIN_RUNOFF_FLOW )
        {
            k = Link[j].subIndex;
            PumpStats[k].minFlow = MIN(PumpStats[k].minFlow, q);
            PumpStats[k].maxFlow = LinkStats[j].maxFlow;
            PumpStats[k].avgFlow += q;
            PumpStats[k].volume += q*tStep;
            PumpStats[k].utilized += tStep;
            // tStep/3600 converts seconds to hours for the energy total
            PumpStats[k].energy += link_getPower(j)*tStep/3600.0;
            if ( Link[j].flowClass == DN_DRY )
                PumpStats[k].offCurveLow += tStep;
            if ( Link[j].flowClass == UP_DRY )
                PumpStats[k].offCurveHigh += tStep;
            // a start-up is a transition from (nearly) zero old flow
            if ( Link[j].oldFlow < MIN_RUNOFF_FLOW )
                PumpStats[k].startUps++;
            PumpStats[k].totalPeriods++;
            LinkStats[j].timeSurcharged += tStep;
            LinkStats[j].timeFullUpstream += tStep;
            LinkStats[j].timeFullDnstream += tStep;
        }
    }
    else if ( Link[j].type == CONDUIT )
    {
        // --- update time under normal flow & inlet control
        if ( Link[j].normalFlow ) LinkStats[j].timeNormalFlow += tStep;
        if ( Link[j].inletControl ) LinkStats[j].timeInletControl += tStep;

        // --- update flow classification distribution
        k = Link[j].flowClass;
        if ( k >= 0 && k < MAX_FLOW_CLASSES )
        {
            ++LinkStats[j].timeInFlowClass[k];
        }

        // --- update time conduit is full
        //     (full-flow threshold scales with the number of barrels)
        k = Link[j].subIndex;
        if ( q >= Link[j].qFull * (double)Conduit[k].barrels )
            LinkStats[j].timeFullFlow += tStep;
        if ( Conduit[k].capacityLimited )
            LinkStats[j].timeCapacityLimited += tStep;

        switch (Conduit[k].fullState)
        {
        case ALL_FULL:
            LinkStats[j].timeSurcharged += tStep;
            LinkStats[j].timeFullUpstream += tStep;
            LinkStats[j].timeFullDnstream += tStep;
            break;
        case UP_FULL:
            LinkStats[j].timeFullUpstream += tStep;
            break;
        case DN_FULL:
            LinkStats[j].timeFullDnstream += tStep;
        }
    }

    // --- update flow turn count
    //     (a turn is a sign reversal of the flow change between periods)
    k = LinkStats[j].flowTurnSign;
    LinkStats[j].flowTurnSign = SGN(dq);
    if ( fabs(dq) > 0.001 && k * LinkStats[j].flowTurnSign < 0 )
        LinkStats[j].flowTurns++;
}

//=============================================================================

void stats_findMaxStats()
//
//  Input:   none
//  Output:  none
//  Purpose: finds nodes & links with highest mass balance errors
//           & highest times Courant time-step critical.
//
{
    int    j;
    double x;

    // --- initialize max. stats arrays
    for (j=0; j<MAX_STATS; j++)
    {
        MaxMassBalErrs[j].objType = NODE;
        MaxMassBalErrs[j].index = -1;
        MaxMassBalErrs[j].value = -1.0;
        MaxCourantCrit[j].index = -1;
        MaxCourantCrit[j].value = -1.0;
        MaxFlowTurns[j].index = -1;
        MaxFlowTurns[j].value = -1.0;
    }

    // --- find links with most flow turns
    if ( StepCount > 2 )
    {
        for (j=0; j<Nobjects[LINK]; j++)
        {
            x = 100.0 * LinkStats[j].flowTurns / (2./3.*(StepCount-2));
            stats_updateMaxStats(MaxFlowTurns, LINK, j, x);
        }
    }

    // --- find nodes with largest mass balance errors
    for (j=0; j<Nobjects[NODE]; j++)
    {
        // --- skip terminal nodes and nodes with negligible inflow
        if ( Node[j].degree <= 0 ) continue;
        if ( NodeInflow[j] <= 0.1 ) continue;

        // --- evaluate mass balance error
        //     (Note: NodeInflow & NodeOutflow include any initial and final
        //            stored volumes, respectively).
if ( NodeInflow[j] > 0.0 ) x = 1.0 - NodeOutflow[j] / NodeInflow[j]; else if ( NodeOutflow[j] > 0.0 ) x = -1.0; else x = 0.0; stats_updateMaxStats(MaxMassBalErrs, NODE, j, 100.0*x); } // --- stop if not using a variable time step if ( RouteModel != DW || CourantFactor == 0.0 ) return; // --- find nodes most frequently Courant critical if ( StepCount == 0 ) return; for (j=0; j<Nobjects[NODE]; j++) { x = NodeStats[j].timeCourantCritical / StepCount; stats_updateMaxStats(MaxCourantCrit, NODE, j, 100.0*x); } // --- find links most frequently Courant critical for (j=0; j<Nobjects[LINK]; j++) { x = LinkStats[j].timeCourantCritical / StepCount; stats_updateMaxStats(MaxCourantCrit, LINK, j, 100.0*x); } } //============================================================================= void stats_updateMaxStats(TMaxStats maxStats[], int i, int j, double x) // // Input: maxStats[] = array of critical statistics values // i = object category (NODE or LINK) // j = object index // x = value of statistic for the object // Output: none // Purpose: updates the collection of most critical statistics // { int k; TMaxStats maxStats1, maxStats2; maxStats1.objType = i; maxStats1.index = j; maxStats1.value = x; for (k=0; k<MAX_STATS; k++) { if ( fabs(maxStats1.value) > fabs(maxStats[k].value) ) { maxStats2 = maxStats[k]; maxStats[k] = maxStats1; maxStats1 = maxStats2; } } }
GB_unaryop__identity_int16_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int16_uint16
// op(A') function:  GB_tran__identity_int16_uint16

// C type:   int16_t
// A type:   uint16_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    uint16_t

// type of the C matrix entries
#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: pass the casted value through)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_int16_uint16
(
    int16_t *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // each of the anz entries is independent, so a static schedule suffices
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_int16_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body comes from the shared template, specialized
    // by the macros above with GB_PHASE_2_OF_2 defined
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
VerletClusterListsRebuilder.h
/** * @file VerletClusterListsRebuilder.h * @author humig * @date 29.07.19 */ #pragma once #include "VerletClusterLists.h" #include "autopas/utils/Timer.h" #include "autopas/utils/inBox.h" namespace autopas { template <class Particle> class VerletClusterLists; namespace internal { /** * Helper class for rebuilding the VerletClusterLists container. * @tparam Particle The type of the particle the container contains. */ template <class Particle> class VerletClusterListsRebuilder { private: size_t _clusterSize; std::vector<Particle> &_particlesToAdd; std::vector<ClusterTower<Particle>> &_towers; double _towerSideLength; int _interactionLengthInTowers; double _towerSideLengthReciprocal; std::array<size_t, 2> _towersPerDim; double _interactionLength; double _interactionLengthSqr; std::array<double, 3> _boxMin; std::array<double, 3> _boxMax; std::array<double, 3> _haloBoxMin; std::array<double, 3> _haloBoxMax; public: /** * Constructs the builder from the cluster list. * * @param clusterList The cluster list to rebuild the neighbor lists for. * @param towers The towers from the cluster list to rebuild. * @param particlesToAdd New particles to add. * @param clusterSize Size of the clusters in particles. 
*/ VerletClusterListsRebuilder(const VerletClusterLists<Particle> &clusterList, std::vector<ClusterTower<Particle>> &towers, std::vector<Particle> &particlesToAdd, size_t clusterSize) : _clusterSize(clusterSize), _particlesToAdd(particlesToAdd), _towers(towers), _towerSideLength(clusterList.getTowerSideLength()), _interactionLengthInTowers(clusterList.getNumTowersPerInteractionLength()), _towerSideLengthReciprocal(clusterList.getTowerSideLengthReciprocal()), _towersPerDim(clusterList.getTowersPerDimension()), _interactionLength(clusterList.getInteractionLength()), _interactionLengthSqr(_interactionLength * _interactionLength), _boxMin(clusterList.getBoxMin()), _boxMax(clusterList.getBoxMax()), _haloBoxMin(clusterList.getHaloBoxMin()), _haloBoxMax(clusterList.getHaloBoxMax()) {} /** * Rebuilds the towers, clusters, and neighbor lists. * * @return new values for VerletClusterLists member variables. They are returned as tuple consisting of: * { * double: The new side length of each tower in xy-direction, * int: The interaction length in towers using the new tower side length, * std::array<size_t, 2>: The number of towers in each dimension using the new tower side length, * size_t: The new number of clusters in the container, * } */ auto rebuildTowersAndClusters() { auto invalidParticles = collectAllParticlesFromTowers(); invalidParticles.push_back(std::move(_particlesToAdd)); _particlesToAdd.clear(); // count particles by accumulating tower sizes size_t numParticles = std::accumulate(std::begin(invalidParticles), std::end(invalidParticles), 0, [](auto acc, auto &v) { return acc + v.size(); }); auto boxSizeWithHalo = utils::ArrayMath::sub(_haloBoxMax, _haloBoxMin); _towerSideLength = estimateOptimalGridSideLength(numParticles, boxSizeWithHalo, _clusterSize); _towerSideLengthReciprocal = 1 / _towerSideLength; _interactionLengthInTowers = static_cast<int>(std::ceil(_interactionLength * _towerSideLengthReciprocal)); _towersPerDim = calculateTowersPerDim(boxSizeWithHalo, 
_towerSideLengthReciprocal); size_t numTowers = _towersPerDim[0] * _towersPerDim[1]; // resize to number of towers. Cannot use resize since towers are not default constructable. _towers.clear(); _towers.reserve(numTowers); // create towers and make an estimate for how many particles memory needs to be allocated // 2.7 seems high but gave the best performance when testing const size_t sizeEstimation = (static_cast<double>(numParticles) / numTowers) * 2.7; for (int i = 0; i < numTowers; ++i) { _towers.emplace_back(ClusterTower<Particle>(_clusterSize)); _towers[i].reserve(sizeEstimation); } sortParticlesIntoTowers(invalidParticles); // generate clusters and count them size_t numClusters = 0; for (auto &tower : _towers) { numClusters += tower.generateClusters(); } return std::make_tuple(_towerSideLength, _interactionLengthInTowers, _towersPerDim, numClusters); } /** * Rebuilds the neighbor lists and fills Clusters with dummies as described in * ClusterTower::fillUpWithDummyParticles. * @param useNewton3 Specifies, whether neighbor lists should use newton3. This changes the way what the lists * contain. If an cluster A interacts with cluster B, then this interaction will either show up only once in the * interaction lists of the custers (for newton3 == true) or show up in the interaction lists of both (for newton3 == * false) */ void rebuildNeighborListsAndFillClusters(bool useNewton3) { clearNeighborListsAndResetDummies(); updateNeighborLists(useNewton3); double dummyParticleDistance = _interactionLength * 2; double startDummiesX = 1000 * _haloBoxMax[0]; for (size_t index = 0; index < _towers.size(); index++) { _towers[index].fillUpWithDummyParticles(startDummiesX + index * dummyParticleDistance, dummyParticleDistance); } } /** * Estimates the optimal grid side length. * @param numParticles The number of particles in the container. * @param boxSize The size of the domain. * @param clusterSize the number of particles per cluster. 
* @return an estimated optimal grid side length. */ [[nodiscard]] static double estimateOptimalGridSideLength(size_t numParticles, std::array<double, 3> boxSize, size_t clusterSize) { double volume = boxSize[0] * boxSize[1] * boxSize[2]; if (numParticles > 0) { // estimate particle density double density = numParticles / volume; return std::cbrt(clusterSize / density); } else { return std::max(boxSize[0], boxSize[1]); } } /** * Calculates the cells per dimension in the container using the _towerSideLengthReciprocal. * @param boxSize the size of the domain. * @param towerSideLengthReciprocal 1.0 / towerSidelength. * @return the cells per dimension in the container. */ [[nodiscard]] static std::array<size_t, 2> calculateTowersPerDim(std::array<double, 3> boxSize, double towerSideLengthReciprocal) { std::array<size_t, 2> towersPerDim{}; for (int d = 0; d < 2; d++) { towersPerDim[d] = static_cast<size_t>(std::ceil(boxSize[d] * towerSideLengthReciprocal)); // at least one cell towersPerDim[d] = std::max(towersPerDim[d], 1ul); } return towersPerDim; } /** * Removes previously saved neighbors from clusters and sets the positions of the dummy particles to inside of the * cluster. The latter reduces the amount of calculated interaction partners. */ void clearNeighborListsAndResetDummies() { for (auto &tower : _towers) { tower.setDummyParticlesToLastActualParticle(); for (auto &cluster : tower.getClusters()) { cluster.clearNeighbors(); } } } /** * Takes all particles from all towers and returns them. Towers are cleared afterwards. * @return All particles in the container sorted in 2D as they were in the towers. 
*/ std::vector<std::vector<Particle>> collectAllParticlesFromTowers() { std::vector<std::vector<Particle>> invalidParticles; invalidParticles.resize(_towers.size()); for (size_t towerIndex = 0; towerIndex < _towers.size(); towerIndex++) { invalidParticles[towerIndex] = _towers[towerIndex].collectAllActualParticles(); _towers[towerIndex].clear(); } return invalidParticles; } /** * Sorts all passed particles in the appropriate clusters. * * @note This Function takes a 2D vector because it expects the layout from the old clusters. * The information however, is not utilized hence when in doubt all particles can go in one vector. * * @param particles2D The particles to sort in the towers. */ void sortParticlesIntoTowers(const std::vector<std::vector<Particle>> &particles2D) { const auto numVectors = particles2D.size(); #if defined(AUTOPAS_OPENMP) /// @todo: find sensible chunksize #pragma omp parallel for schedule(dynamic) #endif for (size_t index = 0; index < numVectors; index++) { const std::vector<Particle> &vector = particles2D[index]; for (const auto &particle : vector) { if (utils::inBox(particle.getR(), _haloBoxMin, _haloBoxMax)) { auto &tower = getTower(particle.getR()); tower.addParticle(particle); } else { AutoPasLog(trace, "Not adding particle to VerletClusterLists container, because it is far outside:\n{}", particle.toString()); } } } } /** * Updates the neighbor lists. * @param useNewton3 Specifies, whether neighbor lists should use newton3. This changes the way what the lists * contain. 
If an cluster A interacts with cluster B, then this interaction will either show up only once in the * interaction lists of the custers (for newton3 == true) or show up in the interaction lists of both (for newton3 == * false) */ void updateNeighborLists(bool useNewton3) { const int maxTowerIndexX = _towersPerDim[0] - 1; const int maxTowerIndexY = _towersPerDim[1] - 1; // for all towers #if defined(AUTOPAS_OPENMP) /// @todo: find sensible chunksize #pragma omp parallel for schedule(dynamic) collapse(2) #endif for (int towerIndexY = 0; towerIndexY <= maxTowerIndexY; towerIndexY++) { for (int towerIndexX = 0; towerIndexX <= maxTowerIndexX; towerIndexX++) { const int minX = std::max(towerIndexX - _interactionLengthInTowers, 0); const int minY = std::max(towerIndexY - _interactionLengthInTowers, 0); const int maxX = std::min(towerIndexX + _interactionLengthInTowers, maxTowerIndexX); const int maxY = std::min(towerIndexY + _interactionLengthInTowers, maxTowerIndexY); iterateNeighborTowers(towerIndexX, towerIndexY, minX, maxX, minY, maxY, useNewton3, [this](auto &towerA, auto &towerB, double distBetweenTowersXYsqr, bool useNewton3) { calculateNeighborsBetweenTowers(towerA, towerB, distBetweenTowersXYsqr, useNewton3); }); } } } /** * For all clusters in a tower, given by it's x/y indices, find all neighbors in towers that are given by an area * (min/max x/y neighbor indices). * * With the useNewton3 parameter, the lists can be either built containing all, or only the forward neighbors. * If an cluster A interacts with cluster B, then this interaction will either show up only once in the * interaction lists of the custers (for newton3 == true) or show up in the interaction lists of both * (for newton3 == false) * * @tparam FunType type of function * @param towerIndexX The x index of the given tower. * @param towerIndexY The y index of the given tower. * @param minNeighborIndexX The minimum neighbor tower index in x direction. 
* @param maxNeighborIndexX The maximum neighbor tower index in x direction. * @param minNeighborIndexY The minimum neighbor tower index in y direction. * @param maxNeighborIndexY The maximum neighbor tower index in y direction. * @param useNewton3 Specifies, whether neighbor lists should contain only forward neighbors. * @param function Function to apply on every neighbor tower. Typically this is calculateNeighborsBetweenTowers(). */ template <class FunType> void iterateNeighborTowers(const int towerIndexX, const int towerIndexY, const int minNeighborIndexX, const int maxNeighborIndexX, const int minNeighborIndexY, const int maxNeighborIndexY, const bool useNewton3, FunType function) { auto &tower = getTower(towerIndexX, towerIndexY); // for all neighbor towers for (int neighborIndexY = minNeighborIndexY; neighborIndexY <= maxNeighborIndexY; neighborIndexY++) { double distBetweenTowersY = std::max(0, std::abs(towerIndexY - neighborIndexY) - 1) * _towerSideLength; for (int neighborIndexX = minNeighborIndexX; neighborIndexX <= maxNeighborIndexX; neighborIndexX++) { if (useNewton3 and not isForwardNeighbor(towerIndexX, towerIndexY, neighborIndexX, neighborIndexY)) { continue; } double distBetweenTowersX = std::max(0, std::abs(towerIndexX - neighborIndexX) - 1) * _towerSideLength; // calculate distance in xy-plane auto distBetweenTowersXYsqr = distBetweenTowersX * distBetweenTowersX + distBetweenTowersY * distBetweenTowersY; // skip if already longer than interactionLength if (distBetweenTowersXYsqr <= _interactionLengthSqr) { auto &neighborTower = getTower(neighborIndexX, neighborIndexY); function(tower, neighborTower, distBetweenTowersXYsqr, useNewton3); } } } } /** * Returns the index of a imagined interaction cell with side length equal the interaction length that contains the * given tower. * @param towerIndexX The x index of the given tower. * @param towerIndexY The y index of the given tower. 
* @return The index of the interaction cell containing the given tower. */ int get1DInteractionCellIndexForTower(const int towerIndexX, const int towerIndexY) { const int interactionCellTowerX = towerIndexX / _interactionLengthInTowers; const int interactionCellTowerY = towerIndexY / _interactionLengthInTowers; const int numInteractionCellsX = static_cast<int>(std::ceil(_towersPerDim[0] / (double)_interactionLengthInTowers)); return interactionCellTowerX + numInteractionCellsX * interactionCellTowerY; } /** * Decides if a given neighbor tower is a forward neighbor to a given tower. * A forward neighbor is either in a interaction cell with a higher index * or in the same interaction cell with a higher tower index. * * Helps the VCLC06Traversal to have no data races. * * @param towerIndexX The x-index of the given tower. * @param towerIndexY The y-index of the given tower. * @param neighborIndexX The x-index of the given neighbor tower. * @param neighborIndexY The y-index of the given neighbor tower. * @return True, if neighbor is a forward neighbor of tower. */ bool isForwardNeighbor(const int towerIndexX, const int towerIndexY, const int neighborIndexX, const int neighborIndexY) { auto interactionCellTowerIndex1D = get1DInteractionCellIndexForTower(towerIndexX, towerIndexY); auto interactionCellNeighborIndex1D = get1DInteractionCellIndexForTower(neighborIndexX, neighborIndexY); if (interactionCellNeighborIndex1D > interactionCellTowerIndex1D) { return true; } else if (interactionCellNeighborIndex1D < interactionCellTowerIndex1D) { return false; } // else if (interactionCellNeighborIndex1D == interactionCellTowerIndex1D) ... auto towerIndex1D = towerIndex2DTo1D(towerIndexX, towerIndexY); auto neighborIndex1D = towerIndex2DTo1D(neighborIndexX, neighborIndexY); return neighborIndex1D >= towerIndex1D; } /** * Calculates for all clusters in the given tower: * - all neighbor clusters within the interaction length that are contained in the given neighbor tower. 
   *
   * @param towerA The given tower.
   * @param towerB The given neighbor tower.
   * @param distBetweenTowersXYsqr The distance in the xy-plane between the towers.
   * @param useNewton3 Specifies, whether neighbor lists should use newton3. This changes the way what the lists
   * contain. If a cluster A interacts with cluster B, then this interaction will either show up only once in the
   * interaction lists of the clusters (for newton3 == true) or show up in the interaction lists of both (for newton3 ==
   * false)
   */
  void calculateNeighborsBetweenTowers(internal::ClusterTower<Particle> &towerA,
                                       internal::ClusterTower<Particle> &towerB, double distBetweenTowersXYsqr,
                                       bool useNewton3) {
    const bool isSameTower = (&towerA == &towerB);
    for (size_t clusterIndexInTowerA = 0; clusterIndexInTowerA < towerA.getNumClusters(); clusterIndexInTowerA++) {
      // if we are within one tower depending on newton3 only look at forward neighbors
      auto startClusterIndexInTowerB = isSameTower and useNewton3 ? clusterIndexInTowerA + 1 : 0;
      auto &clusterA = towerA.getCluster(clusterIndexInTowerA);
      auto [clusterABoxBottom, clusterABoxTop, clusterAContainsParticles] = clusterA.getZMinMax();

      // Clusters without actual particles (pure dummy clusters) need no neighbor list.
      if (clusterAContainsParticles) {
        for (size_t clusterIndexInTowerB = startClusterIndexInTowerB; clusterIndexInTowerB < towerB.getNumClusters();
             clusterIndexInTowerB++) {
          // a cluster cannot be a neighbor to itself
          // If newton3 is true this is not possible because of the choice of the start index.
          if (not useNewton3 and isSameTower and clusterIndexInTowerA == clusterIndexInTowerB) {
            continue;
          }
          auto &clusterB = towerB.getCluster(clusterIndexInTowerB);
          auto [clusterBBoxBottom, clusterBBoxTop, clusterBcontainsParticles] = clusterB.getZMinMax();
          if (clusterBcontainsParticles) {
            // Combine the xy tower gap with the z gap of the cluster bounding boxes; if the total
            // squared distance is within the interaction length, B is a neighbor of A.
            double distZ = bboxDistance(clusterABoxBottom, clusterABoxTop, clusterBBoxBottom, clusterBBoxTop);
            if (distBetweenTowersXYsqr + distZ * distZ <= _interactionLengthSqr) {
              clusterA.addNeighbor(clusterB);
            }
          }
        }
      }
    }
  }

  /**
   * Calculates the distance of two bounding boxes in one dimension. Assumes disjoint bounding boxes.
   *
   * @param min1 minimum coordinate of first bbox in tested dimension
   * @param max1 maximum coordinate of first bbox in tested dimension
   * @param min2 minimum coordinate of second bbox in tested dimension
   * @param max2 maximum coordinate of second bbox in tested dimension
   * @return distance
   */
  [[nodiscard]] double bboxDistance(const double min1, const double max1, const double min2, const double max2) const {
    if (max1 < min2) {
      // box 1 lies entirely below box 2
      return min2 - max1;
    } else if (min1 > max2) {
      // box 1 lies entirely above box 2
      return min1 - max2;
    } else {
      // overlapping boxes have distance 0
      return 0;
    }
  }

  /**
   * Returns the tower the given 3D coordinates are in.
   * If the location is outside of the domain, the nearest tower is returned.
   *
   * @param location The 3D coordinates.
   * @return Tower reference.
   */
  auto &getTower(std::array<double, 3> location) {
    auto [towerIndexX, towerIndexY] = getTowerCoordinates(location);
    return getTower(towerIndexX, towerIndexY);
  }

  /**
   * Returns the coordinates of the tower in the tower grid the given 3D coordinates are in.
   * If the location is outside of the domain, the nearest tower is returned.
   *
   * @param location The 3D coordinates.
   * @return Tower reference.
*/ std::array<size_t, 2> getTowerCoordinates(std::array<double, 3> location) { std::array<size_t, 2> towerIndex2D{}; for (int dim = 0; dim < 2; dim++) { const auto towerDimIndex = static_cast<long int>(floor((location[dim] - _haloBoxMin[dim]) * _towerSideLengthReciprocal)); const auto towerDimIndexNonNegative = static_cast<size_t>(std::max(towerDimIndex, 0l)); const auto towerDimIndexNonLargerValue = std::min(towerDimIndexNonNegative, _towersPerDim[dim] - 1); towerIndex2D[dim] = towerDimIndexNonLargerValue; /// @todo this is a sanity check to prevent doubling of particles, but could be done better! e.g. by border and // flag manager if (location[dim] >= _haloBoxMax[dim]) { towerIndex2D[dim] = _towersPerDim[dim] - 1; } else if (location[dim] < _haloBoxMin[dim]) { towerIndex2D[dim] = 0; } } return towerIndex2D; } /** * Returns the 1D index for the given 2D tower index. * * @param x The x-index of the tower. * @param y The y-index of the tower. * @return 1D index for _towers vector. */ size_t towerIndex2DTo1D(const size_t x, const size_t y) { // It is necessary to use the static method in VerletClusterLists here instead of the member method, because // _towersPerDim does not have the new value yet in the container. return VerletClusterLists<Particle>::towerIndex2DTo1D(x, y, _towersPerDim); } /** * Returns the tower for the given 2D tower index. * @param x The x-index of the tower. * @param y The y-index of the tower. * @return Tower reference. */ auto &getTower(const size_t x, const size_t y) { return _towers[towerIndex2DTo1D(x, y)]; } }; } // namespace internal } // namespace autopas
boxloop_cuda.h
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Header info for the BoxLoop
 *
 *****************************************************************************/

/*--------------------------------------------------------------------------
 * BoxLoop macros:
 *
 * CUDA implementation of hypre's structured-grid BoxLoops. Each BoxLoopKBegin/
 * End pair expands to a host+device lambda that is either run sequentially
 * (optionally OpenMP-parallel) on the host or launched as a CUDA kernel,
 * depending on the struct execution policy of the hypre handle.
 *--------------------------------------------------------------------------*/

#ifndef HYPRE_NEWBOXLOOP_HEADER
#define HYPRE_NEWBOXLOOP_HEADER

/* lambdas must be callable from both host and device code */
#define HYPRE_LAMBDA [=] __host__ __device__
#define BLOCKSIZE 512

/* Per-box loop metadata captured by value into the loop lambda:
 * lsizeK  - loop extent in dimension K,
 * stridesK - stride in dimension K,
 * bstartK - loop start relative to the data box origin,
 * bsizeK  - data box extent (imax - imin) in dimension K. */
typedef struct hypre_Boxloop_struct
{
   HYPRE_Int lsize0,lsize1,lsize2;
   HYPRE_Int strides0,strides1,strides2;
   HYPRE_Int bstart0,bstart1,bstart2;
   HYPRE_Int bsize0,bsize1,bsize2;
} hypre_Boxloop;

/* hypre_fence: no-op in production (#if 1); the disabled branch is a debugging
 * aid that checks the last CUDA error and synchronizes the device. */
#if 1
#define hypre_fence() /*printf("\n hypre_newBoxLoop in %s(%d) function %s\n",__FILE__,__LINE__,__FUNCTION__);*/
#else
#define hypre_fence() \
{ \
   cudaError err = cudaGetLastError(); \
   if ( cudaSuccess != err ) \
   { \
      printf("\n ERROR hypre_newBoxLoop: %s in %s(%d) function %s\n",cudaGetErrorString(err),__FILE__,__LINE__,__FUNCTION__); \
      /* HYPRE_Int *p = NULL; *p = 1; */ \
   } \
   HYPRE_CUDA_CALL( cudaDeviceSynchronize() ); \
}
#endif

/* #define hypre_reduce_policy  cuda_reduce<BLOCKSIZE> */

#ifdef __cplusplus
extern "C++" {
#endif

/* One thread per loop index; extra threads past `length` do nothing. */
template <typename LOOP_BODY>
__global__ void
forall_kernel( LOOP_BODY loop_body,
               HYPRE_Int length )
{
   HYPRE_Int idx = blockDim.x * blockIdx.x + threadIdx.x;

   if (idx < length)
   {
      loop_body(idx);
   }
}

/* Dispatch a flat [0, length) loop either to the host (optionally OpenMP) or
 * to a CUDA kernel launch, depending on `policy`. */
template<typename LOOP_BODY>
void
BoxLoopforall( HYPRE_ExecutionPolicy policy,
               HYPRE_Int             length,
               LOOP_BODY             loop_body )
{
   if (policy == HYPRE_EXEC_HOST)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for (HYPRE_Int idx = 0; idx < length; idx++)
      {
         loop_body(idx);
      }
   }
   else if (policy == HYPRE_EXEC_DEVICE)
   {
      HYPRE_Int gridSize = (length + BLOCKSIZE - 1) / BLOCKSIZE;

      const dim3 gDim(gridSize), bDim(BLOCKSIZE);

      HYPRE_CUDA_LAUNCH( forall_kernel, gDim, bDim, loop_body, length );
   }
}

/* Grid-stride style driver for reductions: the body receives its global thread
 * id, the total number of threads, and the loop length. */
template <typename LOOP_BODY>
__global__ void
reductionforall_kernel( LOOP_BODY ReductionLoop,
                        HYPRE_Int length )
{
   ReductionLoop(blockDim.x*blockIdx.x+threadIdx.x, blockDim.x*gridDim.x, length);
}

/* Device-only dispatch for reduction loops (host execution is not supported
 * here and asserts). The grid is capped at 1024 blocks. */
template<typename LOOP_BODY>
void
ReductionBoxLoopforall( HYPRE_ExecutionPolicy policy,
                        HYPRE_Int             length,
                        LOOP_BODY             ReductionLoop )
{
   if (length <= 0)
   {
      return;
   }

   if (policy == HYPRE_EXEC_HOST)
   {
      hypre_assert(0);
   }
   else if (policy == HYPRE_EXEC_DEVICE)
   {
      HYPRE_Int gridSize = (length + BLOCKSIZE - 1) / BLOCKSIZE;
      gridSize = hypre_min(gridSize, 1024);

      /* hypre_printf("length= %d, blocksize = %d, gridsize = %d\n", length, BLOCKSIZE, gridSize); */

      const dim3 gDim(gridSize), bDim(BLOCKSIZE);

      HYPRE_CUDA_LAUNCH( reductionforall_kernel, gDim, bDim, ReductionLoop, length );
   }
}

#ifdef __cplusplus
}
#endif

/* Compute the linear offset hypre__i into data box `box` for the current
 * `local_idx`, accumulating the box pitch in hypre_boxD##k. */
#define hypre_BoxLoopIncK(k,box,hypre__i)                                               \
   HYPRE_Int hypre_boxD##k = 1;                                                         \
   HYPRE_Int hypre__i = 0;                                                              \
   hypre__i += (hypre_IndexD(local_idx, 0)*box.strides0 + box.bstart0) * hypre_boxD##k; \
   hypre_boxD##k *= hypre_max(0, box.bsize0 + 1);                                       \
   hypre__i += (hypre_IndexD(local_idx, 1)*box.strides1 + box.bstart1) * hypre_boxD##k; \
   hypre_boxD##k *= hypre_max(0, box.bsize1 + 1);                                       \
   hypre__i += (hypre_IndexD(local_idx, 2)*box.strides2 + box.bstart2) * hypre_boxD##k; \
   hypre_boxD##k *= hypre_max(0, box.bsize2 + 1);

/* Total number of loop iterations = product of loop_size over all dims. */
#define hypre_newBoxLoopInit(ndim,loop_size)              \
   HYPRE_Int hypre__tot = 1;                              \
   for (HYPRE_Int hypre_d = 0;hypre_d < ndim;hypre_d ++)  \
      hypre__tot *= loop_size[hypre_d];

#define hypre_BasicBoxLoopInit(ndim,loop_size)            \
   HYPRE_Int hypre__tot = 1;                              \
   for (HYPRE_Int hypre_d = 0;hypre_d < ndim;hypre_d ++)  \
      hypre__tot *= loop_size[hypre_d];                   \

/* Decompose the flat index idx into the 3D index local_idx. */
#define hypre_newBoxLoopDeclare(box)                     \
   hypre_Index local_idx;                                \
   HYPRE_Int idx_local = idx;                            \
   hypre_IndexD(local_idx, 0)  = idx_local % box.lsize0; \
   idx_local = idx_local / box.lsize0;                   \
   hypre_IndexD(local_idx, 1)  = idx_local % box.lsize1; \
   idx_local = idx_local / box.lsize1;                   \
   hypre_IndexD(local_idx, 2)  = idx_local % box.lsize2; \

/* BoxLoop over loop_size only, no data boxes. */
#define hypre_newBoxLoop0Begin(ndim, loop_size)                                                                      \
{                                                                                                                    \
   hypre_newBoxLoopInit(ndim,loop_size);                                                                             \
   BoxLoopforall(hypre_HandleStructExecPolicy(hypre_handle()),hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx)                \
   {

#define hypre_newBoxLoop0End()                                                                                       \
   });                                                                                                               \
   hypre_fence();                                                                                                    \
}

/* Fill databox##k from data box k (loop sizes, strides, start offsets, box
 * extents), padding unused dimensions with neutral values. */
#define hypre_BoxLoopDataDeclareK(k,ndim,loop_size,dbox,start,stride) \
   hypre_Boxloop databox##k;                                          \
   databox##k.lsize0   = loop_size[0];                                \
   databox##k.strides0 = stride[0];                                   \
   databox##k.bstart0  = start[0] - dbox->imin[0];                    \
   databox##k.bsize0   = dbox->imax[0]-dbox->imin[0];                 \
   if (ndim > 1)                                                      \
   {                                                                  \
      databox##k.lsize1   = loop_size[1];                             \
      databox##k.strides1 = stride[1];                                \
      databox##k.bstart1  = start[1] - dbox->imin[1];                 \
      databox##k.bsize1   = dbox->imax[1]-dbox->imin[1];              \
   }                                                                  \
   else                                                               \
   {                                                                  \
      databox##k.lsize1   = 1;                                        \
      databox##k.strides1 = 0;                                        \
      databox##k.bstart1  = 0;                                        \
      databox##k.bsize1   = 0;                                        \
   }                                                                  \
   if (ndim == 3)                                                     \
   {                                                                  \
      databox##k.lsize2   = loop_size[2];                             \
      databox##k.strides2 = stride[2];                                \
      databox##k.bstart2  = start[2] - dbox->imin[2];                 \
      databox##k.bsize2   = dbox->imax[2]-dbox->imin[2];              \
   }                                                                  \
   else                                                               \
   {                                                                  \
      databox##k.lsize2   = 1;                                        \
      databox##k.strides2 = 0;                                        \
      databox##k.bstart2  = 0;                                        \
      databox##k.bsize2   = 0;                                        \
   }

/* BoxLoop over one data box; i1 is the linear index into dbox1. */
#define hypre_newBoxLoop1Begin(ndim, loop_size,                                                                      \
                               dbox1, start1, stride1, i1)                                                           \
{                                                                                                                    \
   hypre_newBoxLoopInit(ndim,loop_size);                                                                             \
   hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1);                                                 \
   BoxLoopforall(hypre_HandleStructExecPolicy(hypre_handle()),hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx)                \
   {                                                                                                                 \
      hypre_newBoxLoopDeclare(databox1);                                                                             \
      hypre_BoxLoopIncK(1,databox1,i1);

#define hypre_newBoxLoop1End(i1)                                                                                     \
   });                                                                                                               \
   hypre_fence();                                                                                                    \
}

/* BoxLoop over two data boxes. */
#define hypre_newBoxLoop2Begin(ndim, loop_size,                                                                      \
                               dbox1, start1, stride1, i1,                                                           \
                               dbox2, start2, stride2, i2)                                                           \
{                                                                                                                    \
   hypre_newBoxLoopInit(ndim,loop_size);                                                                             \
   hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1);                                                 \
   hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2);                                                 \
   BoxLoopforall(hypre_HandleStructExecPolicy(hypre_handle()),hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx)                \
   {                                                                                                                 \
      hypre_newBoxLoopDeclare(databox1);                                                                             \
      hypre_BoxLoopIncK(1,databox1,i1);                                                                              \
      hypre_BoxLoopIncK(2,databox2,i2);

#define hypre_newBoxLoop2End(i1, i2)                                                                                 \
   });                                                                                                               \
   hypre_fence();                                                                                                    \
}

/* BoxLoop over three data boxes. */
#define hypre_newBoxLoop3Begin(ndim, loop_size,                                                                      \
                               dbox1, start1, stride1, i1,                                                           \
                               dbox2, start2, stride2, i2,                                                           \
                               dbox3, start3, stride3, i3)                                                           \
{                                                                                                                    \
   hypre_newBoxLoopInit(ndim,loop_size);                                                                             \
   hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1);                                                 \
   hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2);                                                 \
   hypre_BoxLoopDataDeclareK(3,ndim,loop_size,dbox3,start3,stride3);                                                 \
   BoxLoopforall(hypre_HandleStructExecPolicy(hypre_handle()),hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx)                \
   {                                                                                                                 \
      hypre_newBoxLoopDeclare(databox1);                                                                             \
      hypre_BoxLoopIncK(1,databox1,i1);                                                                              \
      hypre_BoxLoopIncK(2,databox2,i2);                                                                              \
      hypre_BoxLoopIncK(3,databox3,i3);

#define hypre_newBoxLoop3End(i1, i2,i3)                                                                              \
   });                                                                                                               \
   hypre_fence();                                                                                                    \
}

/* BoxLoop over four data boxes. */
#define hypre_newBoxLoop4Begin(ndim, loop_size,                                                                      \
                               dbox1, start1, stride1, i1,                                                           \
                               dbox2, start2, stride2, i2,                                                           \
                               dbox3, start3, stride3, i3,                                                           \
                               dbox4, start4, stride4, i4)                                                           \
{                                                                                                                    \
   hypre_newBoxLoopInit(ndim,loop_size);                                                                             \
   hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1);                                                 \
   hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2);                                                 \
   hypre_BoxLoopDataDeclareK(3,ndim,loop_size,dbox3,start3,stride3);                                                 \
   hypre_BoxLoopDataDeclareK(4,ndim,loop_size,dbox4,start4,stride4);                                                 \
   BoxLoopforall(hypre_HandleStructExecPolicy(hypre_handle()),hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx)                \
   {                                                                                                                 \
      hypre_newBoxLoopDeclare(databox1);                                                                             \
      hypre_BoxLoopIncK(1,databox1,i1);                                                                              \
      hypre_BoxLoopIncK(2,databox2,i2);                                                                              \
      hypre_BoxLoopIncK(3,databox3,i3);                                                                              \
      hypre_BoxLoopIncK(4,databox4,i4);

#define hypre_newBoxLoop4End(i1, i2, i3, i4)                                                                         \
   });                                                                                                               \
   hypre_fence();                                                                                                    \
}

/* Basic variant: stride-only databox (no data box offsets). */
#define zypre_BasicBoxLoopDataDeclareK(k,ndim,loop_size,stride) \
   hypre_Boxloop databox##k;                                    \
   databox##k.lsize0   = loop_size[0];                          \
   databox##k.strides0 = stride[0];                             \
   databox##k.bstart0  = 0;                                     \
   databox##k.bsize0   = 0;                                     \
   if (ndim > 1)                                                \
   {                                                            \
      databox##k.lsize1   = loop_size[1];                       \
      databox##k.strides1 = stride[1];                          \
      databox##k.bstart1  = 0;                                  \
      databox##k.bsize1   = 0;                                  \
   }                                                            \
   else                                                         \
   {                                                            \
      databox##k.lsize1   = 1;                                  \
      databox##k.strides1 = 0;                                  \
      databox##k.bstart1  = 0;                                  \
      databox##k.bsize1   = 0;                                  \
   }                                                            \
   if (ndim == 3)                                               \
   {                                                            \
      databox##k.lsize2   = loop_size[2];                       \
      databox##k.strides2 = stride[2];                          \
      databox##k.bstart2  = 0;                                  \
      databox##k.bsize2   = 0;                                  \
   }                                                            \
   else                                                         \
   {                                                            \
      databox##k.lsize2   = 1;                                  \
      databox##k.strides2 = 0;                                  \
      databox##k.bstart2  = 0;                                  \
      databox##k.bsize2   = 0;                                  \
   }

/* NOTE(review): the Basic begin macros have no matching End here — callers
 * close them with the regular hypre_newBoxLoopKEnd macros. */
#define zypre_newBasicBoxLoop1Begin(ndim, loop_size,                                                                 \
                                    stride1, i1)                                                                     \
{                                                                                                                    \
   hypre_BasicBoxLoopInit(ndim,loop_size);                                                                           \
   zypre_BasicBoxLoopDataDeclareK(1,ndim,loop_size,stride1);                                                         \
   BoxLoopforall(hypre_HandleStructExecPolicy(hypre_handle()),hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx)                \
   {                                                                                                                 \
      hypre_newBoxLoopDeclare(databox1);                                                                             \
      hypre_BoxLoopIncK(1,databox1,i1);                                                                              \

#define zypre_newBasicBoxLoop2Begin(ndim, loop_size,                                                                 \
                                    stride1, i1,                                                                     \
                                    stride2, i2)                                                                     \
{                                                                                                                    \
   hypre_BasicBoxLoopInit(ndim,loop_size);                                                                           \
   zypre_BasicBoxLoopDataDeclareK(1,ndim,loop_size,stride1);                                                         \
   zypre_BasicBoxLoopDataDeclareK(2,ndim,loop_size,stride2);                                                         \
   BoxLoopforall(hypre_HandleStructExecPolicy(hypre_handle()),hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx)                \
   {                                                                                                                 \
      hypre_newBoxLoopDeclare(databox1);                                                                             \
      hypre_BoxLoopIncK(1,databox1,i1);                                                                              \
      hypre_BoxLoopIncK(2,databox2,i2);                                                                              \

/* Plain flat loop over [0, size) with loop variable `idx`. */
#define hypre_LoopBegin(size,idx)                                                                                    \
{                                                                                                                    \
   BoxLoopforall(hypre_HandleStructExecPolicy(hypre_handle()),size,HYPRE_LAMBDA (HYPRE_Int idx)                      \
   {

#define hypre_LoopEnd()                                                                                              \
   });                                                                                                               \
   hypre_fence();                                                                                                    \
}

/* Copy the current 3D loop index into `index`. */
#define hypre_newBoxLoopGetIndex(index)      \
  index[0] = hypre_IndexD(local_idx, 0);     \
  index[1] = hypre_IndexD(local_idx, 1);     \
  index[2] = hypre_IndexD(local_idx, 2);

#define hypre_BoxLoopBlock()       0

/* Map the generic BoxLoop API onto the CUDA implementation above. */
#define hypre_BoxLoop0Begin      hypre_newBoxLoop0Begin
#define hypre_BoxLoop0For        hypre_newBoxLoop0For
#define hypre_BoxLoop0End        hypre_newBoxLoop0End
#define hypre_BoxLoop1Begin      hypre_newBoxLoop1Begin
#define hypre_BoxLoop1For        hypre_newBoxLoop1For
#define hypre_BoxLoop1End        hypre_newBoxLoop1End
#define hypre_BoxLoop2Begin      hypre_newBoxLoop2Begin
#define hypre_BoxLoop2For        hypre_newBoxLoop2For
#define hypre_BoxLoop2End        hypre_newBoxLoop2End
#define hypre_BoxLoop3Begin      hypre_newBoxLoop3Begin
#define hypre_BoxLoop3For        hypre_newBoxLoop3For
#define hypre_BoxLoop3End        hypre_newBoxLoop3End
#define hypre_BoxLoop4Begin      hypre_newBoxLoop4Begin
#define hypre_BoxLoop4For        hypre_newBoxLoop4For
#define hypre_BoxLoop4End        hypre_newBoxLoop4End
#define hypre_BasicBoxLoop1Begin zypre_newBasicBoxLoop1Begin
#define hypre_BasicBoxLoop2Begin zypre_newBasicBoxLoop2Begin

/* Reduction BoxLoop1: the body runs in a grid-stride loop and each block
 * combines its partial results via reducesum.BlockReduce(). */
#define hypre_BoxLoop1ReductionBegin(ndim, loop_size,                                            \
                                     dbox1, start1, stride1, i1,                                 \
                                     reducesum)                                                  \
{                                                                                                \
   hypre_newBoxLoopInit(ndim,loop_size);                                                         \
   hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1);                             \
   reducesum.nblocks = hypre_min( (hypre__tot+BLOCKSIZE-1)/BLOCKSIZE, 1024 );                    \
   ReductionBoxLoopforall(hypre_HandleStructExecPolicy(hypre_handle()), hypre__tot,              \
                          HYPRE_LAMBDA (HYPRE_Int tid, HYPRE_Int nthreads,                       \
                                        HYPRE_Int len)                                           \
   {                                                                                             \
      for (HYPRE_Int idx = tid;                                                                  \
           idx < len;                                                                            \
           idx += nthreads)                                                                      \
      {                                                                                          \
         hypre_newBoxLoopDeclare(databox1);                                                      \
         hypre_BoxLoopIncK(1,databox1,i1);

#define hypre_BoxLoop1ReductionEnd(i1, reducesum)                                                \
      }                                                                                          \
      reducesum.BlockReduce();                                                                   \
   });                                                                                           \
   hypre_fence();                                                                                \
}

/* Reduction BoxLoop2 */
#define hypre_BoxLoop2ReductionBegin(ndim, loop_size,                                            \
                                     dbox1, start1, stride1, i1,                                 \
                                     dbox2, start2, stride2, i2,                                 \
                                     reducesum)                                                  \
{                                                                                                \
   hypre_newBoxLoopInit(ndim,loop_size);                                                         \
   hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1);                             \
   hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2);                             \
   reducesum.nblocks = hypre_min( (hypre__tot+BLOCKSIZE-1)/BLOCKSIZE, 1024 );                    \
   ReductionBoxLoopforall(hypre_HandleStructExecPolicy(hypre_handle()), hypre__tot,              \
                          HYPRE_LAMBDA (HYPRE_Int tid, HYPRE_Int nthreads,                       \
                                        HYPRE_Int len)                                           \
   {                                                                                             \
      for (HYPRE_Int idx = tid;                                                                  \
           idx < len;                                                                            \
           idx += nthreads)                                                                      \
      {                                                                                          \
         hypre_newBoxLoopDeclare(databox1);                                                      \
         hypre_BoxLoopIncK(1,databox1,i1);                                                       \
         hypre_BoxLoopIncK(2,databox2,i2);

#define hypre_BoxLoop2ReductionEnd(i1, i2, reducesum)                                            \
      }                                                                                          \
      reducesum.BlockReduce();                                                                   \
   });                                                                                           \
   hypre_fence();                                                                                \
}

#endif
biphasic_filling_utilities.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:		 BSD License
//					 Kratos default license: kratos/license.txt
//
//  Main authors:    Kazem Kamran
//                   Riccardo Rossi
//                   Pooyan Dadvand
//                   Jordi Rubio
//

#if !defined(KRATOS_BIPHASIC_FILLING_UTILITIES_INCLUDED )
#define  KRATOS_BIPHASIC_FILLING_UTILITIES_INCLUDED

// System includes
#include <string>
#include <iostream>
#include <algorithm>

// External includes

// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
#include "utilities/geometry_utilities.h"
#include "utilities/enrichment_utilities.h"
#include "utilities/timer.h"
//#include "geometries/tetrahedra_3d_4.h"
#include "geometries/point.h"
#include "thermo_mechanical_application.h"
// #include "custom_conditions/environment_contact.h"
//#include "includes/variables.h"
#include "../incompressible_fluid_application/custom_utilities/parallel_extrapolation_utilities.h"
#include "includes/kratos_flags.h"
#include "includes/c2c_variables.h"
#include "includes/cfd_variables.h"

namespace Kratos
{

/// Utilities for two-phase (water/air) mold-filling simulations: automatic air
/// exits, fill-percentage bookkeeping, per-phase material properties, and
/// level-set (DISTANCE) housekeeping.
class BiphasicFillingUtilities
{
public:
    KRATOS_CLASS_POINTER_DEFINITION(BiphasicFillingUtilities);

    //**********************************************************************************************
    //**********************************************************************************************
    // If no explicit air exit exists, turn dry (DISTANCE > 0) smooth-boundary nodes into
    // automatic exits and assign the air-phase Smagorinsky constant.
    // Returns 1.0 if an exit exists or at least one dry node was found (MPI-synchronized).
    double CreateAutoExitAssignAirSmagorinsky(ModelPart& ThisModelPart, double y_wall, double C_Smagorinsky)
    {
        KRATOS_TRY;
/*        AirSmagorinskey(ThisModelPart, C_Smagorinsky);*/
        int node_size = ThisModelPart.Nodes().size();
        double is_exit = 0.0;
        // A node with IS_STRUCTURE == 0 on a slip boundary (IS_SLIP >= 10) already acts as an exit.
        for (int ii = 0; ii < node_size; ii++)
        {
            ModelPart::NodesContainerType::iterator it = ThisModelPart.NodesBegin() + ii;
            double str_flag = it->GetValue(IS_STRUCTURE);
            double slip_flag = it->GetSolutionStepValue(IS_SLIP);

            if (str_flag == 0.0 && slip_flag>=10.0)
            {
                is_exit = 1.0;
//                 return 1.0;
            }
        }
        //synchronize over MPI partitions
        is_exit = ThisModelPart.GetCommunicator().GetDataCommunicator().MaxAll(is_exit);
        if(is_exit == 1.0)
            return 1.0;

        // if there is no dry node
        double is_dry_node = 0.0;
        // NOTE(review): is_dry_node is shared and written unguarded by multiple threads; all
        // writers store the same value (1.0), so the result is still deterministic.
        #pragma omp parallel for firstprivate(node_size)
        for (int ii = 0; ii < node_size; ii++)
        {
            ModelPart::NodesContainerType::iterator it = ThisModelPart.NodesBegin() + ii;
            double dist = it->FastGetSolutionStepValue(DISTANCE);
            double slip_flag = it->GetSolutionStepValue(IS_SLIP);
            double str_flag = it->GetValue(IS_STRUCTURE);

            if(dist > 0.0)
            {
                is_dry_node = 1.0;
                //slip_flag=10.0 refers to the boundary nodes with well-defined normal
                if(slip_flag==10.0 && str_flag!=0.0)
                {
                    // open this dry smooth-boundary node as an air exit
                    it->SetValue(IS_STRUCTURE,0.0);
                    it->SetValue(Y_WALL,y_wall);
                }
            }
        }
        //synchronize over MPI partitions
        is_dry_node = ThisModelPart.GetCommunicator().GetDataCommunicator().MaxAll(is_dry_node);

        //assign smagorinsky at air element
        AirSmagorinskey(ThisModelPart, C_Smagorinsky);

        return is_dry_node;
        KRATOS_CATCH("")
    }
    //**********************************************************************************************
    // Python reference for AssignSmoothBoundaryAirExit (kept for documentation):
    /*for node in fluid_model_part.Nodes:
        slip_flag = node.GetSolutionStepValue(IS_SLIP)
        nd_dist = node.GetSolutionStepValue(DISTANCE)
        if((slip_flag == 20.0 or slip_flag == 30.0 )):#
            if(nd_dist< 0.0):
                node.SetValue(IS_STRUCTURE,1.0)
                node.SetValue(Y_WALL,y_wall_val)
            else:
                node.SetValue(IS_STRUCTURE,0.0)
                node.SetValue(Y_WALL,y_wall_val*y_wall_fac)
        if(slip_flag == 10.0):
            if(nd_dist< 0.0):
                node.SetValue(Y_WALL,y_wall_val)
                node.SetValue(IS_STRUCTURE,1.0)
            else:
                node.SetValue(Y_WALL,y_wall_val*y_wall_fac)
                node.SetValue(IS_STRUCTURE,0.0)*/
    //**********************************************************************************************
    // Flag boundary nodes as wall/air-exit depending on wetness (DISTANCE) and boundary type
    // (IS_SLIP: 10 smooth, 20 edge, 30 corner). Returns the global filling percentage.
    double AssignSmoothBoundaryAirExit(ModelPart& ThisModelPart, bool air_exit_flag, const double y_wall_val, const double y_wall_fac)
    {
        KRATOS_TRY;
//        int node_size = ThisModelPart.Nodes().size();
        int node_size = ThisModelPart.GetCommunicator().LocalMesh().Nodes().size();
        // dry smooth-boundary nodes become exits (IS_STRUCTURE = 0) only if air exits are enabled
        double is_str = 1.0;
        if(air_exit_flag)
            is_str = 0.0;

        int wet_nodes = 0;
        #pragma omp parallel for firstprivate(node_size)
        for (int ii = 0; ii < node_size; ii++)
        {
//             ModelPart::NodesContainerType::iterator it = ThisModelPart.NodesBegin() + ii;
            ModelPart::NodesContainerType::iterator it = ThisModelPart.GetCommunicator().LocalMesh().NodesBegin() + ii;
            double dist = it->FastGetSolutionStepValue(DISTANCE);
            double slip_flag = it->GetSolutionStepValue(IS_SLIP);
            if(dist<0.0)
            {
                #pragma omp atomic
                wet_nodes++;
            }

            if(slip_flag == 20.0 || slip_flag == 30.0 )//edges(20) and corners(30) are automatic air exits till they are wetten
                if(dist<0.0)
                {
                    it->SetValue(IS_STRUCTURE,1.0);
                    it->SetValue(Y_WALL,y_wall_val);
                }
                else
                {
                    it->SetValue(IS_STRUCTURE,0.0);
                    it->SetValue(Y_WALL,y_wall_val*y_wall_fac);
                }
            else if(slip_flag == 10.0)//smooth boundaries(10), if dry, can be air exit or not
            {
                if(dist<0.0)
                {
                    it->SetValue(IS_STRUCTURE,1.0);
                    it->SetValue(Y_WALL,y_wall_val);
                }
                else
                {
                    it->SetValue(IS_STRUCTURE,is_str);
                    it->SetValue(Y_WALL,y_wall_val*y_wall_fac);
                }//y_wall_val*y_wall_fac
            }

            //filling time
            //double is_visited = it->FastGetSolutionStepValue(IS_VISITED);
            //if(is_visited == 0.0 && dist<=0.0)
            //{
            //	it->FastGetSolutionStepValue(IS_VISITED) = 1.0;
            //	double filling_time = ThisModelPart.GetProcessInfo()[TIME];
            //	it->FastGetSolutionStepValue(FILLTIME) = filling_time;
            //}
        }
        //synchronize over MPI partitions
        wet_nodes = ThisModelPart.GetCommunicator().GetDataCommunicator().SumAll(wet_nodes);
        node_size = ThisModelPart.GetCommunicator().GetDataCommunicator().SumAll(node_size);

        double filling_percent = 0.0;
        if(wet_nodes != 0)
            filling_percent = 100.0*double(wet_nodes)/double(node_size);

        return filling_percent;

        KRATOS_CATCH("")
    }
    //**********************************************************************************************
    //**********************************************************************************************
    // Count wet nodes, stamp newly wetted nodes with the (corrected) fill time, and return the
    // global filling percentage.
    double ComputeFillPercentage(ModelPart& ThisModelPart, const double corrected_time )
    {
        KRATOS_TRY

        int node_size = ThisModelPart.GetCommunicator().LocalMesh().Nodes().size();
        int wet_nodes = 0;
        #pragma omp parallel for firstprivate(node_size) reduction(+:wet_nodes)
        for (int ii = 0; ii < node_size; ii++)
        {
            ModelPart::NodesContainerType::iterator it = ThisModelPart.GetCommunicator().LocalMesh().NodesBegin() + ii;
            double dist = it->FastGetSolutionStepValue(DISTANCE);
            if(dist<0.0)
                wet_nodes++;

            //filling time: stamp a node the first time it becomes wet
            double is_visited = it->FastGetSolutionStepValue(IS_VISITED);
            if(is_visited == 0.0 && dist<=0.0)
            {
                it->FastGetSolutionStepValue(IS_VISITED) = 1.0;
                //double filling_time = ThisModelPart.GetProcessInfo()[TIME];
                //it->FastGetSolutionStepValue(FILLTIME) = filling_time*time_correction_factor;
                it->FastGetSolutionStepValue(FILLTIME) =corrected_time;
            }
        }
        //synchronize over MPI partitions
        wet_nodes = ThisModelPart.GetCommunicator().GetDataCommunicator().SumAll(wet_nodes);
        node_size = ThisModelPart.GetCommunicator().GetDataCommunicator().SumAll(node_size);

        double filling_percent = 0.0;
        if(wet_nodes != 0)
            filling_percent = 100.0*double(wet_nodes)/double(node_size);

        return filling_percent;

        KRATOS_CATCH("")
    }
    //**********************************************************************************************
    //**********************************************************************************************
    // Assign water properties to wet nodes (DISTANCE <= 0) and air properties to dry nodes.
    void ApplyFluidProperties(ModelPart& ThisModelPart, const double water_mu, const double water_density ,const double air_mu,const double air_density)
    {
        KRATOS_TRY;
        int node_size = ThisModelPart.Nodes().size();
        #pragma omp parallel for firstprivate(node_size)
        for (int ii = 0; ii < node_size; ii++)
        {
            ModelPart::NodesContainerType::iterator it = ThisModelPart.NodesBegin() + ii;
            double dist = it->FastGetSolutionStepValue(DISTANCE);

            if(dist<=0.0)
            {
                it->FastGetSolutionStepValue(DENSITY) = water_density;
                it->FastGetSolutionStepValue(VISCOSITY) = water_mu;
            }
            else
            {
                it->FastGetSolutionStepValue(DENSITY) = air_density;
                it->FastGetSolutionStepValue(VISCOSITY) = air_mu;
            }
        }
        KRATOS_CATCH("")
    }
    //**********************************************************************************************
//********************************************************************************************** void DistanceFarRegionCorrection(ModelPart& ThisModelPart, const double max_dist) { KRATOS_TRY; int node_size = ThisModelPart.Nodes().size(); // double max_cutted_elem_size = 0.0; //max_cutted_elem_size = ComputeCharactristicCuttedLength(ThisModelPart); #pragma omp parallel for firstprivate(node_size) for (int ii = 0; ii < node_size; ii++) { ModelPart::NodesContainerType::iterator it = ThisModelPart.NodesBegin() + ii; double& current_dist = it->FastGetSolutionStepValue(DISTANCE); const double old_dist = it->FastGetSolutionStepValue(DISTANCE,1); // if( fabs(old_dist) >= CFL*max_cutted_elem_size && current_dist*old_dist <= 0.0) if( fabs(old_dist) >= max_dist && current_dist*old_dist <= 0.0) current_dist = old_dist; } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** double sign(const double& a) { if(a < 0) return -1.0; else return 1.0; } //********************************************************************************************** //********************************************************************************************** void VolumeCorrection(ModelPart& ThisModelPart, const double Net_volume, const double max_correction, const bool CorrectNegativeVolume=false) { KRATOS_TRY double wet_volume = 0.0; double wet_volume_old=wet_volume; double cutted_area = 0.0; double wet_volume_left=0.0; double wet_volume_right=0.0; double tol=1e-5; double tolv=5e-3; double lower_correction; double upper_correction; int node_size = ThisModelPart.GetCommunicator().LocalMesh().Nodes().size(); // First we compute the Total Volume of the Fluid // First we compute the Total Volume of the Fluid ComputeWetVolumeAndCuttedArea(ThisModelPart, wet_volume, cutted_area); // Now we compute the difference between the Total Volume and the 
volume that has enetered through the inlet double volume_difference = (fabs(Net_volume) - wet_volume); // First guess in correction double correction = volume_difference/cutted_area; double correction_old=correction; //double signcorrection=sign(correction); //Maximum signed correction double maximum_signed_correction=fabs(max_correction)*(sign(correction)); //Way of obtaining sign(x) // If correction is greater than signed correction, we keep the maximum correction.If the wet volume exceeds the correction, then we start from maximum_signed_correction bool exit_loop=false; // Just to skip the iterations if((correction>maximum_signed_correction)&&(correction>0)){ correction=maximum_signed_correction; ComputeVolumeAndCuttedAreaInDistance(ThisModelPart,wet_volume,cutted_area,correction); lower_correction=0.0; ComputeVolumeAndCuttedAreaInDistance(ThisModelPart,wet_volume_left,cutted_area,lower_correction); upper_correction=correction; wet_volume_right=wet_volume; if(fabs(Net_volume)>wet_volume_right){exit_loop=true;} } if((correction<maximum_signed_correction)&&(correction<0)){ correction=maximum_signed_correction; ComputeVolumeAndCuttedAreaInDistance(ThisModelPart,wet_volume,cutted_area,correction); upper_correction=0.0; ComputeVolumeAndCuttedAreaInDistance(ThisModelPart,wet_volume_right,cutted_area,upper_correction); lower_correction=correction; wet_volume_left=wet_volume; if(fabs(Net_volume)<wet_volume_left){exit_loop=true;} } // Now we find the left and right limits if(exit_loop==false) { double extreme_correction; ComputeVolumeAndCuttedAreaInDistance(ThisModelPart,wet_volume,cutted_area,correction); if(correction>0) { extreme_correction=fabs(max_correction); if(wet_volume<=fabs(Net_volume)){ lower_correction=correction; wet_volume_left=wet_volume; upper_correction=extreme_correction; ComputeVolumeAndCuttedAreaInDistance(ThisModelPart,wet_volume_right,cutted_area,extreme_correction); } else { lower_correction=0.0; 
ComputeVolumeAndCuttedAreaInDistance(ThisModelPart,wet_volume_left,cutted_area,0.0); wet_volume_right=wet_volume; upper_correction=correction; } } else { extreme_correction=-fabs(max_correction); if(wet_volume>=fabs(Net_volume)){ upper_correction=correction; wet_volume_right=wet_volume; lower_correction=extreme_correction; ComputeVolumeAndCuttedAreaInDistance(ThisModelPart,wet_volume_left,cutted_area,extreme_correction); } else { upper_correction=0.0; ComputeVolumeAndCuttedAreaInDistance(ThisModelPart,wet_volume_right,cutted_area,0.0); wet_volume_left=wet_volume; lower_correction=correction; } } ComputeVolumeAndCuttedAreaInDistance(ThisModelPart,wet_volume,cutted_area,correction); ComputeVolumeAndCuttedAreaInDistance(ThisModelPart,wet_volume_right,cutted_area,upper_correction); ComputeVolumeAndCuttedAreaInDistance(ThisModelPart,wet_volume_left,cutted_area,lower_correction); } // Now we loop until convergence unsigned int iteration=0; //double inc_correction=1000.0; while((iteration<10)&&(exit_loop==false)) { correction_old=correction; wet_volume_old=wet_volume; double aux_vol_r=wet_volume_right-fabs(Net_volume); double aux_vol_l=wet_volume_left-fabs(Net_volume); correction=(aux_vol_r*lower_correction-aux_vol_l*upper_correction)/(wet_volume_right-wet_volume_left); if((correction<lower_correction)||(correction>upper_correction)){KRATOS_WATCH("ERROR CORRECTING VOLUME IN VOLUME_CORRECTION");} ComputeVolumeAndCuttedAreaInDistance(ThisModelPart,wet_volume,cutted_area,correction); volume_difference = fabs(Net_volume) - wet_volume; if(fabs(Net_volume)>wet_volume){ lower_correction=correction; wet_volume_left=wet_volume; } else{ upper_correction=correction; wet_volume_right=wet_volume; } //Now the middle point just in case double middle_point=(lower_correction+upper_correction)/2.0; double wet_volume_middle=0.0; ComputeVolumeAndCuttedAreaInDistance(ThisModelPart,wet_volume_middle,cutted_area,middle_point); if(wet_volume_middle<fabs(Net_volume)){ 
lower_correction=middle_point; wet_volume_left=wet_volume_middle; } else{ upper_correction=middle_point; wet_volume_right=wet_volume_middle; } //inc_correction=upper_correction-lower_correction; iteration++; if((fabs(correction_old-correction)<tol)&&((fabs(wet_volume-wet_volume_old)/wet_volume_old)<tolv)){ exit_loop=true; //std::cout << "Volume Correction performed: it= "<< iteration <<" Correction =" << correction << " Wet_volume =" << wet_volume << " Net Volume =" << fabs(Net_volume) << std::endl; } } //BLOCK TO BE MODIFIED, JUST COMPUTE CORRECTION IF IT SUPPOSED TO DO IT //Now we set the correction to be 0 if it is negative -> if it is positive, the distance of point near 0 positive becomes negotive, so that the front advances if(CorrectNegativeVolume==false && correction<0) { correction=0.0; ComputeVolumeAndCuttedAreaInDistance(ThisModelPart,wet_volume,cutted_area,correction); } // END OF BLOCK TO BE MODIFIED // Now we correct the distances #pragma omp parallel for firstprivate(node_size) for (int ii = 0; ii < node_size; ii++) { ModelPart::NodesContainerType::iterator it = ThisModelPart.GetCommunicator().LocalMesh().NodesBegin() + ii; it->FastGetSolutionStepValue(DISTANCE) -= correction; } ThisModelPart.GetCommunicator().SynchronizeVariable(DISTANCE); ThisModelPart.GetProcessInfo()[CUTTED_AREA] =cutted_area ; ThisModelPart.GetProcessInfo()[WET_VOLUME] = wet_volume; if (ThisModelPart.GetCommunicator().MyPID() == 0) std::cout << "Volume correction : " << correction << " in " << iteration<< std::endl; //std::cout << "Volume Correction " << " Net volume: "<< fabs(Net_volume) << " wet volume: " << wet_volume << " percent: "<< wet_volume/fabs(Net_volume)<< " Area: "<< cutted_area << std::endl; KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** void PosetiveVolumeCorrection(ModelPart& ThisModelPart, const 
double Net_volume, const double max_correction) { KRATOS_TRY; double wet_volume = 0.0; double cutted_area = 0.0; // int node_size = ThisModelPart.Nodes().size(); int node_size = ThisModelPart.GetCommunicator().LocalMesh().Nodes().size(); // #pragma omp parallel for firstprivate(node_size) reduction(+:wet_volume,cutted_area ) // for (int ii = 0; ii < node_size; ii++) // { // ModelPart::NodesContainerType::iterator it = ThisModelPart.NodesBegin() + ii; // // wet_volume += it->FastGetSolutionStepValue(WET_VOLUME); // cutted_area += it->FastGetSolutionStepValue(CUTTED_AREA); // } ComputePosVolumeAndCuttedArea(ThisModelPart, wet_volume, cutted_area); double volume_difference = fabs(Net_volume) - wet_volume; double correction = volume_difference/cutted_area; if(correction > max_correction) correction = max_correction; if(correction < -max_correction) correction = -max_correction; ThisModelPart.GetProcessInfo()[CUTTED_AREA] =cutted_area ; ThisModelPart.GetProcessInfo()[WET_VOLUME] = wet_volume; const double liquidus_temp = ThisModelPart.GetProcessInfo()[FLUID_TEMPERATURE]; //volume loss is just corrected if(volume_difference > 0.0) { // TODO: this is not correct in MPI parallel #pragma omp parallel for firstprivate(node_size) for (int ii = 0; ii < node_size; ii++) { // ModelPart::NodesContainerType::iterator it = ThisModelPart.NodesBegin() + ii; ModelPart::NodesContainerType::iterator it = ThisModelPart.GetCommunicator().LocalMesh().NodesBegin() + ii; // double alpha = it->FastGetSolutionStepValue(DP_ALPHA1); double dist = it->FastGetSolutionStepValue(DISTANCE); if(dist < 0 && (dist+correction)>0 ) it->FastGetSolutionStepValue(TEMPERATURE) = liquidus_temp; dist += correction; } } std::cout << "Volume Correction " << " Net volume: "<< fabs(Net_volume) << " wet volume: " << wet_volume << " percent: "<< wet_volume/fabs(Net_volume)<< " Area: "<< cutted_area << std::endl; KRATOS_CATCH("") } 
//********************************************************************************************** //********************************************************************************************** void ComputeNetInletVolume(ModelPart& ThisModelPart) { KRATOS_TRY; double net_input = 0.0; /* int node_size = ThisModelPart.GetCommunicator().LocalMesh().Nodes().size(); #pragma omp parallel for firstprivate(node_size) reduction(+:net_input) for (int ii = 0; ii < node_size; ii++) { ModelPart::NodesContainerType::iterator it = ThisModelPart.GetCommunicator().LocalMesh().NodesBegin() + ii; //double str_flag = it->GetValue(IS_STRUCTURE); //double slip_flag = it->GetSolutionStepValue(IS_SLIP); //double distance = it->GetSolutionStepValue(DISTANCE); // if ( (str_flag != 0.0 || slip_flag == 0.0) && distance < 0.0 ) if ( it->Is(INLET) ) { const array_1d<double, 3> vel = it->FastGetSolutionStepValue(VELOCITY); const array_1d<double, 3> normal = it->FastGetSolutionStepValue(NORMAL); net_input += inner_prod(vel,normal); } } //syncronoze ThisModelPart.GetCommunicator().SumAll(net_input);*/ for (ModelPart::ConditionIterator iCond = ThisModelPart.ConditionsBegin(); iCond != ThisModelPart.ConditionsEnd(); iCond++) { if (iCond->GetValue(IS_INLET) != 0.0) { Geometry< Node<3> >& rGeometry = iCond->GetGeometry(); array_1d<double,3> v1, v2, AreaNormal; v1[0] = rGeometry[1].X() - rGeometry[0].X(); v1[1] = rGeometry[1].Y() - rGeometry[0].Y(); v1[2] = rGeometry[1].Z() - rGeometry[0].Z(); v2[0] = rGeometry[2].X() - rGeometry[0].X(); v2[1] = rGeometry[2].Y() - rGeometry[0].Y(); v2[2] = rGeometry[2].Z() - rGeometry[0].Z(); MathUtils<double>::CrossProduct(AreaNormal,v1,v2); AreaNormal *= 0.5; array_1d<double,3> Velocity(3,0.0); for (unsigned int i = 0; i < rGeometry.PointsNumber(); i++) Velocity += rGeometry[i].FastGetSolutionStepValue(VELOCITY); Velocity /= 3.0; net_input -= Velocity[0]*AreaNormal[0] + Velocity[1]*AreaNormal[1] + Velocity[2]*AreaNormal[2]; } } net_input = 
ThisModelPart.GetCommunicator().GetDataCommunicator().SumAll(net_input); ProcessInfo& CurrentProcessInfo = ThisModelPart.GetProcessInfo(); const double delta_t = CurrentProcessInfo[DELTA_TIME]; double& net_volume = CurrentProcessInfo[NET_INPUT_MATERIAL]; net_volume += (net_input*delta_t); KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** /**This function applies a velocity reduction. Velocity is not allowed to be greater in modulus * than old_vel_norm + max_acc_modulus * dt * the function is designed to palliate the effect of errors in the solution of the linear system * which result in unphysical velocity peaks, typically located at edges. */ void ApplyVelocityLimitation(ModelPart& ThisModelPart, const double max_acc_modulus) { KRATOS_TRY; // double net_input = 0.0; int node_size = ThisModelPart.Nodes().size(); const double dt = ThisModelPart.GetProcessInfo()[DELTA_TIME]; #pragma omp parallel for firstprivate(node_size) //reduction(+:net_input) for (int ii = 0; ii < node_size; ii++) { ModelPart::NodesContainerType::iterator it = ThisModelPart.NodesBegin() + ii; if(!it->IsFixed(VELOCITY_X) && !it->IsFixed(VELOCITY_Y) && !it->IsFixed(VELOCITY_Z)) { array_1d<double, 3>& vel = it->FastGetSolutionStepValue(VELOCITY); const array_1d<double, 3>& old_vel = it->FastGetSolutionStepValue(VELOCITY,1); const double current_vel_norm = norm_2(vel); const double old_vel_norm = norm_2(old_vel); const double slip_flag = it->FastGetSolutionStepValue(IS_SLIP); if(slip_flag > 11.0) //edge or corners -- here we reduce by a factor of 6 the max acceleration { const double acceptable_vel_norm = old_vel_norm + 0.1666667*max_acc_modulus*dt; const double ratio = current_vel_norm/acceptable_vel_norm; //velocity is reduced if too high if(ratio > 1.0) vel /= ratio; } else { const double acceptable_vel_norm = old_vel_norm + 
max_acc_modulus*dt; const double ratio = current_vel_norm/acceptable_vel_norm; //velocity is reduced if too high if(ratio > 1.0) vel /= ratio; } } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** void ComputeNodalVolume(ModelPart& ThisModelPart) { KRATOS_TRY; //first of all set to zero the nodal variables to be updated nodally for (ModelPart::NodeIterator i = ThisModelPart.NodesBegin(); i != ThisModelPart.NodesEnd(); ++i) { (i)->GetValue(NODAL_VOLUME) = 0.00; } for (ModelPart::ElementIterator i = ThisModelPart.ElementsBegin(); i != ThisModelPart.ElementsEnd(); ++i) { Geometry< Node<3> >& rGeometry = i->GetGeometry(); double volume = 0.25 * rGeometry.DomainSize()/3.0;//Attention DomainSize() Returns JAcobian/2.0, Volume is Jacobian/6.0 for (int jj =0; jj<4; ++jj) rGeometry[jj].GetValue(NODAL_VOLUME) += volume; } //ThisModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_VOLUME); KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** int SolidificationDuringFilling(ModelPart& ThisModelPart, double BandWidth) { KRATOS_TRY; //Check for stop criteria return StopSolidifCriteria(ThisModelPart,BandWidth); KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** void LastStepExtrapolations(ModelPart& ThisModelPart, const double corrected_time ) { KRATOS_TRY; //Check if there is any dry node /* Defining for Solid_fraction_extrapolation*/ int node_size = ThisModelPart.GetCommunicator().LocalMesh().Nodes().size(); double is_wet = 1.0; #pragma omp parallel for firstprivate(node_size) for (int ii = 0; 
ii < node_size; ii++) { ModelPart::NodesContainerType::iterator it = ThisModelPart.GetCommunicator().LocalMesh().NodesBegin() + ii; double dist = it->FastGetSolutionStepValue(DISTANCE); if(dist>=0.0) { is_wet = 0.0; it->GetValue(FILLTIME)=corrected_time; //dist = -1.0; } } //syncronoze is_wet = ThisModelPart.GetCommunicator().GetDataCommunicator().MinAll(is_wet); //If there is a dry node then do extrapolation for velocity if(is_wet == 0.0) { ParallelExtrapolationUtilities<3>::ExtrapolateTemperature(ThisModelPart, DISTANCE, TEMPERATURE, NODAL_AREA,10); //ParallelExtrapolationUtilities<3>::ParallelExtrapolationUtilities().ExtrapolateVelocity(ThisModelPart, DISTANCE, VELOCITY, NODAL_AREA,10); #pragma omp parallel for firstprivate(node_size) for (int ii = 0; ii < node_size; ii++) { ModelPart::NodesContainerType::iterator it = ThisModelPart.GetCommunicator().LocalMesh().NodesBegin() + ii; double& dist = it->FastGetSolutionStepValue(DISTANCE); if (dist >= 0.0) { dist = -1.0; } // //filling time // double is_visited = it->FastGetSolutionStepValue(IS_VISITED); // if(is_visited == 0.0 && dist<=0.0) // { // it->FastGetSolutionStepValue(IS_VISITED) = 1.0; // //double filling_time = ThisModelPart.GetProcessInfo()[TIME]; // //it->FastGetSolutionStepValue(FILLTIME) = filling_time * time_correction_factor; //it->GetValue(FILLTIME) =corrected_time; // } } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** void ViscosityBasedSolidification(ModelPart& ThisModelPart, double ViscosityFactor) { KRATOS_TRY; int node_size = ThisModelPart.GetCommunicator().LocalMesh().Nodes().size(); #pragma omp parallel for firstprivate(node_size) for (int ii = 0; ii < node_size; ii++) { ModelPart::NodesContainerType::iterator it = ThisModelPart.GetCommunicator().LocalMesh().NodesBegin() + ii; //double temperature = 
it->FastGetSolutionStepValue(TEMPERATURE); const double dist = it->FastGetSolutionStepValue(DISTANCE); if(dist<=0.0) { const double alpha = 1.0 + pow(it->FastGetSolutionStepValue(SOLIDFRACTION),2); double& visc = it->FastGetSolutionStepValue(VISCOSITY); visc *= alpha*ViscosityFactor; //visc *= (ViscosityFactor - (ViscosityFactor -1.0)*(1.0 - alpha)); } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** //void MacroPorosityToShrinkageComputation(ModelPart& ThisModelPart, ModelPart::NodesContainerType& visited_nodes, unsigned int division_number) //{ // KRATOS_TRY; // double max_porosity = -1000.0; // double min_porosity = 1000000.0; // ModelPart::NodesContainerType& r_nodes = ThisModelPart.Nodes(); // for(ModelPart::NodesContainerType::iterator i_node = r_nodes.begin(); i_node!=r_nodes.end(); i_node++) // { // i_node->Set(NOT_VISITED); // const double mcp = i_node->FastGetSolutionStepValue(MACRO_POROSITY); // if(max_porosity <= mcp) // max_porosity = mcp; // if(min_porosity >= mcp && mcp != 0.0) // min_porosity = mcp; // } // // double min_porosity = 0.01*max_porosity; ///* double step_length = (max_porosity - min_porosity)/double(division_number); // double floor_mp = min_porosity;// + step_length; // for(unsigned int cnt = 1; cnt <= division_number; cnt++) // { // double cnt_val = min_porosity + double(cnt) * step_length; // // for(ModelPart::NodesContainerType::iterator i_node = r_nodes.begin(); i_node!=r_nodes.end(); i_node++) // { // const double nd_mcp = i_node->FastGetSolutionStepValue(MACRO_POROSITY); // if( nd_mcp >= floor_mp && i_node->IsNot(VISITED) ) // { // if( nd_mcp <= cnt_val) // { // i_node->Set(VISITED); // i_node->FastGetSolutionStepValue(SHRINKAGE_POROSITY) = cnt_val; // visited_nodes.push_back(*(i_node.base())); // } // } // } // // }*/ // //double floor_mp = min_porosity;// + 
step_length; // for(ModelPart::NodesContainerType::iterator i_node = r_nodes.begin(); i_node!=r_nodes.end(); i_node++) // { // const double nd_mcp = i_node->FastGetSolutionStepValue(MACRO_POROSITY); // if( nd_mcp >= floor_mp ) // { // i_node->FastGetSolutionStepValue(SHRINKAGE_POROSITY) = nd_mcp; // visited_nodes.push_back(*(i_node.base())); // } // } ///*for(ModelPart::NodesContainerType::iterator i_node = r_nodes.begin(); i_node!=r_nodes.end(); i_node++) //{ // const double nd_mcp = i_node->FastGetSolutionStepValue(MACRO_POROSITY); // if(i_node->IsNot(VISITED) && nd_mcp <= floor_mp) // i_node->FastGetSolutionStepValue(SHRINKAGE_POROSITY) = std::numeric_limits<double>::infinity(); //}*/ // KRATOS_CATCH("") //} ////********************************************************************************************** //********************************************************************************************** void ComputePosetiveVolume(ModelPart& ThisModelPart) { KRATOS_TRY; // //set as active the internal nodes // int node_size = ThisModelPart.Nodes().size(); // #pragma omp parallel for // for (int i = 0; i < node_size; i++) // { // ModelPart::NodesContainerType::iterator it = ThisModelPart.NodesBegin() + i; // it->FastGetSolutionStepValue(NODAL_MASS) = 0.0; // } // int elem_size = ThisModelPart.Elements().size(); // array_1d<double, 4 > N; // BoundedMatrix <double, 4, 3> DN_DX; //#pragma omp parallel for private(DN_DX,N) firstprivate(elem_size) // for (int i = 0; i < elem_size; i++) // { // PointerVector< Element>::iterator it = ThisModelPart.ElementsBegin() + i; // Geometry<Node < 3 > >&geom = it->GetGeometry(); // double Volume; // GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume); // for (unsigned int k = 0; k < 4; k++) // { // geom[k].SetLock(); // geom[k].FastGetSolutionStepValue(NODAL_MASS) += Volume*0.25; // geom[k].UnSetLock(); // } // } // double net_input = 0.0; //ModelPart::NodesContainerType& r_nodes = ThisModelPart.Nodes(); // 
for(ModelPart::NodesContainerType::iterator i_node = r_nodes.begin(); i_node!=r_nodes.end(); i_node++) // { // double distance = i_node->GetSolutionStepValue(DISTANCE); // if(distance >= 0.0) // { // double nd_vol = i_node->GetSolutionStepValue(NODAL_MASS); // net_input += nd_vol; // } //} //ProcessInfo& CurrentProcessInfo = ThisModelPart.GetProcessInfo(); //double& net_volume = CurrentProcessInfo[NET_INPUT_MATERIAL]; //net_volume = net_input; //KRATOS_WATCH(net_volume); // //int node_size = ThisModelPart.GetCommunicator().LocalMesh().Nodes().size(); // //#pragma omp parallel for firstprivate(node_size) reduction(+:net_input) // //for (int ii = 0; ii < node_size; ii++) // //{ // // ModelPart::NodesContainerType::iterator it = ThisModelPart.GetCommunicator().LocalMesh().NodesBegin() + ii; // // double str_flag = it->GetValue(IS_STRUCTURE); // // double slip_flag = it->GetSolutionStepValue(IS_SLIP); // // double distance = it->GetSolutionStepValue(DISTANCE); // // // if ( (str_flag != 0.0 || slip_flag == 0.0) && distance < 0.0 ) // // if ( it->Is(INLET) ) // // { // // const array_1d<double, 3> vel = it->FastGetSolutionStepValue(VELOCITY); // // const array_1d<double, 3> normal = it->FastGetSolutionStepValue(NORMAL); // // net_input += inner_prod(vel,normal); // // } // //} // ////syncronoze // //ThisModelPart.GetCommunicator().SumAll(net_input); // //ProcessInfo& CurrentProcessInfo = ThisModelPart.GetProcessInfo(); // //const double delta_t = CurrentProcessInfo[DELTA_TIME]; // //double& net_volume = CurrentProcessInfo[NET_INPUT_MATERIAL]; // //net_volume += (net_input*delta_t); KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** /**This function correct temperature in case that temperature at a node is maximum than * FLUID_TEMPERATURE which is the inlet temperature. 
It simply replaces the temperature of the * previous step in this case. */ void ApplyTemperatureLimitation(ModelPart& ThisModelPart, const double max_temperature, const double min_temperature) { KRATOS_TRY; int node_size = ThisModelPart.Nodes().size(); #pragma omp parallel for firstprivate(node_size) for (int ii = 0; ii < node_size; ii++) { ModelPart::NodesContainerType::iterator it = ThisModelPart.NodesBegin() + ii; double& current_temp = it->FastGetSolutionStepValue(TEMPERATURE); if( current_temp > max_temperature || current_temp < min_temperature )//1.05*fluid_temp { double old_temp = it->FastGetSolutionStepValue(TEMPERATURE,1); current_temp = old_temp; } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** double CheckIfAllNodesAreWet(ModelPart& ThisModelPart) { KRATOS_TRY; int node_size = ThisModelPart.Nodes().size(); // if there is no dry node double is_dry_node = 0.0; #pragma omp parallel for firstprivate(node_size) for (int ii = 0; ii < node_size; ii++) { ModelPart::NodesContainerType::iterator it = ThisModelPart.NodesBegin() + ii; double dist = it->FastGetSolutionStepValue(DISTANCE); if(dist > 0.0) is_dry_node = 1.0; } //syncronoze return ThisModelPart.GetCommunicator().GetDataCommunicator().MaxAll(is_dry_node); KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** /* THIS FUNCTION COMPUTES THE WET VOLUME */ double ComputeWetVolume(ModelPart& ThisModelPart) { double wet_volume = 0.0; double cutted_area = 0.0; // First we compute the Total Volume of the Fluid // First we compute the Total Volume of the Fluid ComputeWetVolumeAndCuttedArea(ThisModelPart, wet_volume, cutted_area); return wet_volume; } 
//**********************************************************************************************
//**********************************************************************************************
/// Returns the sum of DomainSize() over all elements of the model part.
/// For tetrahedra DomainSize() is the element volume (Jacobian/6); the old /3.0
/// correction mentioned in the historical comment is no longer needed.
/// NOTE(review): sums only the local elements; no MPI reduction is performed here —
/// presumably callers combine ranks themselves. TODO confirm.
double ComputePartVolume(ModelPart& ThisModelPart)
{
    double vol=0.0;
    ModelPart::ElementIterator ibegin = ThisModelPart.ElementsBegin();
    unsigned int size_to_loop=ThisModelPart.Elements().size();
    KRATOS_TRY;
    // Each thread accumulates into its own copy of 'vol'; OpenMP combines them at the end.
    #pragma omp parallel for reduction(+: vol)
    for (int k = 0; k < static_cast<int>(size_to_loop); ++k)
    {
        ModelPart::ElementIterator i = ibegin+k;
        Geometry< Node<3> >& rGeometry = i->GetGeometry();
        // DomainSize() already returns the true measure of the geometry.
        double elem_volume = rGeometry.DomainSize();
        vol+=elem_volume;
    }
    KRATOS_CATCH("")
    return vol;
}

//**********************************************************************************************
//**********************************************************************************************
/// Returns the sum of DomainSize() over all conditions (boundary faces) of the model part.
/// Same parallel-reduction pattern as ComputePartVolume; local-rank total only.
double ComputePartArea(ModelPart& ThisModelPart)
{
    double area=0.0;
    ModelPart::ConditionIterator ibegin = ThisModelPart.ConditionsBegin();
    unsigned int size_to_loop=ThisModelPart.Conditions().size();
    KRATOS_TRY;
    #pragma omp parallel for reduction(+: area)
    for (int k=0; k <static_cast<int>(size_to_loop); ++k)
    {
        ModelPart::ConditionIterator i=ibegin+k;
        Geometry< Node<3> >& rGeometry = i->GetGeometry();
        // DomainSize() already returns the true measure of the geometry.
        double condition_area = rGeometry.DomainSize();
        area+=condition_area;
    }
    KRATOS_CATCH("")
    return area;
}

//**********************************************************************************************
//**********************************************************************************************
/// Returns the total area of conditions flagged as inlet (IS_INLET > 0).
/// Mirrors ComputePartArea but filters on the condition's IS_INLET value.
double ComputePartInletArea(ModelPart& ThisModelPart)
{
    double area=0.0;
    ModelPart::ConditionIterator ibegin = ThisModelPart.ConditionsBegin();
    unsigned int size_to_loop=ThisModelPart.Conditions().size();
    KRATOS_TRY;
    #pragma omp parallel for reduction(+: area)
    for (int k=0; k <static_cast<int>(size_to_loop); ++k)
    {
        ModelPart::ConditionIterator i=ibegin+k;
        Geometry< Node<3> >& rGeometry = i->GetGeometry();
        double condition_area = rGeometry.DomainSize();
        // Only accumulate faces marked as inlet.
        if(i->GetValue(IS_INLET)>0.0){
            area+=condition_area;}
    }
    KRATOS_CATCH("")
    return area;
}

//**********************************************************************************************
//**********************************************************************************************
/// Returns the longest edge length over all (tetrahedral) elements, reduced across
/// MPI ranks via MaxAll. Edge lengths are compared squared and sqrt'ed once per element.
/// Assumes 4-node geometries (loops are hard-coded to 4 nodes).
double ComputePartMaxh(ModelPart& ThisModelPart)
{
    KRATOS_TRY
    double h_max = 0.0;
    for (ModelPart::ElementsContainerType::iterator it = ThisModelPart.ElementsBegin(); it != ThisModelPart.ElementsEnd(); it++)
    {
        Geometry<Node<3> >&geom = it->GetGeometry();
        double h = 0.0;
        // Examine every node pair (i,j) of the tetrahedron: 6 edges.
        for (unsigned int i = 0; i<4; i++)
        {
            double xc = geom[i].X();
            double yc = geom[i].Y();
            double zc = geom[i].Z();
            for (unsigned int j = i + 1; j<4; j++)
            {
                double x = geom[j].X();
                double y = geom[j].Y();
                double z = geom[j].Z();
                // Squared edge length; sqrt deferred until the per-element max is known.
                double l = (x - xc)*(x - xc);
                l += (y - yc)*(y - yc);
                l += (z - zc)*(z - zc);
                if (l > h) h = l;
            }
        }
        h = sqrt(h);
        if (h > h_max) h_max = h;
    }
    // Cross-rank max so every process returns the same global value.
    return ThisModelPart.GetCommunicator().GetDataCommunicator().MaxAll(h_max);
    KRATOS_CATCH("");
}

//**********************************************************************************************
//**********************************************************************************************
/// Returns the average edge length over all (tetrahedral) elements.
/// Both the edge-length sum and the edge count are summed across MPI ranks before dividing,
/// so the average is global. Shared mesh edges are counted once per adjacent element,
/// which is consistent on both sides of the division.
double ComputePartAvgh(ModelPart& ThisModelPart)
{
    KRATOS_TRY
    double h_avg = 0.0;
    unsigned int n_edges = 0; // It will count held
    for (ModelPart::ElementsContainerType::iterator it = ThisModelPart.ElementsBegin(); it != ThisModelPart.ElementsEnd(); it++)
    {
        Geometry<Node<3> >&geom = it->GetGeometry();
        //double h = 0.0;
        for (unsigned int i = 0; i<4; i++)
        {
            double xc = geom[i].X();
            double yc = geom[i].Y();
            double zc = geom[i].Z();
            for (unsigned int j = i + 1; j<4 ; j++)
            {
                double x = geom[j].X();
                double y = geom[j].Y();
                double z = geom[j].Z();
                double l = (x - xc)*(x - xc);
                l += (y - yc)*(y - yc);
                l += (z - zc)*(z - zc);
                l = sqrt(l);
                h_avg += l;
                n_edges += 1;
            }
        }
        //h = sqrt(h);
        //h_avg += h;
    }
    double n = static_cast<double>(n_edges);
    // Global sums, then one division: avg = sum(all edge lengths) / sum(all edge counts).
    h_avg = ThisModelPart.GetCommunicator().GetDataCommunicator().SumAll(h_avg);
    n = ThisModelPart.GetCommunicator().GetDataCommunicator().SumAll(n);
    h_avg /= n;
    return h_avg;
    KRATOS_CATCH("");
}

//**********************************************************************************************
//**********************************************************************************************
private:

//  Disabled legacy code, kept for reference: computed a deceleration factor DP_ALPHA1
//  per node from the local temperature between SOLID_TEMPERATURE and FLUID_TEMPERATURE.
//    void AssignDecelerateFactor(ModelPart& ThisModelPart)
//    {
//        KRATOS_TRY;
//
//        //double solidus_temp = ThisModelPart.GetTable(3).Data().front().first;
//        //double liquidus_temp = ThisModelPart.GetTable(3).Data().back().first;
//        double solidus_temp=ThisModelPart.GetProcessInfo().GetValue(FLUID_TEMPERATURE);
//        double liquidus_temp=ThisModelPart.GetProcessInfo().GetValue(SOLID_TEMPERATURE);
//        //double temp_t = liquidus_temp;
//        //if ( solidus_temp > liquidus_temp)
//        //{
//        //    liquidus_temp = solidus_temp;
//        //    solidus_temp = temp_t;
//        //}
//
//        int node_size = ThisModelPart.GetCommunicator().LocalMesh().Nodes().size();
//
//        #pragma omp parallel for firstprivate(node_size)
//        for (int ii = 0; ii < node_size; ii++)
//        {
//            double alpha = 0.0;
//            ModelPart::NodesContainerType::iterator it = ThisModelPart.GetCommunicator().LocalMesh().NodesBegin() + ii;
//
//            double dist = it->FastGetSolutionStepValue(DISTANCE);
//            if( dist<=0.0)
//            {
//                double temperature = it->FastGetSolutionStepValue(TEMPERATURE);
//
//                if(temperature < solidus_temp) alpha = 1.0;
//                else if( temperature >= solidus_temp && temperature < liquidus_temp) alpha = 1.0-(temperature - solidus_temp)/(liquidus_temp - solidus_temp);
//            }
//
//            it->FastGetSolutionStepValue(DP_ALPHA1) = alpha;
//        }
//
//        KRATOS_CATCH("")
//    }

//**********************************************************************************************
//**********************************************************************************************
//  Disabled legacy code, kept for reference: zeroed/scaled and fixed nodal velocities
//  depending on the DP_ALPHA1 deceleration factor.
//    void VelocityReduction(ModelPart& ThisModelPart)
//    {
//        KRATOS_TRY;
//
//        int node_size = ThisModelPart.GetCommunicator().LocalMesh().Nodes().size();
//
//        #pragma omp parallel for firstprivate(node_size)
//        for (int ii = 0; ii < node_size; ii++)
//        {
//            ModelPart::NodesContainerType::iterator it = ThisModelPart.GetCommunicator().LocalMesh().NodesBegin() + ii;
//            //double temperature = it->FastGetSolutionStepValue(TEMPERATURE);
//            double alpha = it->FastGetSolutionStepValue(DP_ALPHA1);
//
//            if(alpha >= 0.9){
//                it->FastGetSolutionStepValue(VELOCITY_X) = 0.0;
//                it->FastGetSolutionStepValue(VELOCITY_Y) = 0.0;
//                it->FastGetSolutionStepValue(VELOCITY_Z) = 0.0;
//                it->Fix(VELOCITY_X);
//                it->Fix(VELOCITY_Y);
//                it->Fix(VELOCITY_Z);
//            }
//            else if(alpha<= 0.9 && alpha > 0.1)
//            {
//                double& Vx = it->FastGetSolutionStepValue(VELOCITY_X);
//                double& Vy = it->FastGetSolutionStepValue(VELOCITY_Y);
//                double& Vz = it->FastGetSolutionStepValue(VELOCITY_Z);
//                Vx *= (1.0-alpha);
//                Vy *= (1.0-alpha);
//                Vz *= (1.0-alpha);
//
//                it->Fix(VELOCITY_X);
//                it->Fix(VELOCITY_Y);
//                it->Fix(VELOCITY_Z);
//            }
//        }
//        KRATOS_CATCH("")
//    }

//**********************************************************************************************
//**********************************************************************************************
/// Returns 1 if any local wet node (DISTANCE <= 0) within 'ref_dist' of the interface still
/// has SOLIDFRACTION < 0.9 ("hot"), 0 otherwise; the flag is max-reduced across MPI ranks.
/// The loop is intentionally serial: it early-exits on the first hot node, which cannot be
/// combined with a plain 'parallel for' (see original author's warning).
int StopSolidifCriteria(ModelPart& ThisModelPart, double ref_dist)
{
    KRATOS_TRY;
    int is_hot = 0;
    int node_size = ThisModelPart.GetCommunicator().LocalMesh().Nodes().size();
    //CAN NOT DO THIS IN PARALLEL! it is just wrong!!!
    for (int ii = 0; ii < node_size; ii++)
    {
        ModelPart::NodesContainerType::iterator it = ThisModelPart.GetCommunicator().LocalMesh().NodesBegin() + ii;
        //double temperature = it->FastGetSolutionStepValue(TEMPERATURE);
        double distance = it->FastGetSolutionStepValue(DISTANCE);
        // Only wet nodes close enough to the free surface are examined.
        if(distance <= 0.0 && fabs(distance) <= ref_dist){
            double solid_fraction = it->FastGetSolutionStepValue(SOLIDFRACTION);
            if(solid_fraction < 0.9)
            {
                is_hot=1;
                break; // one hot node is enough for this rank
            }
        }
    }
    // In-place MPI max: if any rank found a hot node, all ranks return 1.
    ThisModelPart.GetCommunicator().MaxAll(is_hot);
    return is_hot;
    KRATOS_CATCH("")
}

//**********************************************************************************************
//**********************************************************************************************
/// Assigns the Smagorinsky constant to every element whose nodes are ALL on the air side
/// (DISTANCE >= 0). Elements cut by the interface or fully wet are left untouched.
void AirSmagorinskey(ModelPart& ThisModelPart, double C_Smagorinsky)
{
    int elem_size = ThisModelPart.Elements().size();
    #pragma omp parallel for firstprivate(elem_size)
    for(int ii = 0; ii<elem_size; ii++)
    {
        PointerVector< Element>::iterator iel=ThisModelPart.ElementsBegin()+ii;
        double dist_sign = 1.0;
        Geometry< Node<3> >& geom = iel->GetGeometry();
        // Flip the flag as soon as one node is on the wet side.
        for(unsigned int i =0; i<geom.size(); i++)
        {
            double dist = geom[i].FastGetSolutionStepValue(DISTANCE);
            if(dist_sign*dist < 0.0)
            {
                dist_sign = -1.0;
                break;
            }
        }
        // to be sure to not apply to the cutted elements and just to all air elements
        if(dist_sign == 1.0)
            iel->SetValue(C_SMAGORINSKY, C_Smagorinsky );
    }
}

//**********************************************************************************************
//**********************************************************************************************
/// Computes (a) the total wet volume (sum of sub-volumes where DISTANCE < 0) and
/// (b) the total area of the zero-distance cut surface, using the tetrahedral enrichment
/// splitting from EnrichmentUtilities. Results are SumAll-reduced across MPI ranks and
/// written to the output references.
void ComputeWetVolumeAndCuttedArea(ModelPart& ThisModelPart, double& wet_volume, double& cutted_area)
{
    KRATOS_TRY;
    int elem_size = ThisModelPart.Elements().size();
    double wetvol = 0.0;
    double cutare = 0.0;
    #pragma omp parallel for firstprivate(elem_size) reduction(+:wetvol,cutare)
    for(int ii = 0; ii<elem_size; ii++)
    {
        PointerVector< Element>::iterator iel=ThisModelPart.ElementsBegin()+ii;
        // Calculate this element's geometric parameters
        double Area;
        array_1d<double, 4> N;
        BoundedMatrix<double, 4, 3> DN_DX;
        GeometryUtils::CalculateGeometryData(iel->GetGeometry(), DN_DX, N, Area);
        //get position of the cut surface
        Vector distances(4);
        Matrix Nenriched(6, 1);
        Vector volumes(6);
        Matrix coords(4, 3);
        Matrix Ngauss(6, 4);
        Vector signs(6);
        std::vector< Matrix > gauss_gradients(6);
        //fill coordinates
        for (unsigned int i = 0; i < 4; i++)
        {
            const array_1d<double, 3 > & xyz = iel->GetGeometry()[i].Coordinates();
            volumes[i] = 0.0;
            distances[i] = iel->GetGeometry()[i].FastGetSolutionStepValue(DISTANCE);
            for (unsigned int j = 0; j < 3; j++)
                coords(i, j) = xyz[j];
        }
        for (unsigned int i = 0; i < 6; i++)
            gauss_gradients[i].resize(1, 3, false);
        array_1d<double,6> edge_areas;
        // Splits the tetrahedron along the zero-distance surface; a cut tet yields up to
        // 6 sub-volumes with per-subdomain signs and the cut areas per edge.
        unsigned int ndivisions = EnrichmentUtilities::CalculateTetrahedraEnrichedShapeFuncions(coords, DN_DX, distances, volumes, Ngauss, signs, gauss_gradients, Nenriched,edge_areas);
        if(ndivisions == 1)
        {
            // Uncut element: fully wet or fully dry.
            if( signs[0] < 0.0)
                wetvol += volumes[0];
        }
        else
        {
            // Cut element: sum the wet sub-volumes and the interface area.
            double ele_wet_volume=0.0;
            for (unsigned int kk = 0; kk < ndivisions; kk++)
            {
                if( signs[kk]<0.0 )
                    ele_wet_volume += volumes[kk];
            }
            wetvol += ele_wet_volume;
            for(unsigned int i=0; i<6; i++)
                cutare += edge_areas[i];
            //cutare += 1.80140543 * pow(ele_wet_volume,0.666666666667); // equilateral tetrahedraon is considered
        }
    }
    //syncronoze
    wet_volume = ThisModelPart.GetCommunicator().GetDataCommunicator().SumAll(wetvol);
    cutted_area = ThisModelPart.GetCommunicator().GetDataCommunicator().SumAll(cutare);
    KRATOS_CATCH("")
}

//**********************************************************************************************
//**********************************************************************************************
/// Same as ComputeWetVolumeAndCuttedArea, but the cut surface is the level set
/// DISTANCE == reference_distance instead of DISTANCE == 0: nodal distances are shifted
/// by -reference_distance before the enrichment split. All work arrays are zero-initialised
/// here (the only behavioural difference in setup vs. the zero-level version).
void ComputeVolumeAndCuttedAreaInDistance(ModelPart& ThisModelPart, double& wet_volume, double& cutted_area, const double& reference_distance)
{
    KRATOS_TRY;
    int elem_size = ThisModelPart.Elements().size();
    double wetvol = 0.0;
    double cutare = 0.0;
    #pragma omp parallel for firstprivate(elem_size) reduction(+:wetvol,cutare)
    for(int ii = 0; ii<elem_size; ii++)
    {
        PointerVector< Element>::iterator iel=ThisModelPart.ElementsBegin()+ii;
        // Calculate this element's geometric parameters
        double Area;
        array_1d<double, 4> N;
        BoundedMatrix<double, 4, 3> DN_DX;
        GeometryUtils::CalculateGeometryData(iel->GetGeometry(), DN_DX, N, Area);
        //get position of the cut surface
        Vector distances = ZeroVector(4);
        Matrix Nenriched = ZeroMatrix(6, 1);
        Vector volumes = ZeroVector(6);
        Matrix coords = ZeroMatrix(4, 3);
        Matrix Ngauss = ZeroMatrix(6, 4);
        Vector signs = ZeroVector(6);
        std::vector< Matrix > gauss_gradients(6);
        //fill coordinates
        for (unsigned int i = 0; i < 4; i++)
        {
            const array_1d<double, 3 > & xyz = iel->GetGeometry()[i].Coordinates();
            volumes[i] = 0.0;
            // Shift the level set so the cut happens at DISTANCE == reference_distance.
            distances[i] = (iel->GetGeometry()[i].FastGetSolutionStepValue(DISTANCE))-reference_distance;
            for (unsigned int j = 0; j < 3; j++)
                coords(i, j) = xyz[j];
        }
        for (unsigned int i = 0; i < 6; i++)
        {
            gauss_gradients[i].resize(1, 3, false);
            gauss_gradients[i] = ZeroMatrix(1,3);
        }
        array_1d<double,6> edge_areas;
        unsigned int ndivisions = EnrichmentUtilities::CalculateTetrahedraEnrichedShapeFuncions(coords, DN_DX, distances, volumes, Ngauss, signs, gauss_gradients, Nenriched,edge_areas);
        if(ndivisions == 1)
        {
            if( signs[0] < 0.0)
                wetvol += volumes[0];
        }
        else
        {
            double ele_wet_volume=0.0;
            for (unsigned int kk = 0; kk < ndivisions; kk++)
            {
                if( signs[kk]<0.0 )
                    ele_wet_volume += volumes[kk];
            }
            wetvol += ele_wet_volume;
            for(unsigned int i=0; i<6; i++)
                cutare += edge_areas[i];
            //cutare += 1.80140543 * pow(ele_wet_volume,0.666666666667); // equilateral tetrahedraon is considered
        }
    }
    //syncronoze
    wet_volume = ThisModelPart.GetCommunicator().GetDataCommunicator().SumAll(wetvol);
    cutted_area = ThisModelPart.GetCommunicator().GetDataCommunicator().SumAll(cutare);
    KRATOS_CATCH("")
}

//**********************************************************************************************
//********************************************************************************************** void ComputePosVolumeAndCuttedArea(ModelPart& ThisModelPart, double& pos_volume, double& cutted_area) { KRATOS_TRY; int elem_size = ThisModelPart.Elements().size(); double wetvol = 0.0; double cutare = 0.0; #pragma omp parallel for firstprivate(elem_size) reduction(+:wetvol,cutare) for(int ii = 0; ii<elem_size; ii++) { PointerVector< Element>::iterator iel=ThisModelPart.ElementsBegin()+ii; // Calculate this element's geometric parameters double Area; array_1d<double, 4> N; BoundedMatrix<double, 4, 3> DN_DX; GeometryUtils::CalculateGeometryData(iel->GetGeometry(), DN_DX, N, Area); //get position of the cut surface Vector distances(4); Matrix Nenriched(6, 1); Vector volumes(6); Matrix coords(4, 3); Matrix Ngauss(6, 4); Vector signs(6); std::vector< Matrix > gauss_gradients(6); //fill coordinates for (unsigned int i = 0; i < 4; i++) { const array_1d<double, 3 > & xyz = iel->GetGeometry()[i].Coordinates(); volumes[i] = 0.0; distances[i] = iel->GetGeometry()[i].FastGetSolutionStepValue(DISTANCE); for (unsigned int j = 0; j < 3; j++) coords(i, j) = xyz[j]; } for (unsigned int i = 0; i < 6; i++) gauss_gradients[i].resize(1, 3, false); array_1d<double,6> edge_areas; unsigned int ndivisions = EnrichmentUtilities::CalculateTetrahedraEnrichedShapeFuncions(coords, DN_DX, distances, volumes, Ngauss, signs, gauss_gradients, Nenriched,edge_areas); if(ndivisions == 1) { if( signs[0] > 0.0) wetvol += volumes[0]; } else { double ele_wet_volume=0.0; for (unsigned int kk = 0; kk < ndivisions; kk++) { if( signs[kk]>0.0 ) ele_wet_volume += volumes[kk]; } wetvol += ele_wet_volume; for(unsigned int i=0; i<6; i++) cutare += edge_areas[i]; //cutare += 1.80140543 * pow(ele_wet_volume,0.666666666667); // equilateral tetrahedraon is considered } } //syncronoze pos_volume = ThisModelPart.GetCommunicator().GetDataCommunicator().SumAll(wetvol); cutted_area = 
ThisModelPart.GetCommunicator().GetDataCommunicator().SumAll(cutare); KRATOS_CATCH("") } double ComputeCharactristicCuttedLength(ModelPart& ThisModelPart) { KRATOS_TRY; double max_cutted_len = 0.0; double cnt = 0.0; array_1d<double,4> dist; int elem_size = ThisModelPart.Elements().size(); #pragma omp parallel for private(dist) firstprivate(elem_size) for (int i = 0; i < elem_size; i++) { PointerVector< Element>::iterator it = ThisModelPart.ElementsBegin() + i; Geometry<Node < 3 > >& element_geometry = it->GetGeometry(); for (unsigned int i = 0; i < 4; i++) dist[i] = element_geometry[i].FastGetSolutionStepValue(DISTANCE); bool is_divided = IsDivided(dist); if (is_divided == true) { #pragma omp atomic cnt+= 1.0; double max_pos_dist = 0.0; double min_neg_dist = 0.0; for (unsigned int ii = 0; ii < 4; ii++) { if ( dist[ii] > max_pos_dist) max_pos_dist = dist[ii]; else if( dist[ii] < min_neg_dist) min_neg_dist = dist[ii]; } double this_ele_dist = max_pos_dist - min_neg_dist ; if(this_ele_dist > max_cutted_len) max_cutted_len = this_ele_dist; } } return max_cutted_len; KRATOS_CATCH("") } bool IsDivided(array_1d<double,4>& dist) { unsigned int positive = 0; unsigned int negative = 0; for(unsigned int i=0; i<4; i++) { if(dist[i] >= 0) positive++; else negative++; } bool is_divided = false; if(positive > 0 && negative>0) is_divided = true; return is_divided; } void CorrectTemperatureInAddedVolume(ModelPart& ThisModelPart, const double correction) { //compute min temp in wet part ( edge and corners are excluded) double min_wet_temp = ThisModelPart.GetProcessInfo()[FLUID_TEMPERATURE]; #pragma omp parallel for for (int k = 0; k< static_cast<int> (ThisModelPart.Nodes().size()); k++) { ModelPart::NodesContainerType::iterator i_node = ThisModelPart.NodesBegin() + k; double distance = i_node->GetSolutionStepValue(DISTANCE); double slip_falg = i_node->GetSolutionStepValue(IS_SLIP); if(distance <=0.0 && slip_falg!=20.0 && slip_falg!=30.0) { double temp_node = 
i_node->FastGetSolutionStepValue(TEMPERATURE); if (temp_node < min_wet_temp) min_wet_temp = temp_node; } } //assign to TEMPERATURE in the added zone #pragma omp parallel for for (int k = 0; k< static_cast<int> (ThisModelPart.Nodes().size()); k++) { ModelPart::NodesContainerType::iterator i_node = ThisModelPart.NodesBegin() + k; double distance = i_node->GetSolutionStepValue(DISTANCE); if(distance >= 0.0 && distance <= 5.0 * correction) { i_node->FastGetSolutionStepValue(TEMPERATURE) = min_wet_temp; i_node->FastGetSolutionStepValue(TEMPERATURE,1) = min_wet_temp; } } } }; } // namespace Kratos. #endif // KRATOS_BIPHASIC_FILLING_UTILITIES_INCLUDED defined
ten_tusscher_2004_epi_S2_2.c
//Original Ten Tusscher
// ten Tusscher et al. 2004 human ventricular (EPI variant) cell model, CPU solver,
// with Elnaz's fitted steady-state initial conditions and conductance parameters.
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S2_2.h"

// Reports the model's resting potential and number of state variables to the caller.
GET_CELL_MODEL_DATA(init_cell_model_data) {

    assert(cell_model);

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;

    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

//TODO: this should be called only once for the whole mesh, like in the GPU code
// Initialises the NEQ state variables of one cell.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    // Default initial conditions
    /*
    sv[0] = INITIAL_V;     // V;       millivolt
    sv[1] = 0.f;   //M
    sv[2] = 0.75; //H
    sv[3] = 0.75f; //J
    sv[4] = 0.f; //Xr1
    sv[5] = 1.f; //Xr2
    sv[6] = 0.f; //Xs
    sv[7] = 1.f; //S
    sv[8] = 0.f; //R
    sv[9] = 0.f; //D
    sv[10] = 1.f; //F
    sv[11] = 1.f; //FCa
    sv[12] = 1.f; //G
    sv[13] = 0.0002; //Cai
    sv[14] = 0.2f; //CaSR
    sv[15] = 11.6f; //Nai
    sv[16] = 138.3f; //Ki
    */

    // Elnaz's steady-state initial conditions
    // (same state ordering as the commented default block above: V, M, H, J, Xr1, Xr2,
    //  Xs, S, R, D, F, FCa, G, Cai, CaSR, Nai, Ki)
    real sv_sst[]={-86.5625425078510,0.00129164511648619,0.779570574758225,0.779427091418077,0.000174878991569467,0.485030733457084,0.00294149421393105,0.999998346195388,1.93532833226023e-08,1.89250710693833e-05,0.999770305344151,1.00711648268532,0.999995670118449,4.46785769336173e-05,0.704594271439916,9.53343199663547,139.935102489521};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}

// Advances every requested cell by num_steps explicit steps of size dt,
// in parallel over cells (each cell's state block is independent).
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        // cells_to_solve may be NULL, in which case the cells are solved in order.
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One time step of one cell: copies the current state, evaluates the RHS,
// then commits the updated state (RHS_cpu returns the NEW state values, not
// derivatives — it applies forward-Euler/Rush-Larsen internally).
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {

    assert(sv);

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// Evaluates one integration step of the TT2004 model. Gating variables use the
// Rush-Larsen exponential update; concentrations and V use forward Euler.
// rDY_ receives the UPDATED state (same ordering as sv).
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];   // membrane potential (mV)
    real sm    = sv[1];   // INa activation
    real sh    = sv[2];   // INa fast inactivation
    real sj    = sv[3];   // INa slow inactivation
    real sxr1  = sv[4];   // IKr activation
    real sxr2  = sv[5];   // IKr inactivation
    real sxs   = sv[6];   // IKs activation
    real ss    = sv[7];   // Ito inactivation
    real sr    = sv[8];   // Ito activation
    real sd    = sv[9];   // ICaL activation
    real sf    = sv[10];  // ICaL voltage inactivation
    real sfca  = sv[11];  // ICaL calcium inactivation
    real sg    = sv[12];  // CICR gate
    real Cai   = sv[13];  // cytosolic [Ca2+]
    real CaSR  = sv[14];  // SR [Ca2+]
    real Nai   = sv[15];  // intracellular [Na+]
    real Ki    = sv[16];  // intracellular [K+]

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    ///    real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    ///    real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    //#ifdef EPI
    real Gto=0.294;
    //#endif
    //#ifdef ENDO
    //    real Gto=0.073;
    //#endif
    //#ifdef MCELL
    //    real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted parameter set overriding the defaults above (order: GNa, GbNa, GCaL, GbCa,
    // Gto, Gkr, Gks, GK1, GpK, knak, knaca, Vmaxup, GpCa, arel, crel, Vleak).
    real parameters []={13.9775467344317,0.000166600423473182,0.000157288679125758,0.000709118450301612,0.263558270150583,0.168176898499067,0.121036017649477,3.67579958026615,0.0132247972184402,2.23991491317412,1099.99539877590,0.000482074874077319,0.582903159280657,0.0176425810465345,0.00547174746535614,2.73565215234459e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    // Reversal potentials (Nernst) and inward-rectifier / pump rectification factors.
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // SR calcium with instantaneous buffering (quadratic solved analytically).
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // Cytosolic calcium, same buffering treatment.
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // Cell-type specific Ito gating (this translation unit is built with EPI defined;
    // the other variants are kept for completeness).
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen: x <- x_inf - (x_inf - x) * exp(-dt/tau)).
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g only relax while V <= -37 mV or while decreasing (one-way gates).
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
/// Stmt - The root of the AST statement/expression class hierarchy.
/// Aligned to a pointer so the low bits of a Stmt* stay available for
/// pointer-tagging (see the allocation operators below).
class alignas(void *) Stmt {
public:
  // Enumerate every concrete statement class, generated from StmtNodes.inc;
  // *_RANGE entries define [first..last] constants used by classof() checks.
  enum StmtClass {
    NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
  first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
  first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
  };

  // Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
  friend class ASTStmtReader;
  friend class ASTStmtWriter;

  void *operator new(size_t bytes) noexcept {
    llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
  }

  void operator delete(void *data) noexcept {
    llvm_unreachable("Stmts cannot be released with regular 'delete'.");
  }

  // The *Bitfields classes below all alias the same 32 bits of storage via
  // the anonymous union at the end of this class.  Each one re-declares the
  // bits consumed by its base class(es) as anonymous padding, so the widths
  // here are layout-critical — see the static_asserts in Stmt's constructor.

  class StmtBitfields {
    friend class Stmt;

    /// \brief The statement class.
    unsigned sClass : 8;
  };
  enum { NumStmtBits = 8 };

  class CompoundStmtBitfields {
    friend class CompoundStmt;

    unsigned : NumStmtBits;

    unsigned NumStmts : 32 - NumStmtBits;
  };

  class IfStmtBitfields {
    friend class IfStmt;

    unsigned : NumStmtBits;

    unsigned IsConstexpr : 1;
  };

  class ExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class AtomicExpr; // ctor
    friend class BlockDeclRefExpr; // ctor
    friend class CallExpr; // ctor
    friend class CXXConstructExpr; // ctor
    friend class CXXDependentScopeMemberExpr; // ctor
    friend class CXXNewExpr; // ctor
    friend class CXXUnresolvedConstructExpr; // ctor
    friend class DeclRefExpr; // computeDependence
    friend class DependentScopeDeclRefExpr; // ctor
    friend class DesignatedInitExpr; // ctor
    friend class Expr;
    friend class InitListExpr; // ctor
    friend class ObjCArrayLiteral; // ctor
    friend class ObjCDictionaryLiteral; // ctor
    friend class ObjCMessageExpr; // ctor
    friend class OffsetOfExpr; // ctor
    friend class OpaqueValueExpr; // ctor
    friend class OverloadExpr; // ctor
    friend class ParenListExpr; // ctor
    friend class PseudoObjectExpr; // ctor
    friend class ShuffleVectorExpr; // ctor

    unsigned : NumStmtBits;

    unsigned ValueKind : 2;
    unsigned ObjectKind : 3;
    unsigned TypeDependent : 1;
    unsigned ValueDependent : 1;
    unsigned InstantiationDependent : 1;
    unsigned ContainsUnexpandedParameterPack : 1;
  };
  enum { NumExprBits = 17 };

  class CharacterLiteralBitfields {
    friend class CharacterLiteral;

    unsigned : NumExprBits;

    unsigned Kind : 3;
  };

  enum APFloatSemantics {
    IEEEhalf,
    IEEEsingle,
    IEEEdouble,
    x87DoubleExtended,
    IEEEquad,
    PPCDoubleDouble
  };

  class FloatingLiteralBitfields {
    friend class FloatingLiteral;

    unsigned : NumExprBits;

    unsigned Semantics : 3; // Provides semantics for APFloat construction
    unsigned IsExact : 1;
  };

  class UnaryExprOrTypeTraitExprBitfields {
    friend class UnaryExprOrTypeTraitExpr;

    unsigned : NumExprBits;

    unsigned Kind : 2;
    unsigned IsType : 1; // true if operand is a type, false if an expression.
  };

  class DeclRefExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class DeclRefExpr;

    unsigned : NumExprBits;

    unsigned HasQualifier : 1;
    unsigned HasTemplateKWAndArgsInfo : 1;
    unsigned HasFoundDecl : 1;
    unsigned HadMultipleCandidates : 1;
    unsigned RefersToEnclosingVariableOrCapture : 1;
  };

  class CastExprBitfields {
    friend class CastExpr;

    unsigned : NumExprBits;

    unsigned Kind : 6;
    unsigned BasePathSize : 32 - 6 - NumExprBits;
  };

  class CallExprBitfields {
    friend class CallExpr;

    unsigned : NumExprBits;

    unsigned NumPreArgs : 1;
  };

  class ExprWithCleanupsBitfields {
    friend class ASTStmtReader; // deserialization
    friend class ExprWithCleanups;

    unsigned : NumExprBits;

    // When false, it must not have side effects.
    unsigned CleanupsHaveSideEffects : 1;

    unsigned NumObjects : 32 - 1 - NumExprBits;
  };

  class PseudoObjectExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class PseudoObjectExpr;

    unsigned : NumExprBits;

    // These don't need to be particularly wide, because they're
    // strictly limited by the forms of expressions we permit.
    unsigned NumSubExprs : 8;
    unsigned ResultIndex : 32 - 8 - NumExprBits;
  };

  class ObjCIndirectCopyRestoreExprBitfields {
    friend class ObjCIndirectCopyRestoreExpr;

    unsigned : NumExprBits;

    unsigned ShouldCopy : 1;
  };

  class InitListExprBitfields {
    friend class InitListExpr;

    unsigned : NumExprBits;

    /// Whether this initializer list originally had a GNU array-range
    /// designator in it. This is a temporary marker used by CodeGen.
    unsigned HadArrayRangeDesignator : 1;
  };

  class TypeTraitExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class TypeTraitExpr;

    unsigned : NumExprBits;

    /// \brief The kind of type trait, which is a value of a TypeTrait enumerator.
    unsigned Kind : 8;

    /// \brief If this expression is not value-dependent, this indicates whether
    /// the trait evaluated true or false.
    unsigned Value : 1;

    /// \brief The number of arguments to this type trait.
    unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
  };

  class CoawaitExprBitfields {
    friend class CoawaitExpr;

    unsigned : NumExprBits;

    unsigned IsImplicit : 1;
  };

  // All of the above share this storage; which member is active is implied
  // by the node's StmtClass (sClass is readable through every member).
  union {
    StmtBitfields StmtBits;
    CompoundStmtBitfields CompoundStmtBits;
    IfStmtBitfields IfStmtBits;
    ExprBitfields ExprBits;
    CharacterLiteralBitfields CharacterLiteralBits;
    FloatingLiteralBitfields FloatingLiteralBits;
    UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
    DeclRefExprBitfields DeclRefExprBits;
    CastExprBitfields CastExprBits;
    CallExprBitfields CallExprBits;
    ExprWithCleanupsBitfields ExprWithCleanupsBits;
    PseudoObjectExprBitfields PseudoObjectExprBits;
    ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
    InitListExprBitfields InitListExprBits;
    TypeTraitExprBitfields TypeTraitExprBits;
    CoawaitExprBitfields CoawaitBits;
  };

public:
  // Only allow allocation of Stmts using the allocator in ASTContext
  // or by doing a placement new.
  void* operator new(size_t bytes, const ASTContext& C,
                     unsigned alignment = 8);

  void* operator new(size_t bytes, const ASTContext* C,
                     unsigned alignment = 8) {
    return operator new(bytes, *C, alignment);
  }

  void *operator new(size_t bytes, void *mem) noexcept { return mem; }

  void operator delete(void *, const ASTContext &, unsigned) noexcept {}
  void operator delete(void *, const ASTContext *, unsigned) noexcept {}
  void operator delete(void *, size_t) noexcept {}
  void operator delete(void *, void *) noexcept {}

public:
  /// \brief A placeholder type used to construct an empty shell of a
  /// type, that will be filled in later (e.g., by some
  /// de-serialization).
  struct EmptyShell {};

protected:
  /// Iterator for iterating over Stmt * arrays that contain only Expr *
  ///
  /// This is needed because AST nodes use Stmt* arrays to store
  /// references to children (to be compatible with StmtIterator).
  struct ExprIterator
      : llvm::iterator_adaptor_base<ExprIterator, Stmt **,
                                    std::random_access_iterator_tag, Expr *> {
    ExprIterator() : iterator_adaptor_base(nullptr) {}
    ExprIterator(Stmt **I) : iterator_adaptor_base(I) {}

    reference operator*() const {
      // Only valid when the pointee really is an Expr; checked via the
      // firstExprConstant..lastExprConstant StmtClass range.
      assert((*I)->getStmtClass() >= firstExprConstant &&
             (*I)->getStmtClass() <= lastExprConstant);
      return *reinterpret_cast<Expr **>(I);
    }
  };

  /// Const iterator for iterating over Stmt * arrays that contain only Expr *
  struct ConstExprIterator
      : llvm::iterator_adaptor_base<ConstExprIterator, const Stmt *const *,
                                    std::random_access_iterator_tag,
                                    const Expr *const> {
    ConstExprIterator() : iterator_adaptor_base(nullptr) {}
    ConstExprIterator(const Stmt *const *I) : iterator_adaptor_base(I) {}

    reference operator*() const {
      assert((*I)->getStmtClass() >= firstExprConstant &&
             (*I)->getStmtClass() <= lastExprConstant);
      return *reinterpret_cast<const Expr *const *>(I);
    }
  };

private:
  /// \brief Whether statistic collection is enabled.
  static bool StatisticsEnabled;

protected:
  /// \brief Construct an empty statement.
  explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  Stmt(StmtClass SC) {
    // Keep Stmt exactly one pointer in size: the bitfield union above is the
    // only data member, so growing it would change every AST node's layout.
    static_assert(sizeof(*this) == sizeof(void *),
                  "changing bitfields changed sizeof(Stmt)");
    static_assert(sizeof(*this) % alignof(void *) == 0,
                  "Insufficient alignment!");
    StmtBits.sClass = SC;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }

  StmtClass getStmtClass() const {
    return static_cast<StmtClass>(StmtBits.sClass);
  }

  const char *getStmtClassName() const;

  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getLocStart() const LLVM_READONLY;
  SourceLocation getLocEnd() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// \brief Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  void dump() const;
  void dump(SourceManager &SM) const;
  void dump(raw_ostream &OS, SourceManager &SM) const;
  void dump(raw_ostream &OS) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy, unsigned Indentation = 0,
                   const ASTContext *Context = nullptr) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz.  Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip past any implicit AST nodes which might surround this
  /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes.
  Stmt *IgnoreImplicit();
  const Stmt *IgnoreImplicit() const {
    return const_cast<Stmt *>(this)->IgnoreImplicit();
  }

  /// \brief Skip no-op (attributed, compound) container stmts and skip captured
  /// stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);
  const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
    return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
  }

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
        const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpessions of an
  /// AST node.  This permits easy iteration over all nodes in the AST.
  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;

  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();

  const_child_range children() const {
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// \brief Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;

  /// \brief Calculate a unique representation for a statement that is
  /// stable across compiler invocations.
  ///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  /// have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// \brief Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
bool isSingleDecl() const { return DG.isSingleDecl(); } const Decl *getSingleDecl() const { return DG.getSingleDecl(); } Decl *getSingleDecl() { return DG.getSingleDecl(); } const DeclGroupRef getDeclGroup() const { return DG; } DeclGroupRef getDeclGroup() { return DG; } void setDeclGroup(DeclGroupRef DGR) { DG = DGR; } SourceLocation getStartLoc() const { return StartLoc; } void setStartLoc(SourceLocation L) { StartLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DeclStmtClass; } // Iterators over subexpressions. child_range children() { return child_range(child_iterator(DG.begin(), DG.end()), child_iterator(DG.end(), DG.end())); } using decl_iterator = DeclGroupRef::iterator; using const_decl_iterator = DeclGroupRef::const_iterator; using decl_range = llvm::iterator_range<decl_iterator>; using decl_const_range = llvm::iterator_range<const_decl_iterator>; decl_range decls() { return decl_range(decl_begin(), decl_end()); } decl_const_range decls() const { return decl_const_range(decl_begin(), decl_end()); } decl_iterator decl_begin() { return DG.begin(); } decl_iterator decl_end() { return DG.end(); } const_decl_iterator decl_begin() const { return DG.begin(); } const_decl_iterator decl_end() const { return DG.end(); } using reverse_decl_iterator = std::reverse_iterator<decl_iterator>; reverse_decl_iterator decl_rbegin() { return reverse_decl_iterator(decl_end()); } reverse_decl_iterator decl_rend() { return reverse_decl_iterator(decl_begin()); } }; /// NullStmt - This is the null statement ";": C99 6.8.3p3. 
///
class NullStmt : public Stmt {
  SourceLocation SemiLoc; // Location of the ';' itself.

  /// \brief True if the null statement was preceded by an empty macro, e.g:
  /// @code
  ///   #define CALL(x)
  ///   CALL(0);
  /// @endcode
  bool HasLeadingEmptyMacro = false;

public:
  friend class ASTStmtReader;
  friend class ASTStmtWriter;

  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass), SemiLoc(L),
        HasLeadingEmptyMacro(hasLeadingEmptyMacro) {}

  /// \brief Build an empty null statement.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  SourceLocation getSemiLoc() const { return SemiLoc; }
  void setSemiLoc(SourceLocation L) { SemiLoc = L; }

  bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; }

  SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  // A null statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// CompoundStmt - This represents a group of statements like { stmt stmt }.
/// The contained Stmt* array is allocated immediately after the object
/// (llvm::TrailingObjects); its length lives in CompoundStmtBits.NumStmts.
class CompoundStmt final
    : public Stmt,
      private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  SourceLocation LBraceLoc, RBraceLoc;

  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
  explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
                              SourceLocation LB, SourceLocation RB);

  // \brief Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), LBraceLoc(Loc), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
  }

  // \brief Build an empty compound statement.
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  void setLastStmt(Stmt *S) {
    assert(!body_empty() && "setLastStmt");
    body_begin()[size() - 1] = S;
  }

  using const_body_iterator = Stmt* const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  SourceLocation getLocStart() const LLVM_READONLY { return LBraceLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};

// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
  // A pointer to the following CaseStmt or DefaultStmt class,
  // used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;
  SourceLocation KeywordLoc; // Location of 'case'/'default'.
  SourceLocation ColonLoc;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), KeywordLoc(KWLoc), ColonLoc(ColonLoc) {}

  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase*>(this)->getSubStmt();
  }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};

class CaseStmt : public SwitchCase {
  SourceLocation EllipsisLoc;
  enum { LHS, RHS, SUBSTMT, END_EXPR };
  Stmt* SubExprs[END_EXPR];  // The expression for the RHS is Non-null for
                             // GNU "case 1 ... 4" extension

public:
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    SubExprs[SUBSTMT] = nullptr;
    SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs);
    SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs);
    EllipsisLoc = ellipsisLoc;
  }

  /// \brief Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) {}

  SourceLocation getCaseLoc() const { return KeywordLoc; }
  void setCaseLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
  void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); }
  Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); }
  Stmt *getSubStmt() { return SubExprs[SUBSTMT]; }

  const Expr *getLHS() const {
    return reinterpret_cast<const Expr*>(SubExprs[LHS]);
  }

  const Expr *getRHS() const {
    return reinterpret_cast<const Expr*>(SubExprs[RHS]);
  }

  const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; }

  void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; }
  void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); }
  void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }

  SourceLocation getLocEnd() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[END_EXPR]);
  }
};

class DefaultStmt : public SwitchCase {
  Stmt* SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// \brief Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return KeywordLoc; }
  void setDefaultLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt+1); }
};

// Out-of-line so both CaseStmt and DefaultStmt are complete here.
inline SourceLocation SwitchCase::getLocEnd() const {
  if (const CaseStmt *CS = dyn_cast<CaseStmt>(this))
    return CS->getLocEnd();
  return cast<DefaultStmt>(this)->getLocEnd();
}

/// LabelStmt - Represents a label, which has a substatement.  For example:
///    foo: return;
class LabelStmt : public Stmt {
  SourceLocation IdentLoc;
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : Stmt(LabelStmtClass), IdentLoc(IL), TheDecl(D), SubStmt(substmt) {
    static_assert(sizeof(LabelStmt) == 2 * sizeof(SourceLocation) +
                  2 * sizeof(void *), "LabelStmt too big");
  }

  // \brief Build an empty label statement.
explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) {}

  SourceLocation getIdentLoc() const { return IdentLoc; }
  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }
  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setIdentLoc(SourceLocation L) { IdentLoc = L; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}

  child_range children() { return child_range(&SubStmt, &SubStmt+1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};

/// \brief Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
///
/// The Attr* array is stored as trailing objects after this node.
class AttributedStmt final
    : public Stmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  Stmt *SubStmt;
  SourceLocation AttrLoc;
  unsigned NumAttrs;

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs,
                 Stmt *SubStmt)
      : Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc),
        NumAttrs(Attrs.size()) {
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) {
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr*> Attrs, Stmt *SubStmt);

  // \brief Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttrLoc; }

  ArrayRef<const Attr*> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};

/// IfStmt - This represents an if/then/else.
class IfStmt : public Stmt {
  enum { INIT, VAR, COND, THEN, ELSE, END_EXPR };
  Stmt* SubExprs[END_EXPR];

  SourceLocation IfLoc;
  SourceLocation ElseLoc;

public:
  IfStmt(const ASTContext &C, SourceLocation IL,
         bool IsConstexpr, Stmt *init, VarDecl *var, Expr *cond,
         Stmt *then, SourceLocation EL = SourceLocation(),
         Stmt *elsev = nullptr);

  /// \brief Build an empty if/then/else statement
  explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) {}

  /// \brief Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  Stmt *getInit() { return SubExprs[INIT]; }
  const Stmt *getInit() const { return SubExprs[INIT]; }
  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
  const Stmt *getThen() const { return SubExprs[THEN]; }
  void setThen(Stmt *S) { SubExprs[THEN] = S; }
  const Stmt *getElse() const { return SubExprs[ELSE]; }
  void setElse(Stmt *S) { SubExprs[ELSE] = S; }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Stmt *getThen() { return SubExprs[THEN]; }
  Stmt *getElse() { return SubExprs[ELSE]; }

  SourceLocation getIfLoc() const { return IfLoc; }
  void setIfLoc(SourceLocation L) { IfLoc = L; }
  SourceLocation getElseLoc() const { return ElseLoc; }
  void setElseLoc(SourceLocation L) { ElseLoc = L; }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  bool isObjCAvailabilityCheck() const;

  SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; }

  SourceLocation getLocEnd() const LLVM_READONLY {
    if (SubExprs[ELSE])
      return SubExprs[ELSE]->getLocEnd();
    else
      return SubExprs[THEN]->getLocEnd();
  }

  // Iterators over subexpressions.  The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt : public Stmt {
  SourceLocation SwitchLoc;
  enum { INIT, VAR, COND, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR];

  // This points to a linked list of case and default statements and, if the
  // SwitchStmt is a switch on an enum value, records whether all the enum
  // values were covered by CaseStmts.  The coverage information value is meant
  // to be a hint for possible clients.
  llvm::PointerIntPair<SwitchCase *, 1, bool> FirstCase;

public:
  SwitchStmt(const ASTContext &C, Stmt *Init, VarDecl *Var, Expr *cond);

  /// \brief Build a empty switch statement.
  explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) {}

  /// \brief Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  ///   case 0: break;
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  Stmt *getInit() { return SubExprs[INIT]; }
  const Stmt *getInit() const { return SubExprs[INIT]; }
  void setInit(Stmt *S) { SubExprs[INIT] = S; }

  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Stmt *getBody() const { return SubExprs[BODY]; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase.getPointer(); }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }
  SwitchCase *getSwitchCaseList() { return FirstCase.getPointer(); }

  /// \brief Set the case list for this switch statement.
  void setSwitchCaseList(SwitchCase *SC) { FirstCase.setPointer(SC); }

  SourceLocation getSwitchLoc() const { return SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    SubExprs[BODY] = S;
    SwitchLoc = SL;
  }

  // Prepends SC onto the singly linked case list (see NextSwitchCase).
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase.getPointer());
    FirstCase.setPointer(SC);
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { FirstCase.setInt(true); }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const { return FirstCase.getInt(); }

  SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; }

  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY] ? SubExprs[BODY]->getLocEnd()
                          : SubExprs[COND]->getLocEnd();
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};

/// WhileStmt - This represents a 'while' stmt.
class WhileStmt : public Stmt {
  SourceLocation WhileLoc;
  enum { VAR, COND, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR];

public:
  WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
            SourceLocation WL);

  /// \brief Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) {}

  /// \brief Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
/// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; }

  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY]->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  SourceLocation DoLoc;
  enum { BODY, COND, END_EXPR };
  Stmt* SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc;  // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) {
    SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
    SubExprs[BODY] = body;
  }

  /// \brief Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getDoLoc() const { return DoLoc; }
  void setDoLoc(SourceLocation L) { DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  SourceLocation ForLoc;
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// \brief Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }

  /// \brief Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getForLoc() const { return ForLoc; }
  void setForLoc(SourceLocation L) { ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; }

  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY]->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation GotoLoc;
  SourceLocation LabelLoc;

public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {}

  /// \brief Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  SourceLocation getGotoLoc() const { return GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators: a direct goto has no sub-statements, so the range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation GotoLoc;
  SourceLocation StarLoc;
  // The target address expression, stored type-erased as a Stmt*.
  Stmt *Target;

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc,
                   Expr *target)
      : Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc),
        Target((Stmt*)target) {}

  /// \brief Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  void setGotoLoc(SourceLocation L) { GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  Expr *getTarget() { return reinterpret_cast<Expr*>(Target); }
  const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);}
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt*>(this)->getConstantTarget();
  }

  SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators: the only child is the target expression.
  child_range children() { return child_range(&Target, &Target+1); }
};

/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
  SourceLocation ContinueLoc;

public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {}

  /// \brief Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  SourceLocation getContinueLoc() const { return ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators: no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
  SourceLocation BreakLoc;

public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) {
    // Guard against accidental size growth: BreakStmt is extremely common.
    static_assert(sizeof(BreakStmt) == 2 * sizeof(SourceLocation),
                  "BreakStmt too large");
  }

  /// \brief Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  SourceLocation getBreakLoc() const { return BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators: no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt : public Stmt {
  SourceLocation RetLoc;
  Stmt *RetExpr;
  const VarDecl *NRVOCandidate;

public:
  explicit ReturnStmt(SourceLocation RL) : ReturnStmt(RL, nullptr, nullptr) {}

  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate)
      : Stmt(ReturnStmtClass), RetLoc(RL), RetExpr((Stmt *)E),
        NRVOCandidate(NRVOCandidate) {}

  /// \brief Build an empty return expression.
  explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) {}

  const Expr *getRetValue() const;
  Expr *getRetValue();
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); }

  SourceLocation getReturnLoc() const { return RetLoc; }
  void setReturnLoc(SourceLocation L) { RetLoc = L; }

  /// \brief Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const { return NRVOCandidate; }
  void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; }

  SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return RetExpr ? RetExpr->getLocEnd() : RetLoc;
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators: at most one child (the returned expression, if present).
  child_range children() {
    if (RetExpr) return child_range(&RetExpr, &RetExpr+1);
    return child_range(child_iterator(), child_iterator());
  }
};

/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;

  SourceLocation AsmLoc;

  /// \brief True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// \brief If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  // Operand expressions: outputs first, then inputs (see the iterator
  // accessors in the derived classes, which offset by NumOutputs).
  Stmt **Exprs = nullptr;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}

public:
  /// \brief Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  // The base class has no usable locations; derived classes override these.
  SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand.  All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint.  Unlike output
  /// constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators.
  // Inputs are stored after the outputs in Exprs, hence the NumOutputs offset.

  using inputs_iterator = ExprIterator;
  using const_inputs_iterator = ConstExprIterator;
  using inputs_range = llvm::iterator_range<inputs_iterator>;
  using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;

  inputs_iterator begin_inputs() {
    return &Exprs[0] + NumOutputs;
  }

  inputs_iterator end_inputs() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const {
    return &Exprs[0] + NumOutputs;
  }

  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators.

  using outputs_iterator = ExprIterator;
  using const_outputs_iterator = ConstExprIterator;
  using outputs_range = llvm::iterator_range<outputs_iterator>;
  using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;

  outputs_iterator begin_outputs() {
    return &Exprs[0];
  }

  outputs_iterator end_outputs() {
    return &Exprs[0] + NumOutputs;
  }

  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const {
    return &Exprs[0];
  }

  const_outputs_iterator end_outputs() const {
    return &Exprs[0] + NumOutputs;
  }

  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};

/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation RParenLoc;
  StringLiteral *AsmStr;

  // FIXME: If we wanted to, we could allocate all of these in one big array.
  StringLiteral **Constraints = nullptr;
  StringLiteral **Clobbers = nullptr;
  IdentifierInfo **Names = nullptr;

public:
  GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
             bool isvolatile, unsigned numoutputs, unsigned numinputs,
             IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
             StringLiteral *asmstr, unsigned numclobbers,
             StringLiteral **clobbers, SourceLocation rparenloc);

  /// \brief Build an empty inline-assembly statement.
  explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  //===--- Asm String Analysis ---===//

  const StringLiteral *getAsmString() const { return AsmStr; }
  StringLiteral *getAsmString() { return AsmStr; }
  void setAsmString(StringLiteral *E) { AsmStr = E; }

  /// AsmStringPiece - this is part of a decomposed asm string specification
  /// (for use with the AnalyzeAsmString function below).  An asm string is
  /// considered to be a concatenation of these parts.
  class AsmStringPiece {
  public:
    enum Kind {
      String,  // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
      Operand  // Operand reference, with optional modifier %c4.
    };

  private:
    Kind MyKind;
    std::string Str;
    unsigned OperandNo;

    // Source range for operand references.
    CharSourceRange Range;

  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
        : MyKind(Operand), Str(S), OperandNo(OpNo),
          Range(CharSourceRange::getCharRange(Begin, End)) {}

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const { return Str; }

    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present.  This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };

  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces.  If the asm string is erroneous, emit errors and return
  /// true, otherwise return false.  This handles canonicalization and
  /// translation of strings from GCC syntax to LLVM IR syntax, and handles
  //// flattening of named references like %[foo] to Operand AsmStringPiece's.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }

  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();

    return StringRef();
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Names and Constraints are shared arrays: outputs occupy [0, NumOutputs),
  // inputs follow at [NumOutputs, NumOutputs + NumInputs).
  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();

    return StringRef();
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
  }

private:
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);

public:
  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
  int getNamedOperand(StringRef SymbolicName) const;

  StringRef getClobber(unsigned i) const;

  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }

  SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};

/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation LBraceLoc, EndLoc;
  StringRef AsmStr;

  unsigned NumAsmToks = 0;

  Token *AsmToks = nullptr;
  StringRef *Constraints = nullptr;
  StringRef *Clobbers = nullptr;

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints,
            ArrayRef<Expr*> exprs, StringRef asmstr,
            ArrayRef<StringRef> clobbers, SourceLocation endloc);

  /// \brief Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//

  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // As in GCCAsmStmt, inputs follow outputs in the shared Constraints array.
  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Other ---===//

  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }

  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }

  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);

public:
  SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};

class SEHExceptStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  // Children[FILTER_EXPR] is the __except filter; Children[BLOCK] the body.
  Stmt *Children[2];

  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}

public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);

  SourceLocation getLocStart() const LLVM_READONLY { return getExceptLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); }

  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() {
    return child_range(Children,Children+2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};

class SEHFinallyStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  Stmt *Block;

  SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}

public:
  static SEHFinallyStmt* Create(const ASTContext &C,
                                SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getLocEnd(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block,&Block+1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};

class SEHTryStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  bool IsCXXTry;
  SourceLocation TryLoc;
  // Children[TRY] is the guarded block; Children[HANDLER] is either a
  // SEHExceptStmt or a SEHFinallyStmt (see getExceptHandler/getFinallyHandler).
  Stmt *Children[2];

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler);
  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() {
    return child_range(Children,Children+2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};

/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;

public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// \brief Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators: no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// \brief This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// \brief The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
  enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_ByCopy,
    VCK_VLAType,
  };

  /// \brief Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
  class Capture {
    llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
    SourceLocation Loc;

  public:
    friend class ASTStmtReader;

    /// \brief Create a new capture.
    ///
    /// \param Loc The source location associated with this capture.
    ///
    /// \param Kind The kind of capture (this, ByRef, ...).
    ///
    /// \param Var The variable being captured, or null if capturing this.
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr);

    /// \brief Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const;

    /// \brief Retrieve the source location at which the variable or 'this' was
    /// first used.
    SourceLocation getLocation() const { return Loc; }

    /// \brief Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }

    /// \brief Determine whether this capture handles a variable (by reference).
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }

    /// \brief Determine whether this capture handles a variable by copy.
    bool capturesVariableByCopy() const {
      return getCaptureKind() == VCK_ByCopy;
    }

    /// \brief Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }

    /// \brief Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable.
    VarDecl *getCapturedVar() const;
  };

private:
  /// \brief The number of variable captured, including 'this'.
  unsigned NumCaptures;

  /// \brief The pointer part is the implicit the outlined function and the
  /// int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind;

  /// \brief The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl = nullptr;

  /// \brief Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);

  /// \brief Construct an empty captured statement.
  CapturedStmt(EmptyShell Empty, unsigned NumCaptures);

  // Trailing storage: capture-init expressions live in the NumCaptures slots
  // immediately after the object, with the captured statement at index
  // NumCaptures (see setCapturedStmt/getCapturedStmt below).
  Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }

  Stmt *const *getStoredStmts() const {
    return reinterpret_cast<Stmt *const *>(this + 1);
  }

  Capture *getStoredCaptures() const;

  void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }

public:
  friend class ASTStmtReader;

  static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
                              CapturedRegionKind Kind,
                              ArrayRef<Capture> Captures,
                              ArrayRef<Expr *> CaptureInits,
                              CapturedDecl *CD, RecordDecl *RD);

  static CapturedStmt *CreateDeserialized(const ASTContext &Context,
                                          unsigned NumCaptures);

  /// \brief Retrieve the statement being captured.
  Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
  const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }

  /// \brief Retrieve the outlined function declaration.
  CapturedDecl *getCapturedDecl();
  const CapturedDecl *getCapturedDecl() const;

  /// \brief Set the outlined function declaration.
  void setCapturedDecl(CapturedDecl *D);

  /// \brief Retrieve the captured region kind.
  CapturedRegionKind getCapturedRegionKind() const;

  /// \brief Set the captured region kind.
  void setCapturedRegionKind(CapturedRegionKind Kind);

  /// \brief Retrieve the record declaration for captured variables.
  const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }

  /// \brief Set the record declaration for captured variables.
  void setCapturedRecordDecl(RecordDecl *D) {
    assert(D && "null RecordDecl");
    TheRecordDecl = D;
  }

  /// \brief True if this variable has been captured.
  bool capturesVariable(const VarDecl *Var) const;

  /// \brief An iterator that walks over the captures.
  using capture_iterator = Capture *;
  using const_capture_iterator = const Capture *;
  using capture_range = llvm::iterator_range<capture_iterator>;
  using capture_const_range = llvm::iterator_range<const_capture_iterator>;

  capture_range captures() {
    return capture_range(capture_begin(), capture_end());
  }
  capture_const_range captures() const {
    return capture_const_range(capture_begin(), capture_end());
  }

  /// \brief Retrieve an iterator pointing to the first capture.
  capture_iterator capture_begin() { return getStoredCaptures(); }
  const_capture_iterator capture_begin() const { return getStoredCaptures(); }

  /// \brief Retrieve an iterator pointing past the end of the sequence of
  /// captures.
  capture_iterator capture_end() const {
    return getStoredCaptures() + NumCaptures;
  }

  /// \brief Retrieve the number of captures, including 'this'.
  unsigned capture_size() const { return NumCaptures; }

  /// \brief Iterator that walks over the capture initialization arguments.
  using capture_init_iterator = Expr **;
  using capture_init_range = llvm::iterator_range<capture_init_iterator>;

  /// \brief Const iterator that walks over the capture initialization
  /// arguments.
  using const_capture_init_iterator = Expr *const *;
  using const_capture_init_range =
      llvm::iterator_range<const_capture_init_iterator>;

  capture_init_range capture_inits() {
    return capture_init_range(capture_init_begin(), capture_init_end());
  }

  const_capture_init_range capture_inits() const {
    return const_capture_init_range(capture_init_begin(),
                                    capture_init_end());
  }

  /// \brief Retrieve the first initialization argument.
  capture_init_iterator capture_init_begin() {
    return reinterpret_cast<Expr **>(getStoredStmts());
  }

  const_capture_init_iterator capture_init_begin() const {
    return reinterpret_cast<Expr *const *>(getStoredStmts());
  }

  /// \brief Retrieve the iterator pointing one past the last initialization
  /// argument.
  capture_init_iterator capture_init_end() {
    return capture_init_begin() + NumCaptures;
  }

  const_capture_init_iterator capture_init_end() const {
    return capture_init_begin() + NumCaptures;
  }

  SourceLocation getLocStart() const LLVM_READONLY {
    return getCapturedStmt()->getLocStart();
  }

  SourceLocation getLocEnd() const LLVM_READONLY {
    return getCapturedStmt()->getLocEnd();
  }

  SourceRange getSourceRange() const LLVM_READONLY {
    return getCapturedStmt()->getSourceRange();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CapturedStmtClass;
  }

  child_range children();
};

} // namespace clang

#endif // LLVM_CLANG_AST_STMT_H
/* ===== file: cut-and-run.c ===== */
/*************************************************************************** * Description: * Process a file in pieces using multiple threads * * History: * Date Name Modification * 2021-04-25 Jason Bacon Begin ***************************************************************************/ #include <stdio.h> #include <sysexits.h> #include <string.h> #include <errno.h> #include <stdlib.h> #include <fcntl.h> // open() #include <unistd.h> // read() #include <sys/param.h> // MIN() #include <sys/stat.h> // fstat() #include <omp.h> #include "cut-and-run.h" int main(int argc,char *argv[]) { static long *start_positions; int infd; char *filename, *out_filename, *extension = "", *cmd, *thread_count_str, *end; unsigned thread_count; switch(argc) { case 5: extension = argv[4]; case 4: filename = argv[1]; cmd = argv[2]; out_filename = argv[3]; break; default: usage(argv); } // Get thread count from environment if present, else default if ( (thread_count_str = getenv("OMP_NUM_THREADS")) == NULL ) { thread_count = DEFAULT_THREAD_COUNT; omp_set_num_threads(thread_count); } else { thread_count = strtoul(thread_count_str, &end, 10); if ( *end != '\0' ) { fprintf(stderr, "Invalid OMP_NUM_THREADS: %s.\n", thread_count_str); return EX_DATAERR; } } printf("%u threads\n", thread_count); if ( (infd = open(filename, O_RDONLY)) == -1 ) { fprintf(stderr, "%s: Cannot open %s: %s\n", argv[0], filename, strerror(errno)); return EX_NOINPUT; } // Doesn't help at all //setvbuf(infd, read_buff, _IOFBF, read_buff_size); start_positions = find_start_positions(infd, thread_count); return spawn_processes(filename, cmd, out_filename, extension, start_positions, thread_count); } /*************************************************************************** * Description: * Find the starting position within the input file for each thread. * The file is divided into thread_count blocks of N lines and the * starting position for each thread is the beginning of the first * line in the block. 
* * History: * Date Name Modification * 2021-04-25 Jason Bacon Begin ***************************************************************************/ long *find_start_positions(int infd, unsigned thread_count) { long *start_positions, // Tracking this is slightly faster than ftell() file_position, eof_position; size_t c, c2, total_lines, lines_per_thread, bytes, max_lines = 1000000, read_buff_size; char *p, *read_buff; struct stat fileinfo; // Allocate conservatively and add on as needed if ( (start_positions = malloc(max_lines * sizeof(*start_positions))) == NULL ) { fputs("find_start_positions(): Cannot allocate start_positions.\n", stderr); exit(EX_UNAVAILABLE); } fstat(infd, &fileinfo); read_buff_size = fileinfo.st_blksize; printf("File system block size = %zu\n", read_buff_size); if ( (read_buff = malloc(read_buff_size + 1)) == NULL ) { fputs("find_start_positions(): Cannot allocate read_buff.\n", stderr); exit(EX_UNAVAILABLE); } file_position = 0; total_lines = 1; start_positions[0] = 0; // First block is beginning of file while ( (bytes = read(infd, read_buff, read_buff_size)) > 0 ) { for (p = read_buff; p < read_buff + bytes; ++p, ++file_position) { if ( *p == '\n' ) { start_positions[total_lines++] = file_position; if ( total_lines == max_lines ) { max_lines *= 2; start_positions = realloc(start_positions, max_lines * sizeof(*start_positions)); if ( start_positions == NULL ) { fputs("find_start_positions(): Cannot allocate start_positions.\n", stderr); exit(EX_UNAVAILABLE); } } } } } eof_position = file_position + 1; lines_per_thread = total_lines / thread_count + 1; printf("Lines per thread: %zu\n", lines_per_thread); /* * Rewinding is not enough. Private thread FILE structures must be * created so that each stream has a different file descriptor. 
*/ close(infd); free(read_buff); // Move the start positions for each thread to the top of the list for (c = 0, c2 = 0; c < total_lines; c += lines_per_thread) { start_positions[c2] = start_positions[c]; // printf("%zu %lu\n", c2, start_positions[c2]); ++c2; } start_positions[c2] = eof_position; /* * Immediately free memory from unused line starts to make Ray happy * This cannot fail since we're shrinking the array */ start_positions = realloc(start_positions, (thread_count + 1) * sizeof(*start_positions)); return start_positions; } int spawn_processes(const char *filename, const char *cmd, const char *out_filename, const char *extension, const long start_positions[], unsigned thread_count) { char thread_count_str[20]; unsigned thread; snprintf(thread_count_str, 19, "%u", thread_count); #pragma omp parallel for for (thread = 0; thread < thread_count; ++thread) { unsigned thread_id; char pipe_cmd[CMD_MAX + 1] = "", *read_buff; FILE *outfile; int infd, outfd; ssize_t bytes, c, my_start, my_end; size_t read_buff_size, read_size; struct stat fileinfo; // Verify that OpenMP has the right thread count thread_id = omp_get_thread_num(); // Copy FILE structure to private variables so they can diverge infd = open(filename, O_RDONLY); fstat(infd, &fileinfo); read_buff_size = fileinfo.st_blksize; if ( (read_buff = malloc(read_buff_size + 1)) == NULL ) { fputs("find_start_positions(): Cannot allocate read_buff.\n", stderr); exit(EX_UNAVAILABLE); } // Open a pipe with popen() or a named pipe with fopen() if ( strcmp(out_filename, "/dev/null") == 0 ) snprintf(pipe_cmd, CMD_MAX, "%s > %s", cmd, out_filename); else snprintf(pipe_cmd, CMD_MAX, "%s > %s%0*u%s", cmd, out_filename, (unsigned)strlen(thread_count_str), thread, extension); if ( (outfile = popen(pipe_cmd, "w")) == NULL ) { fprintf(stderr, "spawn_processes(): Cannot pipe output: %s\n", pipe_cmd); exit(EX_CANTCREAT); } // Use popen() only for convenience. 
Don't use the FILE stream // returned, but use the file descriptor directly for low-level I/O. outfd = fileno(outfile); // Send chars from this thread's section of the file to the pipe my_start = start_positions[thread]; my_end = start_positions[thread + 1]; lseek(infd, my_start, SEEK_SET); printf("Thread #%u (%u) sending characters %lu to %lu to %s\n", thread, thread_id, my_start, my_end, pipe_cmd); for (c = my_start; c < my_end - 1; c += read_size) { read_size = MIN(read_buff_size, my_end - c); bytes = read(infd, read_buff, read_size); // FIXME: Using read_size should be the same as bytes, but it // inserts one extra character before EOF // printf("%zu %zu\n", read_size, bytes); write(outfd, read_buff, bytes); } close(outfd); // Properly flush low-level buffers pclose(outfile); // Free pipe structures close(infd); free(read_buff); } return EX_OK; } void usage(char *argv[]) { fprintf(stderr, "Usage:\n\n [env OMP_NUM_THREADS=#] \\\n" " %s input-file command output-file-stem [extension]\n\n", argv[0]); fprintf(stderr, "\"cmd\" is any command that reads from stdin and writes to stdout\n" "\"extension\" is an optional filename extension for each output file.\n\n" "Actual output file for thread N is output-file-stemN[extension]\n" "unless output file is /dev/null, in which case it is unaltered.\n\n" "Example:\n\n %s input.fa cat output- .fa\n" " Produces output files output-1.fa, output-2.fa, ...\n\n", argv[0]); exit(EX_USAGE); }
kvstore_dist_server.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file mxnet_node.h
 * \brief implement mxnet nodes
 */
#ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_

#include <mxnet/c_api.h>
#include <mxnet/kvstore.h>
#include <ps/ps.h>
#include <queue>
#include <string>
#include <mutex>
#include <condition_variable>
#include <memory>
#include <functional>
#include <future>
#include <vector>
#include "../profiler/profiler.h"
#include "../operator/tensor/elemwise_binary_op-inl.h"
#include "../operator/tensor/init_op.h"

namespace mxnet {
namespace kvstore {

// maintain same order in frontend.
enum class CommandType {
  kController, kSetMultiPrecision, kStopServer, kSyncMode,
  kSetGradientCompression, kSetProfilerParams
};

enum class RequestType {
  kDefaultPushPull, kRowSparsePushPull, kCompressedPushPull
};

// Decoded form of a data-handle command: request category plus the dtype of
// the payload (paired/unpaired via GetCommandType / DepairDataHandleType).
struct DataHandleType {
  RequestType requestType;
  int dtype;
};

/*!
 * Uses Cantor pairing function to generate a unique number given two numbers.
 * This number can also be inverted to find the unique pair whose Cantor value is this number.
 * Ref: https://en.wikipedia.org/wiki/Pairing_function#Cantor_pairing_function
 * \param requestType RequestType
 * \param dtype integer
 * \return Cantor value of arguments
 */
static int GetCommandType(RequestType requestType, int d) {
  int m = static_cast<int>(requestType);
  return (((m + d) * (m + d + 1)) / 2) + d;
}

/*!
 * Unpairs Cantor value and finds the two integers used to pair.
 * Then returns DataHandleType object with those numbers.
 * \param cmd DataHandleCommand generated by GetCommandType function
 * \return DataHandleType
 */
static DataHandleType DepairDataHandleType(int cmd) {
  int w = std::floor((std::sqrt(8 * cmd + 1) - 1) / 2);
  int t = ((w * w) + w) / 2;
  int y = cmd - t;
  int x = w - y;
  CHECK_GE(x, 0);
  CHECK_GE(y, 0);
  DataHandleType type;
  type.requestType = static_cast<RequestType>(x);
  type.dtype = y;
  return type;
}

/**
 * \brief executor runs a function using the thread called \ref Start
 */
class Executor {
 public:
  /**
   * \brief start the executor
   *
   * Loops forever draining queue_: runs each queued Func on the calling
   * thread and fulfills its promise.  A Block with an empty Func (pushed
   * by Stop()) is the shutdown sentinel and breaks the loop.
   */
  void Start() {
    std::unique_lock<std::mutex> lk(mu_);
    while (true) {
      cond_.wait(lk, [this] { return !queue_.empty(); });
      Block blk = std::move(queue_.front());
      queue_.pop();
      lk.unlock();  // run the task without holding the queue lock
      if (blk.f) {
        blk.f();
        blk.p->set_value();
      } else {
        // empty Func == shutdown sentinel
        blk.p->set_value();
        break;
      }
      lk.lock();
    }
  }

  /**
   * \brief function
   */
  typedef std::function<void()> Func;

  /**
   * \brief let the thread called \ref Start to exec a function. threadsafe
   *
   * Blocks the caller until the executor thread has finished running func.
   */
  void Exec(const Func& func) {
    Block blk(func);
    auto fut = blk.p->get_future();
    {
      std::lock_guard<std::mutex> lk(mu_);
      queue_.push(std::move(blk));
      cond_.notify_one();
    }
    fut.wait();
  }

  /**
   * \brief stop the thread, threadsafe
   */
  void Stop() {
    Exec(Func());
  }

 private:
  // A queued unit of work plus the promise used to signal its completion.
  struct Block {
    explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) {}
    Func f;
    std::shared_ptr<std::promise<void>> p;
  };
  std::queue<Block> queue_;
  std::mutex mu_;
  std::condition_variable cond_;
};

/**
 * \brief ps-lite server that stores key->NDArray values and services
 * push/pull requests from workers (dense, row-sparse and compressed paths).
 * Updater/controller callbacks are funneled through exec_ so they run on
 * the thread that called Run() (necessary for python callbacks).
 */
class KVStoreDistServer {
 public:
  KVStoreDistServer() {
    using namespace std::placeholders;
    ps_server_ = new ps::KVServer<char>(0);
    // head-only commands (CommandHandle) vs. data traffic (DataHandleEx)
    static_cast<ps::SimpleApp*>(ps_server_)
        ->set_request_handle(std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2));
    ps_server_->set_request_handle(
        std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3));
    sync_mode_ = false;
    gradient_compression_ = std::make_shared<GradientCompression>();
    log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false);
  }

  ~KVStoreDistServer() {
    profiler::Profiler::Get()->SetState(profiler::Profiler::ProfilerState(0));
    delete ps_server_;
  }

  void set_controller(const KVStore::Controller& controller) {
    CHECK(controller);
    controller_ = controller;
  }

  void set_updater(const KVStore::Updater& updater) {
    CHECK(updater);
    updater_ = updater;
  }

  /**
   * \brief blocked until received the command \a kSyncMode
   */
  void Run() {
    exec_.Start();
  }

 private:
  // Per-key aggregation state for pending pushes.
  struct UpdateBuf {
    std::vector<ps::KVMeta> request;  // pushes received but not yet applied
    NDArray merged;                   // sum of worker gradients (sync mode)
    // temp_array is used to cast received values as float32 for computation if required
    NDArray temp_array;
  };

  // Dispatch a control message (recved.head encodes the CommandType).
  void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) {
    CommandType recved_type = static_cast<CommandType>(recved.head);
    switch (recved_type) {
      case CommandType::kStopServer:
        exec_.Stop();
        break;
      case CommandType::kSyncMode:
        sync_mode_ = true;
        break;
      case CommandType::kSetGradientCompression:
        gradient_compression_->DecodeParams(recved.body);
        break;
      case CommandType::kSetProfilerParams:
        // last char is the type of profiler command
        ProcessServerProfilerCommands(
            static_cast<KVStoreServerProfilerCommand>(recved.body.back() - '0'),
            recved.body);
        break;
      case CommandType::kSetMultiPrecision:
        // uses value 1 for message id from frontend
        if (!multi_precision_) {
          multi_precision_ = true;
          CreateMultiPrecisionCopies();
        }
        break;
      case CommandType::kController:
        // this uses value 0 for message id from frontend
        // let the main thread to execute ctrl, which is necessary for python
        exec_.Exec([this, recved]() {
          CHECK(controller_);
          controller_(recved.head, recved.body);
        });
        break;
    }
    app->Response(recved);
  }

  /*
   * For keys already initialized, if necessary create stored_realt.
   * This will only be used if by some wrong usage of kvstore,
   * some keys are initialized before optimizer is set.
   */
  void CreateMultiPrecisionCopies() {
    for (auto const& stored_entry : store_) {
      const int key = stored_entry.first;
      const NDArray& stored = stored_entry.second;
      if (stored.dtype() != mshadow::kFloat32) {
        auto& stored_realt = store_realt_[key];
        if (stored.storage_type() == kRowSparseStorage) {
          stored_realt = NDArray(kRowSparseStorage, stored.shape(), stored.ctx(),
                                 true, mshadow::kFloat32);
        } else {
          stored_realt = NDArray(stored.shape(), stored.ctx(), false, mshadow::kFloat32);
        }
        auto& update = update_buf_[key];
        if (!update.merged.is_none()) {
          if (update.merged.storage_type() == kRowSparseStorage) {
            update.merged = NDArray(kRowSparseStorage, update.merged.shape(),
                                    update.merged.ctx(), true, mshadow::kFloat32);
          } else {
            update.merged = NDArray(update.merged.shape(), update.merged.ctx(),
                                    false, mshadow::kFloat32);
          }
        }
        CHECK(update.request.size() == 0)
            << ps::MyRank() << "Multiprecision mode can not be set while pushes are underway."
            << "Please set optimizer before pushing keys." << key << " "
            << update.request.size();
        CopyFromTo(stored, stored_realt);
      }
    }
    for (auto const& stored_realt_entry : store_realt_) {
      stored_realt_entry.second.WaitToRead();
    }
  }

  void ProcessServerProfilerCommands(KVStoreServerProfilerCommand type,
                                     const std::string& body) {
    switch (type) {
      case KVStoreServerProfilerCommand::kSetConfig:
        // strip the trailing command-type character appended by the frontend
        SetProfilerConfig(body.substr(0, body.size() - 1));
        break;
      case KVStoreServerProfilerCommand::kState:
        MXSetProfilerState(static_cast<int>(body.front() - '0'));
        break;
      case KVStoreServerProfilerCommand::kPause:
        MXProfilePause(static_cast<int>(body.front() - '0'));
        break;
      case KVStoreServerProfilerCommand::kDump:
        MXDumpProfile(static_cast<int>(body.front() - '0'));
        break;
    }
  }

  // Parse "key:value,key:value,..." into C arrays for MXSetProfilerConfig.
  // The "filename" value is prefixed with this server's rank.
  void SetProfilerConfig(std::string params_str) {
    std::vector<std::string> elems;
    mxnet::kvstore::split(params_str, ',', std::back_inserter(elems));
    std::vector<const char*> ckeys;
    std::vector<const char*> cvals;
    ckeys.reserve(elems.size());
    cvals.reserve(elems.size());
    for (size_t i = 0; i < elems.size(); i++) {
      std::vector<std::string> parts;
      mxnet::kvstore::split(elems[i], ':', std::back_inserter(parts));
      CHECK_EQ(parts.size(), 2) << "Improper profiler config passed from worker";
      CHECK(!parts[0].empty()) << "ProfilerConfig parameter is empty";
      CHECK(!parts[1].empty()) << "ProfilerConfig value is empty for parameter "
                               << parts[0];
      if (parts[0] == "filename") {
        parts[1] = "rank" + std::to_string(ps::MyRank()) + "_" + parts[1];
      }
      char* ckey = new char[parts[0].length() + 1];
      std::snprintf(ckey, parts[0].length() + 1, "%s", parts[0].c_str());
      ckeys.push_back(ckey);
      char* cval = new char[parts[1].length() + 1];
      std::snprintf(cval, parts[1].length() + 1, "%s", parts[1].c_str());
      cvals.push_back(cval);
    }
    MXSetProfilerConfig(elems.size(), &ckeys[0], &cvals[0]);
    for (size_t i = 0; i < ckeys.size(); i++) {
      delete[] ckeys[i];
      delete[] cvals[i];
    }
  }

  // Route a data request to the handler for its RequestType.
  void DataHandleEx(const ps::KVMeta& req_meta,
                    const ps::KVPairs<char>& req_data,
                    ps::KVServer<char>* server) {
    DataHandleType type = DepairDataHandleType(req_meta.cmd);
    switch (type.requestType) {
      case RequestType::kRowSparsePushPull:
        DataHandleRowSparse(type, req_meta, req_data, server);
        break;
      case RequestType::kCompressedPushPull:
        DataHandleCompressed(type, req_meta, req_data, server);
        break;
      case RequestType::kDefaultPushPull:
        DataHandleDefault(type, req_meta, req_data, server);
        break;
    }
  }

  // True when a separate float32 master copy (store_realt_) is kept for
  // this key's dtype.
  inline bool has_multi_precision_copy(const DataHandleType type) {
    return multi_precision_ && type.dtype != mshadow::kFloat32;
  }

  // Apply accumulated pushes to the stored value once all workers have
  // contributed (sync mode) or immediately (async), then answer any
  // outstanding pull requests.
  inline void ApplyUpdates(const DataHandleType type, const int key,
                           const ps::KVPairs<char>& req_data, UpdateBuf* update_buf,
                           ps::KVServer<char>* server) {
    if (!sync_mode_ || update_buf->request.size() == (size_t)ps::NumWorkers()) {
      // let the main thread to execute updater_, which is necessary for python
      auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
      auto& update = sync_mode_ ? update_buf->merged : update_buf->temp_array;
      if (updater_) {
        exec_.Exec([this, key, &update, &stored]() {
          CHECK(updater_);
          updater_(key, update, &stored);
        });
      } else {
        CHECK(sync_mode_) << "Updater needs to be set for async mode";
        // if no updater, just copy
        CopyFromTo(update_buf->merged, &stored);
      }
      if (log_verbose_) {
        LOG(INFO) << "sent response to " << update_buf->request.size() << " workers";
      }
      /**
       * Request can be for either push, pull or pushpull
       * If pull flag is set, respond immediately with the updated values
       * Otherwise, only send the notification
       */
      bool has_pull = false;
      for (const auto& req : update_buf->request) {
        has_pull = has_pull || req.pull;
      }
      if (has_pull) {
        // if there is a pull request, perform WaitToRead() once before DefaultStorageResponse
        if (has_multi_precision_copy(type))
          CopyFromTo(stored, store_[key]);
        stored.WaitToRead();
        for (const auto& req : update_buf->request) {
          if (req.pull) {
            DefaultStorageResponse(type, key, req, req_data, server);
          }
        }
        update_buf->request.clear();
      } else {
        // otherwise, send response directly
        for (const auto& req : update_buf->request) {
          server->Response(req);
        }
        update_buf->request.clear();
        if (has_multi_precision_copy(type))
          CopyFromTo(stored, store_[key]);
        stored.WaitToRead();
      }
    } else {
      update_buf->merged.WaitToRead();
    }
  }

  // Translate the per-row keys of a row-sparse request into row indices
  // relative to master_key (keys[0] is the master key itself).
  void DecodeRowIds(const ps::SArray<ps::Key>& keys, int64_t* indices,
                    const int64_t master_key, const int64_t num_rows) {
    indices[0] = 0;
    for (int64_t i = 1; i <= num_rows; i++) {
      int key = DecodeKey(keys[i]);
      auto row_id = key - master_key;
      indices[i - 1] = row_id;
    }
  }

  // merged += recved for row-sparse gradients, casting to float32 first in
  // multi-precision mode.  Runs the addition on the engine, then copies the
  // result back into updateBuf->merged.
  void AccumulateRowSparseGrads(const DataHandleType type, const NDArray& recved,
                                UpdateBuf* updateBuf) {
    NDArray out(kRowSparseStorage, updateBuf->merged.shape(), Context(), true,
                has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
    if (has_multi_precision_copy(type))
      CopyFromTo(recved, updateBuf->temp_array);
    const NDArray& to_merge = has_multi_precision_copy(type) ? updateBuf->temp_array
                                                             : recved;
    // accumulate row_sparse gradients
    using namespace mshadow;
    Engine::Get()->PushAsync(
        [to_merge, updateBuf, out](RunContext ctx, Engine::CallbackOnStart on_start,
                                   Engine::CallbackOnComplete on_complete) {
          on_start();
          op::ElemwiseBinaryOp::ComputeEx<cpu, op::mshadow_op::plus>(
              {}, {}, {to_merge, updateBuf->merged}, {kWriteTo}, {out});
          on_complete();
        },
        to_merge.ctx(), {to_merge.var(), updateBuf->merged.var()}, {out.var()},
        FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
    CopyFromTo(out, &(updateBuf->merged), 0);
    updateBuf->merged.WaitToRead();
  }

  // Answer a row-sparse pull: copy the requested rows of the stored value
  // into the response, one unit_size slice per requested key.
  void RowSparsePullResponse(const DataHandleType type, const int master_key,
                             const size_t num_rows, const ps::KVMeta& req_meta,
                             const ps::KVPairs<char>& req_data,
                             ps::KVServer<char>* server) {
    if (log_verbose_)
      LOG(INFO) << "pull: " << master_key;
    ps::KVPairs<char> response;
    if (num_rows == 0) {
      std::vector<int> lens(req_data.keys.size(), 0);
      response.keys = req_data.keys;
      response.lens.CopyFrom(lens.begin(), lens.end());
      server->Response(req_meta, response);
      return;
    }
    const NDArray& stored = store_[master_key];
    if (has_multi_precision_copy(type))
      stored.WaitToRead();
    CHECK(!stored.is_none()) << "init " << master_key << " first";
    auto shape = stored.shape();
    auto unit_len = shape.ProdShape(1, shape.ndim());
    const int num_bytes = mshadow::mshadow_sizeof(type.dtype);
    const int unit_size = unit_len * num_bytes;
    const char* data = static_cast<char*>(stored.data().dptr_);
    auto len = num_rows * unit_size;
    // concat values
    response.vals.resize(len);
#pragma omp parallel for
    for (size_t i = 1; i <= num_rows; i++) {
      int key = DecodeKey(req_data.keys[i]);
      int64_t row_id = key - master_key;
      const auto src = data + row_id * unit_size;
      auto begin = (i - 1) * unit_size;
      auto end = i * unit_size;
      // each iteration writes a disjoint segment, so the parallel loop is safe
      response.vals.segment(begin, end).CopyFrom(src, unit_size);
    }
    // setup response
    response.keys = req_data.keys;
    std::vector<int> lens(req_data.keys.size(), unit_len);
    lens[0] = 0;  // keys[0] is the master key; it carries no data
    response.lens.CopyFrom(lens.begin(), lens.end());
    server->Response(req_meta, response);
  }

  // First push for a row-sparse key: allocate the stored NDArray, fill its
  // row-index aux data with the full identity, and copy/cast the payload in.
  void InitRowSparseStored(const DataHandleType type, const int master_key,
                           const size_t num_rows, const ps::KVMeta& req_meta,
                           const ps::KVPairs<char>& req_data,
                           ps::KVServer<char>* server) {
    auto& stored = has_multi_precision_copy(type) ? store_realt_[master_key]
                                                  : store_[master_key];
    int dtype = type.dtype;
    int num_bytes = mshadow::mshadow_sizeof(dtype);
    auto unit_len = req_data.lens[1] / num_bytes;
    CHECK_GT(unit_len, 0);
    size_t ds[] = {num_rows, (size_t)unit_len};
    mxnet::TShape dshape(ds, ds + 2);
    CHECK_EQ(req_data.vals.size(), num_rows * unit_len * num_bytes);
    TBlob recv_blob;
    MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
      recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape,
                        cpu::kDevMask);
    })
    NDArray recved = NDArray(recv_blob, 0);
    stored = NDArray(kRowSparseStorage, dshape, Context(), true,
                     has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
    if (has_multi_precision_copy(type)) {
      store_[master_key] = NDArray(kRowSparseStorage, dshape, Context(), true,
                                   type.dtype);
    }
    Engine::Get()->PushAsync(
        [this, recved, stored, type](RunContext ctx, Engine::CallbackOnStart on_start,
                                     Engine::CallbackOnComplete on_complete) {
          on_start();
          NDArray rsp = stored;
          stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])});
          mshadow::Stream<cpu>* s = ctx.get_stream<cpu>();
          using namespace mxnet::op;
          nnvm::dim_t nnr = rsp.shape()[0];
          MSHADOW_IDX_TYPE_SWITCH(rsp.aux_type(rowsparse::kIdx), IType, {
            IType* idx = rsp.aux_data(rowsparse::kIdx).dptr<IType>();
            mxnet_op::Kernel<PopulateFullIdxRspKernel, cpu>::Launch(s, nnr, idx);
          });
          TBlob rsp_data = rsp.data();
          // copies or casts as appropriate
          ndarray::Copy<cpu, cpu>(recved.data(), &rsp_data, Context(), Context(),
                                  RunContext());
          on_complete();
        },
        recved.ctx(), {recved.var()}, {stored.var()}, FnProperty::kNormal, 0,
        PROFILER_MESSAGE_FUNCNAME);
    if (has_multi_precision_copy(type)) {
      CopyFromTo(stored, store_[master_key]);
      store_[master_key].WaitToRead();
    }
    stored.WaitToRead();
    server->Response(req_meta);
  }

  // Push/pull handler for row-sparse keys.  keys[0] is the master key,
  // keys[1..] the pushed/requested rows.
  void DataHandleRowSparse(const DataHandleType type, const ps::KVMeta& req_meta,
                           const ps::KVPairs<char>& req_data,
                           ps::KVServer<char>* server) {
    int master_key = DecodeKey(req_data.keys[0]);
    auto num_rows = req_data.keys.size() - 1;
    auto& stored = store_[master_key];
    if (req_meta.push) {
      CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty";
      CHECK_EQ(req_data.lens[0], 0);
      if (stored.is_none()) {
        if (log_verbose_)
          LOG(INFO) << "initial push: " << master_key;
        // initialization
        CHECK_GT(num_rows, 0) << "init with empty data is not supported";
        InitRowSparseStored(type, master_key, num_rows, req_meta, req_data, server);
        return;
      } else {
        if (log_verbose_)
          LOG(INFO) << "push: " << master_key << " " << req_data.keys;
        auto& updates = update_buf_[master_key];
        if (sync_mode_ && updates.merged.is_none()) {
          updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true,
                                   has_multi_precision_copy(type) ? mshadow::kFloat32
                                                                  : type.dtype);
        }
        if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
          updates.temp_array = NDArray(kRowSparseStorage, stored.shape(), Context(),
                                       false, mshadow::kFloat32);
        }
        if (num_rows == 0) {
          // a worker pushed zero rows for this key
          if (sync_mode_) {
            if (updates.request.empty()) {
              // reset to zeros
              int merged_dtype = has_multi_precision_copy(type) ? mshadow::kFloat32
                                                                : type.dtype;
              updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(),
                                       true, merged_dtype);
            }  // else nothing to aggregate
            updates.request.push_back(req_meta);
            ApplyUpdates(type, master_key, req_data, &updates, server);
          } else {
            server->Response(req_meta);
          }
        } else {
          auto unit_len = req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype);
          CHECK_GT(unit_len, 0);
          // indices
          std::vector<int64_t> indices(num_rows);
          DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows);
          // data
          TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask);
          size_t ds[] = {(size_t)num_rows, (size_t)unit_len};
          mxnet::TShape dshape(ds, ds + 2);
          TBlob recv_blob;
          MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, {
            recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape,
                              cpu::kDevMask);
          })
          // row_sparse NDArray
          NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0);
          if (updates.request.empty()) {
            // first push of this round: overwrite rather than accumulate
            if (sync_mode_) {
              CopyFromTo(recved, updates.merged);
            } else {
              if (has_multi_precision_copy(type)) {
                CopyFromTo(recved, updates.temp_array);
              } else {
                updates.temp_array = recved;
              }
            }
          } else {
            CHECK(sync_mode_);
            AccumulateRowSparseGrads(type, recved, &updates);
          }
          updates.request.push_back(req_meta);
          ApplyUpdates(type, master_key, req_data, &updates, server);
        }
      }
    } else {
      // pull
      RowSparsePullResponse(type, master_key, num_rows, req_meta, req_data, server);
    }
  }

  // Answer a dense pull with the full stored value for key.
  void DefaultStorageResponse(const DataHandleType type, const int key,
                              const ps::KVMeta& req_meta,
                              const ps::KVPairs<char>& req_data,
                              ps::KVServer<char>* server) {
    ps::KVPairs<char> response;
    const NDArray& stored = store_[key];
    CHECK(!stored.is_none()) << "init " << key << " first";

    // as server returns when store_realt is ready in this case
    if (has_multi_precision_copy(type))
      stored.WaitToRead();

    auto len = stored.shape().Size() * mshadow::mshadow_sizeof(stored.dtype());
    response.keys = req_data.keys;
    response.lens = {len};
    // TODO(mli) try to remove this CopyFrom
    response.vals.CopyFrom(static_cast<const char*>(stored.data().dptr_), len);
    server->Response(req_meta, response);
  }

  // Push/pull handler for gradient-compressed traffic (fp32 only).
  void DataHandleCompressed(const DataHandleType type, const ps::KVMeta& req_meta,
                            const ps::KVPairs<char>& req_data,
                            ps::KVServer<char>* server) {
    CHECK_EQ(type.dtype, mshadow::kFloat32)
        << "Gradient compression is currently supported for fp32 only";
    if (req_meta.push) {
      // there used several WaitToRead, this is because \a recved's memory
      // could be deallocated when this function returns. so we need to make sure
      // the operators with \a NDArray are actually finished

      // first for dummy key which represents original size of array, whose len is 0
      CHECK_EQ(req_data.keys.size(), (size_t)2);
      CHECK_EQ(req_data.lens.size(), (size_t)2);
      CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]);

      int original_size = DecodeKey(req_data.keys[0]);
      int key = DecodeKey(req_data.keys[1]);
      auto& stored = store_[key];

      size_t ds[] = {(size_t)req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype)};
      mxnet::TShape dshape(ds, ds + 1);
      TBlob recv_blob(reinterpret_cast<real_t*>(req_data.vals.data()), dshape,
                      cpu::kDevMask);
      NDArray recved = NDArray(recv_blob, 0);

      NDArray decomp_buf = decomp_buf_[key];
      dshape = mxnet::TShape{(int64_t)original_size};

      if (decomp_buf.is_none()) {
        decomp_buf = NDArray(dshape, Context());
      }

      if (stored.is_none()) {
        stored = NDArray(dshape, Context());
        gradient_compression_->Dequantize(recved, &stored, 0);
        server->Response(req_meta);
        stored.WaitToRead();
      } else if (sync_mode_) {
        // synced push
        auto& merged = update_buf_[key];
        if (merged.merged.is_none()) {
          merged.merged = NDArray(dshape, Context());
        }
        if (merged.request.size() == 0) {
          gradient_compression_->Dequantize(recved, &merged.merged, 0);
        } else {
          gradient_compression_->Dequantize(recved, &decomp_buf, 0);
          merged.merged += decomp_buf;
        }
        merged.request.push_back(req_meta);
        ApplyUpdates(type, key, req_data, &merged, server);
      } else {
        // async push
        gradient_compression_->Dequantize(recved, &decomp_buf, 0);
        exec_.Exec([this, key, &decomp_buf, &stored]() {
          CHECK(updater_);
          updater_(key, decomp_buf, &stored);
        });
        server->Response(req_meta);
        stored.WaitToRead();
      }
    } else {
      // pull
      CHECK_EQ(req_data.keys.size(), (size_t)1);
      CHECK_EQ(req_data.lens.size(), (size_t)0);
      int key = DecodeKey(req_data.keys[0]);
      DefaultStorageResponse(type, key, req_meta, req_data, server);
    }
  }

  // Push/pull handler for ordinary dense keys.
  void DataHandleDefault(const DataHandleType type, const ps::KVMeta& req_meta,
                         const ps::KVPairs<char>& req_data,
                         ps::KVServer<char>* server) {
    // do some check
    CHECK_EQ(req_data.keys.size(), (size_t)1);
    if (req_meta.push) {
      CHECK_EQ(req_data.lens.size(), (size_t)1);
      CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]);
    }
    int key = DecodeKey(req_data.keys[0]);
    auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
    // there used several WaitToRead, this is because \a recved's memory
    // could be deallocated when this function returns. so we need to make sure
    // the operators with \a NDArray are actually finished
    if (req_meta.push) {
      size_t ds[] = {(size_t)req_data.lens[0] / mshadow::mshadow_sizeof(type.dtype)};
      mxnet::TShape dshape(ds, ds + 1);
      TBlob recv_blob;
      MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, {
        recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape,
                          cpu::kDevMask);
      })
      NDArray recved = NDArray(recv_blob, 0);
      if (stored.is_none()) {
        // initialization
        stored = NDArray(dshape, Context(), false,
                         has_multi_precision_copy(type) ? mshadow::kFloat32
                                                        : type.dtype);
        CopyFromTo(recved, &stored, 0);
        server->Response(req_meta);
        if (has_multi_precision_copy(type)) {
          auto& stored_dtype = store_[key];
          stored_dtype = NDArray(dshape, Context(), false, type.dtype);
          CopyFromTo(stored, stored_dtype);
          stored_dtype.WaitToRead();
        }
        stored.WaitToRead();
      } else {
        auto& updates = update_buf_[key];
        if (sync_mode_ && updates.merged.is_none()) {
          updates.merged = NDArray(dshape, Context(), false,
                                   has_multi_precision_copy(type) ? mshadow::kFloat32
                                                                  : type.dtype);
        }
        if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
          updates.temp_array = NDArray(dshape, Context(), false, mshadow::kFloat32);
        }
        if (updates.request.empty()) {
          // first push of this round: overwrite rather than accumulate
          if (sync_mode_) {
            CopyFromTo(recved, updates.merged);
          } else {
            if (has_multi_precision_copy(type)) {
              CopyFromTo(recved, updates.temp_array);
            } else {
              updates.temp_array = recved;
            }
          }
        } else {
          CHECK(sync_mode_);
          if (has_multi_precision_copy(type)) {
            CopyFromTo(recved, updates.temp_array);
            updates.merged += updates.temp_array;
          } else {
            updates.merged += recved;
          }
        }
        updates.request.push_back(req_meta);
        ApplyUpdates(type, key, req_data, &updates, server);
      }
    } else {
      DefaultStorageResponse(type, key, req_meta, req_data, server);
    }
  }

  // Map a global ps::Key into this server's local key space.
  int DecodeKey(ps::Key key) {
    auto kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()];
    return key - kr.begin();
  }

  /**
   * \brief user defined mode for push
   */
  bool sync_mode_;
  KVStore::Controller controller_;
  KVStore::Updater updater_;

  /**
   * \brief store_ contains the value at kvstore for each key
   */
  std::unordered_map<int, NDArray> store_;
  // float32 master copies of non-fp32 keys (multi-precision mode only)
  std::unordered_map<int, NDArray> store_realt_;

  /**
   * \brief merge_buf_ is a buffer used if sync_mode is true. It represents
   *        values from different workers being merged. The store will be updated
   *        to this value when values from all workers are pushed into this buffer.
   */
  std::unordered_map<int, UpdateBuf> update_buf_;

  /**
   * \brief decomp_buf_ is a buffer into which compressed values are
   *        decompressed before merging to the store. used when compress_!='none'
   */
  std::unordered_map<int, NDArray> decomp_buf_;

  Executor exec_;
  ps::KVServer<char>* ps_server_;

  // whether to LOG verbose information
  bool log_verbose_;

  /*
   * \brief whether to use multi precision mode.
   * in multi precision mode, all weights are stored as float32.
   * any gradient received will be cast to float32 before accumulation and updating of weights.
   */
  bool multi_precision_;

  /**
   * \brief gradient compression object.
   * starts with none, used after SetGradientCompression sets the type
   * currently there is no support for unsetting gradient compression
   */
  std::shared_ptr<kvstore::GradientCompression> gradient_compression_;
};

}  // namespace kvstore
}  // namespace mxnet

#endif  // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
single.c
#include <stdio.h> #include "omp_testsuite.h" int check_single (FILE * logFile) { int nr_threads_in_single = 0; int result = 0; int nr_iterations = 0; int i; #pragma omp parallel private(i) { for (i = 0; i < LOOPCOUNT; i++) { #pragma omp single { #pragma omp flush nr_threads_in_single++; #pragma omp flush nr_iterations++; nr_threads_in_single--; result = result + nr_threads_in_single; } /* end of single */ } /* end of for */ } /* end of parallel */ return (result == 0) && (nr_iterations == LOOPCOUNT); } /* end of check_single */ int crosscheck_single (FILE * logFile) { int nr_threads_in_single = 0; int result = 0; int nr_iterations = 0; int i; #pragma omp parallel private(i) { for (i = 0; i < LOOPCOUNT; i++) { { #pragma omp flush nr_threads_in_single++; #pragma omp flush nr_iterations++; nr_threads_in_single--; result = result + nr_threads_in_single; } /* end of single */ } /* end of for */ } /* end of parallel */ return (result == 0) && (nr_iterations == LOOPCOUNT); } /* end of check_single */
ten_tusscher_2004_epi_S2_5.c
//Original Ten Tusscher
// ten Tusscher et al. 2004 human ventricular (epicardial) cell model,
// parameter set "S2_5" with Elnaz's fitted conductances and steady-state
// initial conditions.  NEQ (17) state variables per cell; see RHS_cpu for
// the state-vector layout.
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S2_5.h"

// Report the model's initial membrane voltage and equation count to the
// framework (both macros/constants come from the model header).
GET_CELL_MODEL_DATA(init_cell_model_data) {

    assert(cell_model);

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;

    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

//TODO: this should be called only once for the whole mesh, like in the GPU code
// Initialize one cell's state vector sv[0..NEQ-1] with precomputed
// steady-state values (the model's default initial conditions are kept
// below, commented out, for reference).
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    // Default initial conditions
    /*
    sv[0] = INITIAL_V;     // V;       millivolt
    sv[1] = 0.f;           //M
    sv[2] = 0.75;          //H
    sv[3] = 0.75f;         //J
    sv[4] = 0.f;           //Xr1
    sv[5] = 1.f;           //Xr2
    sv[6] = 0.f;           //Xs
    sv[7] = 1.f;           //S
    sv[8] = 0.f;           //R
    sv[9] = 0.f;           //D
    sv[10] = 1.f;          //F
    sv[11] = 1.f;          //FCa
    sv[12] = 1.f;          //G
    sv[13] = 0.0002;       //Cai
    sv[14] = 0.2f;         //CaSR
    sv[15] = 11.6f;        //Nai
    sv[16] = 138.3f;       //Ki
    */

    // Elnaz's steady-state initial conditions
    real sv_sst[]={-86.6743585456438,0.00126116515238777,0.782285143101146,0.781885737321280,0.000172267497323657,0.486193660951379,0.00291820808108493,0.999998382455018,1.89973078307127e-08,1.86451321167615e-05,0.999780198191440,1.00782702931804,0.999999754763967,2.76599036686923e-05,0.357538249293263,10.7085717792583,139.021384569998};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}

// Advance all requested cells by num_steps steps of size dt.
// cells_to_solve (if non-NULL) maps loop index -> cell id; otherwise cells
// are solved in order.  Cells are independent, so the outer loop is
// parallelized; sv_id is made private because it is declared outside the loop.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One time step for one cell: copy the state, evaluate the model, and copy
// the result back.  NOTE: despite the "DY" name, RHS_cpu returns the NEW
// state values (not derivatives) — see the direct assignment below.
void solve_model_ode_cpu(real dt, real *sv, real stim_current)  {

    assert(sv);

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// Evaluate one step of the ten Tusscher 2004 model.
// sv  : current state (V, gates m/h/j/xr1/xr2/xs/s/r/d/f/fCa/g, Cai, CaSR,
//       Nai, Ki — indices 0..16).
// rDY_: OUT, updated state after one step of size dt.  Gates use an exact
//       exponential relaxation toward their steady state; concentrations and
//       the voltage use explicit (forward-Euler-style) updates.
// stim_current: external stimulus added to the total ionic current.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];
    real sm   = sv[1];
    real sh   = sv[2];
    real sj   = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs  = sv[6];
    real ss   = sv[7];
    real sr   = sv[8];
    real sd   = sv[9];
    real sf   = sv[10];
    real sfca = sv[11];
    real sg   = sv[12];
    real Cai  = sv[13];
    real CaSR = sv[14];
    real Nai  = sv[15];
    real Ki   = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    ///    real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    ///    real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    //#ifdef EPI
    real Gto=0.294;
    //#endif
    //#ifdef ENDO
    //    real Gto=0.073;
    //#endif
    //#ifdef MCELL
    //    real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted parameter set "S2_5": overrides the default conductances above
    // and supplies arel/crel/Vleak for the SR release/leak formulas.
    real parameters []={14.4941061664816,0.000306940351318330,0.000126486160649835,0.000251593758331556,0.231852653636147,0.170492615868249,0.109036079095606,4.44796487754522,0.0111149661882113,1.23956736157302,1099.91017026794,0.000314927815763443,0.381236416535235,0.0193513922111542,0.00539385037460332,9.81890868796030e-06};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    // Precomputed gate relaxation factors for fCa and g (fixed time constants)
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents  (Nernst/reversal potentials and IK1 rectification)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations  (analytic buffering for Cai and CaSR)
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // Transient-outward gates differ by cell type; this file is built for EPI
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates  (exact exponential relaxation toward the steady state)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g may only relax downward while depolarized (> -37 mV)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
forced_unroll.c
#include <unistd.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include "constants.h" /** * Deinterleave (transpose) an IQUV ring buffer page to the ordering needed for FITS files * Note that this is probably a slow function, and is not meant to be run real-time * * data in: tab, channel/4, time/500 packets of time,channel,pn * data out: tab, channel, pol, time * * Suggested use is: * 1. realtime: ringbuffer -> [trigger] -> dada_dbdisk * 2. offline: dada_dbdisk -> ringbuffer -> dadafits * * @param {const char *} page Ringbuffer page with interleaved data * @param {const char *} transposed * @param {int} ntabs Number of tabs * @param {int} nchannels Number of channels * @param {int} npackets Number of packets per sequence */ void deinterleave (const unsigned char *page, unsigned char *transposed, const int ntabs, const int nchannels, const int npackets) { const unsigned char *packet = page; int tab = 0; for (tab = 0; tab < ntabs; tab++) { int channel_offset = 0; for (channel_offset = 0; channel_offset < nchannels; channel_offset+=4) { unsigned char *intermediate = &transposed[(tab * nchannels + channel_offset)*NPOLS*npackets*NSAMPS]; int sequence_number = 0; for (sequence_number = 0; sequence_number < npackets; sequence_number++) { // process packet: int tn; #pragma omp parallel for for (tn = 0; tn < NSAMPS; tn++) { // 500 samples per packet intermediate[( 0 * NPOLS + 0) * npackets * NSAMPS + tn] = *packet++; intermediate[( 0 * NPOLS + 1) * npackets * NSAMPS + tn] = *packet++; intermediate[( 0 * NPOLS + 2) * npackets * NSAMPS + tn] = *packet++; intermediate[( 0 * NPOLS + 3) * npackets * NSAMPS + tn] = *packet++; intermediate[( 1 * NPOLS + 0) * npackets * NSAMPS + tn] = *packet++; intermediate[( 1 * NPOLS + 1) * npackets * NSAMPS + tn] = *packet++; intermediate[( 1 * NPOLS + 2) * npackets * NSAMPS + tn] = *packet++; intermediate[( 1 * NPOLS + 3) * npackets * NSAMPS + tn] = *packet++; intermediate[( 2 * NPOLS + 0) * npackets * NSAMPS + tn] = *packet++; 
intermediate[( 2 * NPOLS + 1) * npackets * NSAMPS + tn] = *packet++; intermediate[( 2 * NPOLS + 2) * npackets * NSAMPS + tn] = *packet++; intermediate[( 2 * NPOLS + 3) * npackets * NSAMPS + tn] = *packet++; intermediate[( 3 * NPOLS + 0) * npackets * NSAMPS + tn] = *packet++; intermediate[( 3 * NPOLS + 1) * npackets * NSAMPS + tn] = *packet++; intermediate[( 3 * NPOLS + 2) * npackets * NSAMPS + tn] = *packet++; intermediate[( 3 * NPOLS + 3) * npackets * NSAMPS + tn] = *packet++; } // tn } // sequence number } // channel_offset } // tab }
boxFilter_OPSAT_SoA.h
#pragma once #include "boxFilter.hpp" //one pass box filtering SoA class boxFilter_OPSAT_SoA { protected: cv::Mat src; cv::Mat dest; int r; int parallelType; float div; int row; int col; int cn; int loop; std::vector<cv::Mat> vSrc; std::vector<cv::Mat> vDest; virtual void filter_impl(cv::Mat& input, cv::Mat& output); public: boxFilter_OPSAT_SoA(cv::Mat& _src, cv::Mat& _dest, int _r, int _parallelType) : src(_src), dest(_dest), r(_r), parallelType(_parallelType) { div = 1.f / ((2 * r + 1)*(2 * r + 1)); row = src.rows; col = src.cols; cn = src.channels(); init(); } virtual void init() { loop = cn; vSrc.resize(loop); vDest.resize(loop); for (int i = 0; i < loop; i++) { vSrc[i].create(src.size(), CV_32FC1); vDest[i].create(src.size(), CV_32FC1); } } virtual void AoS2SoA(); virtual void SoA2AoS(); void filter() { AoS2SoA(); if (parallelType == ParallelTypes::NAIVE) { for (int i = 0; i < loop; i++) filter_impl(vSrc[i], vDest[i]); } else if (parallelType == ParallelTypes::OMP) { #pragma omp parallel for for (int i = 0; i < loop; i++) filter_impl(vSrc[i], vDest[i]); } SoA2AoS(); } void filterOnly() { if (parallelType == ParallelTypes::NAIVE) { for (int i = 0; i < loop; i++) filter_impl(vSrc[i], vDest[i]); } else if (parallelType == ParallelTypes::OMP) { #pragma omp parallel for for (int i = 0; i < loop; i++) filter_impl(vSrc[i], vDest[i]); } } }; class boxFilter_OPSAT_SoA_SSE : public boxFilter_OPSAT_SoA { private: __m128 mDiv; __m128 mBorder; void filter_impl(cv::Mat& input, cv::Mat& output) override; public: boxFilter_OPSAT_SoA_SSE(cv::Mat& _src, cv::Mat& _dest, int _r, int _parallelType) : boxFilter_OPSAT_SoA(_src, _dest, _r, _parallelType) { init(); } void init() override { mDiv = _mm_set1_ps(div); mBorder = _mm_set1_ps(static_cast<float>(r + 1)); loop = cn >> 2; vSrc.resize(loop); vDest.resize(loop); for (int i = 0; i < loop; i++) { vSrc[i].create(src.size(), CV_32FC4); vDest[i].create(src.size(), CV_32FC4); } } void AoS2SoA() override; void SoA2AoS() override; }; 
// AVX variant: packs 8 channels per CV_32FC(8) plane (loop = cn / 8) and
// keeps the normalization and border constants broadcast in __m256 registers.
// NOTE(review): assumes cn is a multiple of 8 — cn >> 3 silently truncates.
// Like the SSE variant, the constructor re-runs init() after the base
// constructor has already run the base init().
class boxFilter_OPSAT_SoA_AVX : public boxFilter_OPSAT_SoA
{
private:
	__m256 mDiv;     // broadcast of div
	__m256 mBorder;  // broadcast of (float)(r + 1), used at image borders
	void filter_impl(cv::Mat& input, cv::Mat& output) override;
public:
	boxFilter_OPSAT_SoA_AVX(cv::Mat& _src, cv::Mat& _dest, int _r, int _parallelType)
		: boxFilter_OPSAT_SoA(_src, _dest, _r, _parallelType)
	{
		init();  // re-run setup with the 8-channel plane layout
	}
	void init() override
	{
		mDiv = _mm256_set1_ps(div);
		mBorder = _mm256_set1_ps(static_cast<float>(r + 1));

		loop = cn >> 3;
		vSrc.resize(loop);
		vDest.resize(loop);
		for (int i = 0; i < loop; i++)
		{
			vSrc[i].create(src.size(), CV_32FC(8));
			vDest[i].create(src.size(), CV_32FC(8));
		}
	}
	void AoS2SoA() override;
	void SoA2AoS() override;
};
GB_Matrix_diag.c
//------------------------------------------------------------------------------
// GB_Matrix_diag: construct a diagonal matrix from a vector
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Free only the temporary workspace (Tx) used for the bitmap-V case.
#define GB_FREE_WORK                        \
    GB_FREE_WERK (&Tx, Tx_size) ;

// Free the workspace and the partially built output C (used on error paths).
#define GB_FREE_ALL                         \
    GB_FREE_WORK ;                          \
    GB_phbix_free (C) ;

#include "GB_diag.h"

// Build C = diag (V, k): an n-by-n matrix whose kth diagonal holds the
// entries of the vector V, where n = length(V) + |k|.  k >= 0 selects a
// super-diagonal (entry i goes to position (i, i+k)), k < 0 a sub-diagonal.
GrB_Info GB_Matrix_diag     // construct a diagonal matrix from a vector
(
    GrB_Matrix C,           // output matrix
    const GrB_Matrix V,     // input vector (as an n-by-1 matrix)
    int64_t k,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT_MATRIX_OK (C, "C input for GB_Matrix_diag", GB0) ;
    ASSERT_MATRIX_OK (V, "V input for GB_Matrix_diag", GB0) ;
    ASSERT (GB_VECTOR_OK (V)) ;             // V is a vector on input
    ASSERT (!GB_aliased (C, V)) ;           // C and V cannot be aliased
    ASSERT (!GB_IS_HYPERSPARSE (V)) ;       // vectors cannot be hypersparse

    GB_void *restrict Tx = NULL ;           // workspace, only used if V is bitmap
    size_t Tx_size = 0 ;

    GrB_Type ctype = C->type ;
    GrB_Type vtype = V->type ;
    int64_t nrows = GB_NROWS (C) ;
    int64_t ncols = GB_NCOLS (C) ;
    int64_t n = V->vlen + GB_IABS (k) ;     // C must be n-by-n

    if (nrows != ncols || nrows != n)
    { 
        GB_ERROR (GrB_DIMENSION_MISMATCH,
            "Input matrix is " GBd "-by-" GBd " but must be "
            GBd "-by-" GBd "\n", nrows, ncols, n, n) ;
    }

    if (!GB_Type_compatible (ctype, vtype))
    { 
        GB_ERROR (GrB_DOMAIN_MISMATCH, "Input vector of type [%s] "
            "cannot be typecast to output of type [%s]\n",
            vtype->name, ctype->name) ;
    }

    //--------------------------------------------------------------------------
    // finish any pending work in V and clear the output matrix C
    //--------------------------------------------------------------------------

    GB_MATRIX_WAIT (V) ;
    GB_phbix_free (C) ;

    //--------------------------------------------------------------------------
    // allocate C as sparse or hypersparse with vnz entries and vnz vectors
    //--------------------------------------------------------------------------

    // C is sparse if V is dense and k == 0, and hypersparse otherwise

    bool V_is_full = GB_is_dense (V) ;
    int C_sparsity = (V_is_full && k == 0) ? GxB_SPARSE : GxB_HYPERSPARSE ;
    int64_t vnz = GB_NNZ (V) ;
    bool csc = C->is_csc ;
    // save C's settings before GB_new_bix overwrites the header, then restore
    float hyper_switch = C->hyper_switch ;
    float bitmap_switch = C->bitmap_switch ;
    int sparsity_control = C->sparsity ;
    bool static_header = C->static_header ;

    GB_OK (GB_new_bix (&C, static_header,   // prior static or dynamic header
        ctype, n, n, GB_Ap_malloc, csc, C_sparsity, false,
        hyper_switch, vnz, vnz, true, Context)) ;

    C->sparsity = sparsity_control ;
    C->bitmap_switch = bitmap_switch ;

    //--------------------------------------------------------------------------
    // handle the CSR/CSC format of C and determine position of diagonal
    //--------------------------------------------------------------------------

    if (!csc)
    { 
        // The kth diagonal of a CSC matrix is the same as the (-k)th diagonal
        // of the CSR format, so if C is CSR, negate the value of k.  Then
        // treat C as if it were CSC in the rest of this method.
        k = -k ;
    }

    // split k into its nonnegative row and column shifts:
    // column index = entry + kpositive, row index = entry + knegative
    int64_t kpositive, knegative ;
    if (k >= 0)
    { 
        kpositive = k ;
        knegative = 0 ;
    }
    else
    { 
        kpositive = 0 ;
        knegative = -k ;
    }

    //--------------------------------------------------------------------------
    // get the contents of C and determine # of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (vnz, chunk, nthreads_max) ;
    int64_t *restrict Cp = C->p ;
    int64_t *restrict Ch = C->h ;
    int64_t *restrict Ci = C->i ;
    GB_Type_code vcode = vtype->code ;
    GB_Type_code ccode = ctype->code ;
    size_t vsize = vtype->size ;

    //--------------------------------------------------------------------------
    // copy the contents of V into the kth diagonal of C
    //--------------------------------------------------------------------------

    // Every vector of C holds exactly one entry, so Cp is the identity ramp;
    // only the row indices (Ci) and the hyperlist (Ch) depend on V and k.

    if (C_sparsity == GxB_SPARSE)
    {

        //----------------------------------------------------------------------
        // V is full, or can be treated as full, and k == 0
        //----------------------------------------------------------------------

        // C->x = (ctype) V->x
        GB_cast_array ((GB_void *) C->x, ccode, (GB_void *) V->x, vcode, NULL,
            vsize, vnz, nthreads) ;

        // construct Cp and Ci
        int64_t p ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < vnz ; p++)
        { 
            Cp [p] = p ;
            Ci [p] = p ;
        }

    }
    else if (V_is_full)
    {

        //----------------------------------------------------------------------
        // V is full, or can be treated as full, and k != 0
        //----------------------------------------------------------------------

        // TODO: if V is full and k == 0, then C can be created as sparse,
        // not hypersparse, and then Ch need not be created.

        // C->x = (ctype) V->x
        GB_cast_array ((GB_void *) C->x, ccode, (GB_void *) V->x, vcode, NULL,
            vsize, vnz, nthreads) ;

        // construct Cp, Ch, and Ci
        int64_t p ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < vnz ; p++)
        { 
            Cp [p] = p ;
            Ch [p] = p + kpositive ;
            Ci [p] = p + knegative ;
        }

    }
    else if (GB_IS_SPARSE (V))
    {

        //----------------------------------------------------------------------
        // V is sparse
        //----------------------------------------------------------------------

        // C->x = (ctype) V->x
        GB_cast_array ((GB_void *) C->x, ccode, (GB_void *) V->x, vcode, NULL,
            vsize, vnz, nthreads) ;

        int64_t *restrict Vi = V->i ;

        // construct Cp, Ch, and Ci
        int64_t p ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < vnz ; p++)
        { 
            Cp [p] = p ;
            Ch [p] = Vi [p] + kpositive ;
            Ci [p] = Vi [p] + knegative ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // V is bitmap; convert it to CSC
        //----------------------------------------------------------------------

        ASSERT (GB_IS_BITMAP (V)) ;
        int64_t Tp [2] ;

        // allocate workspace for sparse V
        Tx = GB_MALLOC_WERK (vnz * vsize, GB_void, &Tx_size) ;
        if (Tx == NULL)
        { 
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }

        // use C->i and Tx as output workspace for the sparse V
        int64_t ignore ;
        GB_OK (GB_convert_bitmap_worker (Tp, Ci, NULL, Tx, &ignore, V,
            Context)) ;

        // C->x = (ctype) Tx
        GB_cast_array ((GB_void *) C->x, ccode, Tx, vcode, NULL,
            vsize, vnz, nthreads) ;

        // construct Cp, Ch, and Ci (Ci already holds V's row indices)
        int64_t p ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < vnz ; p++)
        { 
            Cp [p] = p ;
            Ch [p] = Ci [p] + kpositive ;
            Ci [p] += knegative ;
        }
    }

    //--------------------------------------------------------------------------
    // finalize the matrix C
    //--------------------------------------------------------------------------

    Cp [vnz] = vnz ;
    C->nvec = vnz ;
    C->nvec_nonempty = vnz ;
    C->magic = GB_MAGIC ;

    //--------------------------------------------------------------------------
    // free workspace, conform C to its desired format, and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    ASSERT_MATRIX_OK (C, "C before conform for GB_Matrix_diag", GB0) ;
    GB_OK (GB_conform (C, Context)) ;
    ASSERT_MATRIX_OK (C, "C output for GB_Matrix_diag", GB0) ;
    return (GrB_SUCCESS) ;
}
build_JKmat_DF.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <omp.h> #include "linalg_lib_wrapper.h" #include "utils.h" #include "TinyDFT_typedef.h" #include "build_JKmat_DF.h" void TinyDFT_reduce_temp_J(double *temp_J, double *temp_J_thread, int len, int tid, int nthread) { while (nthread > 1) { int mid = (nthread + 1) / 2; int act_mid = nthread / 2; if (tid < act_mid) { double *dst = temp_J_thread + len * mid; #pragma omp simd for (int i = 0; i < len; i++) temp_J_thread[i] += dst[i]; } #pragma omp barrier nthread = mid; } } // Build temporary array for J matrix and form J matrix // Low flop-per-byte ratio: access: nbf^2 * (df_nbf+1), compute: nbf^2 * df_nbf // Note: the J_mat is not completed, the symmetrizing is done later static void TinyDFT_build_Jmat_DF(TinyDFT_p TinyDFT, const double *D_mat, double *J_mat, double *temp_J_t, double *J_mat_t) { int nbf = TinyDFT->nbf; int df_nbf = TinyDFT->df_nbf; int df_nbf_16 = TinyDFT->df_nbf_16; int nthread = TinyDFT->nthread; int *bf_pair_j = TinyDFT->bf_pair_j; int *bf_pair_diag = TinyDFT->bf_pair_diag; int *bf_mask_displs = TinyDFT->bf_mask_displs; double *temp_J = TinyDFT->temp_J; double *df_tensor = TinyDFT->df_tensor; double t0, t1, t2; #pragma omp parallel { int tid = omp_get_thread_num(); #pragma omp master t0 = get_wtime_sec(); // Use thread local buffer (aligned to 128B) to reduce false sharing double *temp_J_thread = temp_J + df_nbf_16 * tid; // Generate temporary array for J memset(temp_J_thread, 0, sizeof(double) * df_nbf); #pragma omp for schedule(dynamic) for (int k = 0; k < nbf; k++) { int diag_k_idx = bf_pair_diag[k]; int idx_kk = k * nbf + k; // Basis function pair (i, i) always survives screening size_t offset = (size_t) diag_k_idx * (size_t) df_nbf; double *df_tensor_row = df_tensor + offset; double D_kl = D_mat[idx_kk]; #pragma omp simd for (size_t p = 0; p < df_nbf; p++) temp_J_thread[p] += D_kl * df_tensor_row[p]; int row_k_epos = bf_mask_displs[k + 1]; for (int l_idx = 
diag_k_idx + 1; l_idx < row_k_epos; l_idx++) { int l = bf_pair_j[l_idx]; int idx_kl = k * nbf + l; double D_kl = D_mat[idx_kl] * 2.0; size_t offset = (size_t) l_idx * (size_t) df_nbf; double *df_tensor_row = df_tensor + offset; #pragma omp simd for (size_t p = 0; p < df_nbf; p++) temp_J_thread[p] += D_kl * df_tensor_row[p]; } } #pragma omp barrier TinyDFT_reduce_temp_J(temp_J, temp_J_thread, df_nbf_16, tid, nthread); #pragma omp master t1 = get_wtime_sec(); // Build J matrix #pragma omp for schedule(dynamic) for (int i = 0; i < nbf; i++) { int diag_i_idx = bf_pair_diag[i]; int row_i_epos = bf_mask_displs[i + 1]; for (int j_idx = diag_i_idx; j_idx < row_i_epos; j_idx++) { int j = bf_pair_j[j_idx]; size_t offset = (size_t) j_idx * (size_t) df_nbf; double *df_tensor_row = df_tensor + offset; double t = 0; #pragma omp simd for (size_t p = 0; p < df_nbf; p++) t += temp_J[p] * df_tensor_row[p]; J_mat[i * nbf + j] = t; } } #pragma omp master t2 = get_wtime_sec(); } *temp_J_t = t1 - t0; *J_mat_t = t2 - t1; } static void TinyDFT_set_batch_dgemm_temp_K(TinyDFT_p TinyDFT) { int nbf = TinyDFT->nbf; int df_nbf = TinyDFT->df_nbf; int n_occ = TinyDFT->n_occ; int *bf_mask_displs = TinyDFT->bf_mask_displs; double *Cocc_tmp = TinyDFT->pqA; double *df_tensor = TinyDFT->df_tensor; double *temp_K = TinyDFT->temp_K; for (int i = 0; i < nbf; i++) { int row_spos = bf_mask_displs[i]; int row_epos = bf_mask_displs[i + 1]; int row_len = row_epos - row_spos; size_t offset_a = (size_t) row_spos * (size_t) df_nbf; size_t offset_b = (size_t) row_spos * (size_t) df_nbf; size_t offset_c = (size_t) i * (size_t) n_occ * (size_t) df_nbf; double *A_ptr = Cocc_tmp + offset_a; double *B_ptr = df_tensor + offset_b; double *C_ptr = temp_K + offset_c; TinyDFT->mat_K_transa[i] = CblasTrans; TinyDFT->mat_K_transb[i] = CblasNoTrans; TinyDFT->mat_K_m[i] = n_occ; TinyDFT->mat_K_n[i] = df_nbf; TinyDFT->mat_K_k[i] = row_len; TinyDFT->mat_K_alpha[i] = 1.0; TinyDFT->mat_K_beta[i] = 0.0; TinyDFT->mat_K_a[i] = A_ptr; 
TinyDFT->mat_K_b[i] = B_ptr; TinyDFT->mat_K_c[i] = C_ptr; TinyDFT->mat_K_lda[i] = df_nbf; TinyDFT->mat_K_ldb[i] = df_nbf; TinyDFT->mat_K_ldc[i] = df_nbf; TinyDFT->mat_K_group_size[i] = 1; } } static void TinyDFT_set_batch_dgemm_K(TinyDFT_p TinyDFT, double *K_mat) { int nbf = TinyDFT->nbf; int df_nbf = TinyDFT->df_nbf; int n_occ = TinyDFT->n_occ; int mat_K_BS = TinyDFT->mat_K_BS; int nblock0 = nbf / mat_K_BS; int bs_rem = nbf % mat_K_BS; int *group_size = TinyDFT->mat_K_group_size; group_size[0] = (nblock0 * (nblock0 + 1)) / 2; if (bs_rem > 0) { group_size[1] = nblock0; group_size[2] = 1; } else { group_size[1] = 0; group_size[2] = 0; } double *temp_K = TinyDFT->temp_K; int cnt0 = 0, cnt1 = group_size[0]; int cnt2 = group_size[0] + group_size[1]; for (int i = 0; i < nbf; i += mat_K_BS) { int i_len = mat_K_BS < (nbf - i) ? mat_K_BS : (nbf - i); for (int j = i; j < nbf; j += mat_K_BS) { int j_len = mat_K_BS < (nbf - j) ? mat_K_BS : (nbf - j); size_t offset_i0 = (size_t) i * (size_t) n_occ * (size_t) df_nbf; size_t offset_j0 = (size_t) j * (size_t) n_occ * (size_t) df_nbf; double *K_ij = K_mat + i * nbf + j; double *temp_K_i = temp_K + offset_i0; double *temp_K_j = temp_K + offset_j0; int cnt, gid; if ((i_len == mat_K_BS) && (j_len == mat_K_BS)) { cnt = cnt0; gid = 0; cnt0++; } else { if ((i_len == mat_K_BS) && (j_len < mat_K_BS)) { cnt = cnt1; gid = 1; cnt1++; } else { cnt = cnt2; gid = 2; } } TinyDFT->mat_K_transa[gid] = CblasNoTrans; TinyDFT->mat_K_transb[gid] = CblasTrans; TinyDFT->mat_K_m[gid] = i_len; TinyDFT->mat_K_n[gid] = j_len; TinyDFT->mat_K_k[gid] = n_occ * df_nbf; TinyDFT->mat_K_alpha[gid] = 1.0; TinyDFT->mat_K_beta[gid] = 0.0; TinyDFT->mat_K_a[cnt] = temp_K_i; TinyDFT->mat_K_b[cnt] = temp_K_j; TinyDFT->mat_K_c[cnt] = K_ij; TinyDFT->mat_K_lda[gid] = n_occ * df_nbf; TinyDFT->mat_K_ldb[gid] = n_occ * df_nbf; TinyDFT->mat_K_ldc[gid] = nbf; } } } #ifndef USE_MKL #warning cblas_dgemm_batch() is not available in your BLAS library, will use cblas_dgemm to 
simulate it. void cblas_dgemm_batch( const CBLAS_LAYOUT Layout, const CBLAS_TRANSPOSE *transa_array, const CBLAS_TRANSPOSE *transb_array, const int *m_array, const int *n_array, const int *k_array, const double *alpha_array, const double **a_array, const int *lda_array, const double **b_array, const int *ldb_array, const double *beta_array, double **c_array, const int *ldc_array, const int group_count, const int *group_size ) { int idx = 0; for (int i = 0; i < group_count; i++) { const CBLAS_TRANSPOSE transa_i = transa_array[i]; const CBLAS_TRANSPOSE transb_i = transb_array[i]; const int m_i = m_array[i]; const int n_i = n_array[i]; const int k_i = k_array[i]; const int lda_i = lda_array[i]; const int ldb_i = ldb_array[i]; const int ldc_i = ldc_array[i]; const double alpha_i = alpha_array[i]; const double beta_i = beta_array[i]; for (int j = 0; j < group_size[i]; j++) { const double *a_idx = a_array[idx + j]; const double *b_idx = b_array[idx + j]; double *c_idx = c_array[idx + j]; cblas_dgemm( Layout, transa_i, transb_i, m_i, n_i, k_i, alpha_i, a_idx, lda_i, b_idx, ldb_i, beta_i, c_idx, ldc_i ); } idx += group_size[i]; } } #endif // Build temporary tensor for K matrix and form K matrix using Cocc matrix // High flop-per-byte ratio: access: nbf * df_nbf * (nbf + n_occ) , compute: nbf^2 * df_nbf * n_occ // Note: the K_mat is not completed, the symmetrizing is done later static void TinyDFT_build_Kmat_DF(TinyDFT_p TinyDFT, const double *Cocc_mat, double *K_mat, double *temp_K_t, double *K_mat_t) { int nbf = TinyDFT->nbf; int df_nbf = TinyDFT->df_nbf; int df_save_mem = TinyDFT->df_save_mem; int n_occ = TinyDFT->n_occ; int ngroups_temp_K = nbf; int bf_pair_cnt = TinyDFT->bf_mask_displs[nbf]; int *bf_pair_j = TinyDFT->bf_pair_j; int *bf_mask_displs = TinyDFT->bf_mask_displs; double *df_tensor = TinyDFT->df_tensor; double *temp_K = TinyDFT->temp_K; double *Cocc_tmp = TinyDFT->pqA; double t0, t1, t2; // Construct temporary tensor for K matrix // Formula: temp_K(i, s, p) = 
dot(Cocc_mat(1:nbf, s), df_tensor(i, 1:nbf, p)) t0 = get_wtime_sec(); if (df_save_mem == 0) { TinyDFT_set_batch_dgemm_temp_K(TinyDFT); #pragma omp parallel for schedule(dynamic) for (int i = 0; i < bf_pair_cnt; i++) { int j = bf_pair_j[i]; size_t Cocc_tmp_offset = (size_t) i * (size_t) df_nbf; size_t Cocc_mat_offset = (size_t) j * (size_t) n_occ; double *Cocc_tmp_ptr = Cocc_tmp + Cocc_tmp_offset; const double *Cocc_mat_ptr = Cocc_mat + Cocc_mat_offset; memcpy(Cocc_tmp_ptr, Cocc_mat_ptr, DBL_MSIZE * n_occ); } cblas_dgemm_batch( CblasRowMajor, TinyDFT->mat_K_transa, TinyDFT->mat_K_transb, TinyDFT->mat_K_m, TinyDFT->mat_K_n, TinyDFT->mat_K_k, TinyDFT->mat_K_alpha, (const double **) TinyDFT->mat_K_a, TinyDFT->mat_K_lda, (const double **) TinyDFT->mat_K_b, TinyDFT->mat_K_ldb, TinyDFT->mat_K_beta, TinyDFT->mat_K_c, TinyDFT->mat_K_ldc, ngroups_temp_K, TinyDFT->mat_K_group_size ); } else { double *A_ptr = TinyDFT->Cocc_mat; double *temp_A = TinyDFT->tmp_mat; for (int i = 0; i < nbf; i++) { size_t offset_c = (size_t) i * (size_t) n_occ * (size_t) df_nbf; double *C_ptr = temp_K + offset_c; int j_idx_spos = bf_mask_displs[i]; int j_idx_epos = bf_mask_displs[i + 1]; for (int j_idx = j_idx_spos; j_idx < j_idx_epos; j_idx++) { int j = bf_pair_j[j_idx]; int cnt = j_idx - j_idx_spos; memcpy(temp_A + cnt * n_occ, A_ptr + j * n_occ, DBL_MSIZE * n_occ); } int ncols = j_idx_epos - j_idx_spos; size_t df_tensor_offset = (size_t) j_idx_spos * (size_t) df_nbf; double *df_tensor_ptr = df_tensor + df_tensor_offset; cblas_dgemm( CblasRowMajor, CblasTrans, CblasNoTrans, n_occ, df_nbf, ncols, 1.0, temp_A, n_occ, df_tensor_ptr, df_nbf, 0.0, C_ptr, df_nbf ); } } // End of "if (df_save_mem == 0)" t1 = get_wtime_sec(); // Build K matrix // Formula: K(i, j) = sum_{s=1}^{n_occ} [ dot(temp_K(i, s, 1:df_nbf), temp_K(j, s, 1:df_nbf)) ] if (nbf <= 1024) { cblas_dgemm( CblasRowMajor, CblasNoTrans, CblasTrans, nbf, nbf, n_occ * df_nbf, 1.0, temp_K, n_occ * df_nbf, temp_K, n_occ * df_nbf, 0.0, K_mat, nbf 
); } else { int ngroups = 3; TinyDFT_set_batch_dgemm_K(TinyDFT, K_mat); if (TinyDFT->mat_K_group_size[1] == 0) ngroups = 1; cblas_dgemm_batch( CblasRowMajor, TinyDFT->mat_K_transa, TinyDFT->mat_K_transb, TinyDFT->mat_K_m, TinyDFT->mat_K_n, TinyDFT->mat_K_k, TinyDFT->mat_K_alpha, (const double **) TinyDFT->mat_K_a, TinyDFT->mat_K_lda, (const double **) TinyDFT->mat_K_b, TinyDFT->mat_K_ldb, TinyDFT->mat_K_beta, TinyDFT->mat_K_c, TinyDFT->mat_K_ldc, ngroups, TinyDFT->mat_K_group_size ); } t2 = get_wtime_sec(); *temp_K_t = t1 - t0; *K_mat_t = t2 - t1; } void TinyDFT_build_JKmat_DF(TinyDFT_p TinyDFT, const double *D_mat, const double *Cocc_mat, double *J_mat, double *K_mat) { if (J_mat == NULL && K_mat == NULL) return; int nbf = TinyDFT->nbf; double st, et, total_t, symm_t; double temp_J_t = 0.0, J_mat_t = 0.0; double temp_K_t = 0.0, K_mat_t = 0.0; if (J_mat != NULL) { TinyDFT_build_Jmat_DF(TinyDFT, D_mat, J_mat, &temp_J_t, &J_mat_t); st = get_wtime_sec(); #pragma omp for schedule(dynamic) for (int i = 1; i < nbf; i++) { #pragma omp simd for (int j = 0; j < i; j++) J_mat[i * nbf + j] = J_mat[j * nbf + i]; } et = get_wtime_sec(); symm_t = et - st; total_t = temp_J_t + J_mat_t + symm_t; printf( "* Build J mat using DF : %.3lf (s), " "aux / Jmat / symm = %.3lf, %.3lf, %.3lf\n", total_t, temp_J_t, J_mat_t, symm_t ); } if (K_mat != NULL) { if (TinyDFT->temp_K == NULL) { size_t temp_K_msize = (size_t) TinyDFT->df_nbf * (size_t) TinyDFT->n_occ * (size_t) TinyDFT->nbf; temp_K_msize *= DBL_MSIZE; st = get_wtime_sec(); TinyDFT->temp_K = (double*) malloc_aligned(temp_K_msize, 64); assert(TinyDFT->temp_K != NULL); et = get_wtime_sec(); TinyDFT->mem_size += (double) temp_K_msize; printf("Allocate auxiliary tensor for density fitting K matrix build : %.3lf (s)\n", et - st); } TinyDFT_build_Kmat_DF(TinyDFT, Cocc_mat, K_mat, &temp_K_t, &K_mat_t); st = get_wtime_sec(); #pragma omp for schedule(dynamic) for (int i = 1; i < nbf; i++) { #pragma omp simd for (int j = 0; j < i; j++) K_mat[i 
* nbf + j] = K_mat[j * nbf + i]; } et = get_wtime_sec(); symm_t = et - st; total_t = temp_K_t + K_mat_t + symm_t; printf( "* Build K mat using DF : %.3lf (s), " "aux / Kmat / symm = %.3lf, %.3lf, %.3lf\n", total_t, temp_K_t, K_mat_t, symm_t ); } }
GB_binop__first_fp64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__first_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_08__first_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__first_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_04__first_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__first_fp64)
// A*D function (colscale):         GB (_AxD__first_fp64)
// D*A function (rowscale):         GB (_DxB__first_fp64)
// C+=B function (dense accum):     GB (_Cdense_accumB__first_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__first_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__first_fp64)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   double
// A type:   double
// A pattern? 0
// B type:   double
// B pattern? 1

// BinaryOp: cij = aij

// NOTE: this is the FIRST operator, z = f(x,y) = x.  The values of B are
// never read (B is "pattern only"), which is why GB_GETB expands to ";"
// and GB_B_IS_PATTERN is 1 below.

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// true if values of B are not used
#define GB_B_IS_PATTERN \
    1 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = x ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FIRST || GxB_NO_FP64 || GxB_NO_FIRST_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// This kernel does not exist for FIRST (it is not a valid accumulator op),
// so the generator emitted it disabled under "#if 0".

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__first_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__first_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__first_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__first_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__first_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__first_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion passes explicit alpha/beta scalars for entries present in
    // only one of A or B; plain eWiseAdd leaves them uninitialized/unused.
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__first_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__first_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__first_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__first_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Not generated for FIRST: bind1st with z = x would produce a constant
// matrix, so the generic case handles it.  Disabled via "#if 0".

#if 0
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        ;
        ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    ; \
    ; \
    Cx [pC] = x ; \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    double aij = GBX (Ax, pA, false) ; \
    Cx [pC] = aij ; \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
1e7417_ac_wave_so4.c
#define _POSIX_C_SOURCE 200809L

/* Start/stop a wall-clock interval for profiler section S.  START_TIMER
   declares the timeval pair and records the start; STOP_TIMER adds the
   elapsed seconds into T->S.  START must appear where declarations are
   legal, and each section can START at most once per scope. */
#define START_TIMER(S) \
  struct timeval start_##S, end_##S; \
  gettimeofday(&start_##S, NULL);
#define STOP_TIMER(S, T) \
  gettimeofday(&end_##S, NULL); \
  T->S += (double)(end_##S.tv_sec - start_##S.tv_sec) + (double)(end_##S.tv_usec - start_##S.tv_usec) / 1000000;

#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include <stdio.h>
#include "omp.h"

#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

/* Devito-style data carrier: raw buffer plus per-dimension metadata.
   Only `data` and `size` are read in this file. */
struct dataobj
{
  void *restrict data;
  int *size;
  int *npsize;
  int *dsize;
  int *hsize;
  int *hofs;
  int *oofs;
};

/* Accumulated wall-clock seconds per code section (see START/STOP_TIMER). */
struct profiler
{
  double section0;
  double section1;
};

/* Forward declaration; bf0 updates one (xb, yb) space tile at one time step
   of the skewed/time-tiled wave update. */
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw);

/* Driver: reads tile/block sizes from block_sizes_vec, then sweeps a
   time-tiled (temporally blocked, factor sf) wavefront over the x/y tiles,
   calling bf0 for each (t, xb, yb) combination.  Timing is accumulated in
   timers->section0/section1. */
int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine, struct profiler *timers)
{
  /* Reinterpret the flat buffers as variably-modified multidimensional
     arrays using the runtime sizes carried in each dataobj. */
  int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
  int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
  float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;

  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);

  /* Tile (outer) and block (inner) extents, passed in at runtime. */
  int xb_size = block_sizes[0];
  int yb_size = block_sizes[1];
  int x0_blk0_size = block_sizes[2];
  int y0_blk0_size = block_sizes[3];

  printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);

  //for (int time = time_m, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3); time <= time_M; time += 1, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3))
  //{

  /* sf is the skewing factor; spatial loop bounds below are shifted by
     sf*(time_M - time_m) so tiles track the moving wavefront. */
  int sf = 2;
  int t_blk_size = 2 * sf * (time_M - time_m);

  START_TIMER(section0)
  /* NOTE(review): STOP_TIMER(section0) executes once per t_blk iteration
     while START_TIMER ran only once before the loop, so overlapping
     intervals are accumulated if this loop runs more than once — confirm
     intended (with these bounds the loop body runs a single time). */
  for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
  {
    for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1)
    {
      for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1)
      {
        /* t0/t1/t2 rotate through the 3 time planes of u; tw is the
           wrapped "true" time index used for the source injection table. */
        for (int time = t_blk, t1 = (time + 2) % (3), t0 = (time) % (3), t2 = (time + 1) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t1 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3))
        {
          int tw = ((time / sf) % (time_M - time_m + 1));
          bf0(damp_vec, dt, u_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, source_id_vec, source_mask_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, time, tw);
          //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
          //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, nthreads);
          //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
        }
      }
    }
    /* End section0 */
    STOP_TIMER(section0, timers)
  }

  /* section1 is currently empty: the loop only starts and stops the timer
     (presumably a placeholder left by the code generator). */
  for (int time = time_m, t0 = (time + 1) % (3); time <= time_M; time += 1, t0 = (time + 1) % (3))
  {
    /* Begin section1 */
    START_TIMER(section1)
    STOP_TIMER(section1, timers)
    /* End section1 */
  }
  return 0;
}

/* Compute one space tile (xb, yb) at one skewed time step: a damped wave
   update on u (three rotating time planes t0/t1/t2), followed by injection
   of the saved source values via the sparse source-mask tables.  The
   "- time" offsets de-skew the shifted loop indices back to array space.
   Assumes a 2nd-order-in-time, 4th-order-in-space discretization — the
   coefficients 3.70370379e-4F / 5.92592607e-3F / 3.33333341e-2F come from
   the generator; TODO confirm against the Devito build. */
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw)
{
  float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
  float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
  float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
  int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;

  /* Degenerate block sizes would make the blocked loops infinite. */
  if (x0_blk0_size == 0 || y0_blk0_size == 0)
  {
    return;
  }
#pragma omp parallel num_threads(nthreads)
  {
    /* Blocks are independent, so the two blocked loops are collapsed and
       distributed dynamically across the team. */
#pragma omp for collapse(2) schedule(dynamic, 1)
    for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
    {
      for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
      {
        for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++)
        {
          for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++)
          {
#pragma omp simd aligned(damp, u, vp : 32)
            for (int z = z_m; z <= z_M; z += 1)
            {
              float r8 = 1.0/dt;
              float r7 = 1.0/(dt*dt);
              float r6 = 1.0/(vp[x - time + 4][y - time + 4][z + 4]*vp[x - time + 4][y - time + 4][z + 4]);
              u[t2][x - time + 4][y - time + 4][z + 4] = (r6*(-r7*(u[t0][x - time + 4][y - time + 4][z + 4] - 2.0F*u[t1][x - time + 4][y - time + 4][z + 4])) + r8*(damp[x - time + 1][y - time + 1][z + 1]*u[t1][x - time + 4][y - time + 4][z + 4]) - 3.70370379e-4F*(u[t1][x - time + 2][y - time + 4][z + 4] + u[t1][x - time + 4][y - time + 2][z + 4] + u[t1][x - time + 4][y - time + 4][z + 2] + u[t1][x - time + 4][y - time + 4][z + 6] + u[t1][x - time + 4][y - time + 6][z + 4] + u[t1][x - time + 6][y - time + 4][z + 4]) + 5.92592607e-3F*(u[t1][x - time + 3][y - time + 4][z + 4] + u[t1][x - time + 4][y - time + 3][z + 4] + u[t1][x - time + 4][y - time + 4][z + 3] + u[t1][x - time + 4][y - time + 4][z + 5] + u[t1][x - time + 4][y - time + 5][z + 4] + u[t1][x - time + 5][y - time + 4][z + 4]) - 3.33333341e-2F*u[t1][x - time + 4][y - time + 4][z + 4])/(r6*r7 + r8*damp[x - time + 1][y - time + 1][z + 1]);
            }
            /* Inject saved source values at the sparse z positions listed in
               sp_source_mask.  NOTE(review): the simd pragma is applied to a
               loop with indirect writes through zind — safe only if zind
               values are distinct per (x, y); confirm against the generator. */
#pragma omp simd aligned(damp, u, vp : 32)
            for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
            {
              int zind = sp_source_mask[x - time][y - time][sp_zi];
              float r0 = save_src_u[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
              u[t2][x - time + 4][y - time + 4][zind + 4] += r0;
            }
          }
        }
      }
    }
  }
}
divcon_sum_omp.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

// Divide-and-conquer sum of a[0..n-1] using OpenMP tasks.
//
// Parameters:
//   a : pointer to the first element of the range
//   n : number of elements to sum
// Returns: the (pairwise) sum of the range.
//
// Must be called from inside an OpenMP parallel region (see main): the
// tasks spawned here are executed by the enclosing team.  Below the
// cutoff the recursion continues serially, so the summation order (and
// hence the floating-point rounding) is identical in both branches.
double sum(const double *a, size_t n)
{
    size_t half = n / 2;
    double res1;
    double res2;

    // Edge cases: empty and single-element ranges terminate the recursion.
    if (n == 0) return 0;
    if (n == 1) return *a;

    if (n < 1000)
    {
        // Revert to serial recursion: spawning a task per tiny range
        // would cost more than the additions themselves.
        return sum(a, half) + sum(a + half, n - half);
    }
    else
    {
        // Parallel case: one task per half; taskwait joins both children
        // before their partial sums are combined.
        #pragma omp task shared(res1)
        res1 = sum(a, half);
        #pragma omp task shared(res2)
        res2 = sum(a + half, n - half);
        #pragma omp taskwait
        res1 = res1 + res2;
        return res1;
    }
}

int main(int argc, char *argv[])
{
    int N, i;
    double *list, res, kgo;
    double t_start, t_end;

    N = 400000000;
    // BUGFIX: the 3.2 GB allocation was never checked; on failure the init
    // loop below dereferenced NULL.  Cast N before multiplying so the size
    // is computed in size_t from the start.
    list = malloc((size_t) N * sizeof(double));
    if (list == NULL)
    {
        fprintf(stderr, "failed to allocate %d doubles\n", N);
        return EXIT_FAILURE;
    }
    for (i = 0; i < N; i++)
    {
        list[i] = (double) i;
    }

    t_start = omp_get_wtime();
    // One thread enters sum(); the tasks it spawns are executed by the whole
    // team.  "nowait" lets the other threads start stealing tasks instead of
    // idling at the single construct's exit barrier; the parallel region's
    // closing barrier still synchronizes before res is read.
    #pragma omp parallel
    {
        #pragma omp single nowait
        res = sum(list, N);
    }
    t_end = omp_get_wtime();

    // Closed form 0 + 1 + ... + (N-1) = N*(N-1)/2, computed in double:
    // 0.5*N is evaluated first and is exact, so no integer overflow occurs.
    kgo = 0.5 * N * (N - 1);
    printf("error=%g\n", res - kgo);
    printf("Took %g s\n", t_end - t_start);
    free(list);
    return EXIT_SUCCESS;
}
ipa-fnsummary.c
/* Function summary pass. Copyright (C) 2003-2018 Free Software Foundation, Inc. Contributed by Jan Hubicka This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ /* Analysis of function bodies used by inter-procedural passes We estimate for each function - function body size and size after specializing into given context - average function execution time in a given context - function frame size For each call - call statement size, time and how often the parameters change ipa_fn_summary data structures store above information locally (i.e. parameters of the function itself) and globally (i.e. parameters of the function created by applying all the inline decisions already present in the callgraph). We provide access to the ipa_fn_summary data structure and basic logic updating the parameters when inlining is performed. The summaries are context sensitive. Context means 1) partial assignment of known constant values of operands 2) whether function is inlined into the call or not. It is easy to add more variants. To represent function size and time that depends on context (i.e. it is known to be optimized away when context is known either by inlining or from IP-CP and cloning), we use predicates. estimate_edge_size_and_time can be used to query function size/time in the given context. ipa_merge_fn_summary_after_inlining merges properties of caller and callee after inlining. Finally pass_inline_parameters is exported. 
This is used to drive computation of function parameters used by the early inliner. IPA inlined performs analysis via its analyze_function method. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "backend.h" #include "tree.h" #include "gimple.h" #include "alloc-pool.h" #include "tree-pass.h" #include "ssa.h" #include "tree-streamer.h" #include "cgraph.h" #include "diagnostic.h" #include "fold-const.h" #include "print-tree.h" #include "tree-inline.h" #include "gimple-pretty-print.h" #include "params.h" #include "cfganal.h" #include "gimple-iterator.h" #include "tree-cfg.h" #include "tree-ssa-loop-niter.h" #include "tree-ssa-loop.h" #include "symbol-summary.h" #include "ipa-prop.h" #include "ipa-fnsummary.h" #include "cfgloop.h" #include "tree-scalar-evolution.h" #include "ipa-utils.h" #include "cfgexpand.h" #include "gimplify.h" #include "stringpool.h" #include "attribs.h" /* Summaries. */ function_summary <ipa_fn_summary *> *ipa_fn_summaries; call_summary <ipa_call_summary *> *ipa_call_summaries; /* Edge predicates goes here. */ static object_allocator<predicate> edge_predicate_pool ("edge predicates"); /* Dump IPA hints. 
*/ void ipa_dump_hints (FILE *f, ipa_hints hints) { if (!hints) return; fprintf (f, "IPA hints:"); if (hints & INLINE_HINT_indirect_call) { hints &= ~INLINE_HINT_indirect_call; fprintf (f, " indirect_call"); } if (hints & INLINE_HINT_loop_iterations) { hints &= ~INLINE_HINT_loop_iterations; fprintf (f, " loop_iterations"); } if (hints & INLINE_HINT_loop_stride) { hints &= ~INLINE_HINT_loop_stride; fprintf (f, " loop_stride"); } if (hints & INLINE_HINT_same_scc) { hints &= ~INLINE_HINT_same_scc; fprintf (f, " same_scc"); } if (hints & INLINE_HINT_in_scc) { hints &= ~INLINE_HINT_in_scc; fprintf (f, " in_scc"); } if (hints & INLINE_HINT_cross_module) { hints &= ~INLINE_HINT_cross_module; fprintf (f, " cross_module"); } if (hints & INLINE_HINT_declared_inline) { hints &= ~INLINE_HINT_declared_inline; fprintf (f, " declared_inline"); } if (hints & INLINE_HINT_array_index) { hints &= ~INLINE_HINT_array_index; fprintf (f, " array_index"); } if (hints & INLINE_HINT_known_hot) { hints &= ~INLINE_HINT_known_hot; fprintf (f, " known_hot"); } gcc_assert (!hints); } /* Record SIZE and TIME to SUMMARY. The accounted code will be executed when EXEC_PRED is true. When NONCONST_PRED is false the code will evaulate to constant and will get optimized out in specialized clones of the function. */ void ipa_fn_summary::account_size_time (int size, sreal time, const predicate &exec_pred, const predicate &nonconst_pred_in) { size_time_entry *e; bool found = false; int i; predicate nonconst_pred; if (exec_pred == false) return; nonconst_pred = nonconst_pred_in & exec_pred; if (nonconst_pred == false) return; /* We need to create initial empty unconitional clause, but otherwie we don't need to account empty times and sizes. 
*/ if (!size && time == 0 && size_time_table) return; gcc_assert (time >= 0); for (i = 0; vec_safe_iterate (size_time_table, i, &e); i++) if (e->exec_predicate == exec_pred && e->nonconst_predicate == nonconst_pred) { found = true; break; } if (i == 256) { i = 0; found = true; e = &(*size_time_table)[0]; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "\t\tReached limit on number of entries, " "ignoring the predicate."); } if (dump_file && (dump_flags & TDF_DETAILS) && (time != 0 || size)) { fprintf (dump_file, "\t\tAccounting size:%3.2f, time:%3.2f on %spredicate exec:", ((double) size) / ipa_fn_summary::size_scale, (time.to_double ()), found ? "" : "new "); exec_pred.dump (dump_file, conds, 0); if (exec_pred != nonconst_pred) { fprintf (dump_file, " nonconst:"); nonconst_pred.dump (dump_file, conds); } else fprintf (dump_file, "\n"); } if (!found) { struct size_time_entry new_entry; new_entry.size = size; new_entry.time = time; new_entry.exec_predicate = exec_pred; new_entry.nonconst_predicate = nonconst_pred; vec_safe_push (size_time_table, new_entry); } else { e->size += size; e->time += time; } } /* We proved E to be unreachable, redirect it to __bultin_unreachable. */ static struct cgraph_edge * redirect_to_unreachable (struct cgraph_edge *e) { struct cgraph_node *callee = !e->inline_failed ? e->callee : NULL; struct cgraph_node *target = cgraph_node::get_create (builtin_decl_implicit (BUILT_IN_UNREACHABLE)); if (e->speculative) e = e->resolve_speculation (target->decl); else if (!e->callee) e->make_direct (target); else e->redirect_callee (target); struct ipa_call_summary *es = ipa_call_summaries->get (e); e->inline_failed = CIF_UNREACHABLE; e->count = profile_count::zero (); es->call_stmt_size = 0; es->call_stmt_time = 0; if (callee) callee->remove_symbol_and_inline_clones (); return e; } /* Set predicate for edge E. 
*/ static void edge_set_predicate (struct cgraph_edge *e, predicate *predicate) { /* If the edge is determined to be never executed, redirect it to BUILTIN_UNREACHABLE to make it clear to IPA passes the call will be optimized out. */ if (predicate && *predicate == false /* When handling speculative edges, we need to do the redirection just once. Do it always on the direct edge, so we do not attempt to resolve speculation while duplicating the edge. */ && (!e->speculative || e->callee)) e = redirect_to_unreachable (e); struct ipa_call_summary *es = ipa_call_summaries->get (e); if (predicate && *predicate != true) { if (!es->predicate) es->predicate = edge_predicate_pool.allocate (); *es->predicate = *predicate; } else { if (es->predicate) edge_predicate_pool.remove (es->predicate); es->predicate = NULL; } } /* Set predicate for hint *P. */ static void set_hint_predicate (predicate **p, predicate new_predicate) { if (new_predicate == false || new_predicate == true) { if (*p) edge_predicate_pool.remove (*p); *p = NULL; } else { if (!*p) *p = edge_predicate_pool.allocate (); **p = new_predicate; } } /* Compute what conditions may or may not hold given invormation about parameters. RET_CLAUSE returns truths that may hold in a specialized copy, whie RET_NONSPEC_CLAUSE returns truths that may hold in an nonspecialized copy when called in a given context. It is a bitmask of conditions. Bit 0 means that condition is known to be false, while bit 1 means that condition may or may not be true. These differs - for example NOT_INLINED condition is always false in the second and also builtin_constant_p tests can not use the fact that parameter is indeed a constant. KNOWN_VALS is partial mapping of parameters of NODE to constant values. KNOWN_AGGS is a vector of aggreggate jump functions for each parameter. Return clause of possible truths. When INLINE_P is true, assume that we are inlining. ERROR_MARK means compile time invariant. 
*/

static void
evaluate_conditions_for_known_args (struct cgraph_node *node,
                                    bool inline_p,
                                    vec<tree> known_vals,
                                    vec<ipa_agg_jump_function_p> known_aggs,
                                    clause_t *ret_clause,
                                    clause_t *ret_nonspec_clause)
{
  clause_t clause = inline_p ? 0 : 1 << predicate::not_inlined_condition;
  clause_t nonspec_clause = 1 << predicate::not_inlined_condition;
  struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
  int i;
  struct condition *c;

  for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
    {
      tree val;
      tree res;

      /* We allow call stmt to have fewer arguments than the callee function
         (especially for K&R style programs).  So bound check here (we assume
         known_aggs vector, if non-NULL, has the same length as
         known_vals).  */
      gcc_checking_assert (!known_aggs.exists ()
                           || (known_vals.length () == known_aggs.length ()));
      if (c->operand_num >= (int) known_vals.length ())
        {
          /* Argument not provided: condition may or may not be true.  */
          clause |= 1 << (i + predicate::first_dynamic_condition);
          nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
          continue;
        }

      if (c->agg_contents)
        {
          struct ipa_agg_jump_function *agg;

          if (c->code == predicate::changed
              && !c->by_ref
              && (known_vals[c->operand_num] == error_mark_node))
            continue;

          if (known_aggs.exists ())
            {
              agg = known_aggs[c->operand_num];
              val = ipa_find_agg_cst_for_param (agg,
                                                known_vals[c->operand_num],
                                                c->offset, c->by_ref);
            }
          else
            val = NULL_TREE;
        }
      else
        {
          val = known_vals[c->operand_num];
          /* ERROR_MARK means "compile time invariant"; it only satisfies
             the CHANGED condition, not value comparisons.  */
          if (val == error_mark_node && c->code != predicate::changed)
            val = NULL_TREE;
        }

      if (!val)
        {
          clause |= 1 << (i + predicate::first_dynamic_condition);
          nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
          continue;
        }
      if (c->code == predicate::changed)
        {
          nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
          continue;
        }

      /* A value of a different size cannot satisfy the condition.  */
      if (tree_to_shwi (TYPE_SIZE (TREE_TYPE (val))) != c->size)
        {
          clause |= 1 << (i + predicate::first_dynamic_condition);
          nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
          continue;
        }
      if (c->code == predicate::is_not_constant)
        {
          nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
          continue;
        }

      /* Fold the condition with the known constant; if it folds to zero
         the condition is known false and the bit stays clear.  */
      val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (c->val), val);
      res = val
        ? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
        : NULL;

      if (res && integer_zerop (res))
        continue;

      clause |= 1 << (i + predicate::first_dynamic_condition);
      nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
    }
  *ret_clause = clause;
  if (ret_nonspec_clause)
    *ret_nonspec_clause = nonspec_clause;
}


/* Work out what conditions might be true at invocation of E.  */

void
evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
                              clause_t *clause_ptr,
                              clause_t *nonspec_clause_ptr,
                              vec<tree> *known_vals_ptr,
                              vec<ipa_polymorphic_call_context>
                              *known_contexts_ptr,
                              vec<ipa_agg_jump_function_p> *known_aggs_ptr)
{
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();
  struct ipa_fn_summary *info = ipa_fn_summaries->get (callee);
  vec<tree> known_vals = vNULL;
  vec<ipa_agg_jump_function_p> known_aggs = vNULL;

  if (clause_ptr)
    *clause_ptr = inline_p ? 0 : 1 << predicate::not_inlined_condition;
  if (known_vals_ptr)
    known_vals_ptr->create (0);
  if (known_contexts_ptr)
    known_contexts_ptr->create (0);

  /* Preferred path: use IPA jump functions when available.  */
  if (ipa_node_params_sum
      && !e->call_stmt_cannot_inline_p
      && ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr))
    {
      struct ipa_node_params *caller_parms_info, *callee_pi;
      struct ipa_edge_args *args = IPA_EDGE_REF (e);
      struct ipa_call_summary *es = ipa_call_summaries->get (e);
      int i, count = ipa_get_cs_argument_count (args);

      if (e->caller->global.inlined_to)
        caller_parms_info = IPA_NODE_REF (e->caller->global.inlined_to);
      else
        caller_parms_info = IPA_NODE_REF (e->caller);
      callee_pi = IPA_NODE_REF (e->callee);

      if (count && (info->conds || known_vals_ptr))
        known_vals.safe_grow_cleared (count);
      if (count && (info->conds || known_aggs_ptr))
        known_aggs.safe_grow_cleared (count);
      if (count && known_contexts_ptr)
        known_contexts_ptr->safe_grow_cleared (count);

      for (i = 0; i < count; i++)
        {
          struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
          tree cst = ipa_value_from_jfunc (caller_parms_info, jf,
                                           ipa_get_type (callee_pi, i));

          /* Fall back to the call statement's argument when the jump
             function gives no constant.  */
          if (!cst && e->call_stmt
              && i < (int)gimple_call_num_args (e->call_stmt))
            {
              cst = gimple_call_arg (e->call_stmt, i);
              if (!is_gimple_min_invariant (cst))
                cst = NULL;
            }
          if (cst)
            {
              gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
              if (known_vals.exists ())
                known_vals[i] = cst;
            }
          else if (inline_p && !es->param[i].change_prob)
            /* change_prob of 0 means compile time invariant; mark it with
               ERROR_MARK.  */
            known_vals[i] = error_mark_node;

          if (known_contexts_ptr)
            (*known_contexts_ptr)[i]
              = ipa_context_from_jfunc (caller_parms_info, e, i, jf);
          /* TODO: When IPA-CP starts propagating and merging aggregate jump
             functions, use its knowledge of the caller too, just like the
             scalar case above.  */
          known_aggs[i] = &jf->agg;
        }
    }
  /* Fallback: read invariant arguments directly off the call statement.  */
  else if (e->call_stmt && !e->call_stmt_cannot_inline_p
           && ((clause_ptr && info->conds) || known_vals_ptr))
    {
      int i, count = (int)gimple_call_num_args (e->call_stmt);

      if (count && (info->conds || known_vals_ptr))
        known_vals.safe_grow_cleared (count);
      for (i = 0; i < count; i++)
        {
          tree cst = gimple_call_arg (e->call_stmt, i);
          if (!is_gimple_min_invariant (cst))
            cst = NULL;
          if (cst)
            known_vals[i] = cst;
        }
    }

  evaluate_conditions_for_known_args (callee, inline_p,
                                      known_vals, known_aggs, clause_ptr,
                                      nonspec_clause_ptr);

  /* Hand the vectors to the caller or release them.  */
  if (known_vals_ptr)
    *known_vals_ptr = known_vals;
  else
    known_vals.release ();

  if (known_aggs_ptr)
    *known_aggs_ptr = known_aggs;
  else
    known_aggs.release ();
}


/* Allocate the function summary.  */

static void
ipa_fn_summary_alloc (void)
{
  gcc_checking_assert (!ipa_fn_summaries);
  ipa_fn_summaries = ipa_fn_summary_t::create_ggc (symtab);
  ipa_call_summaries = new ipa_call_summary_t (symtab, false);
}

/* We are called multiple times for given function; clear
   data from previous run so they are not cumulated.  */

void
ipa_call_summary::reset ()
{
  call_stmt_size = call_stmt_time = 0;
  is_return_callee_uncaptured = false;
  if (predicate)
    edge_predicate_pool.remove (predicate);
  predicate = NULL;
  param.release ();
}

/* We are called multiple times for given function; clear
   data from previous run so they are not cumulated.
*/

void
ipa_fn_summary::reset (struct cgraph_node *node)
{
  struct cgraph_edge *e;

  self_size = 0;
  estimated_stack_size = 0;
  estimated_self_stack_size = 0;
  stack_frame_offset = 0;
  size = 0;
  time = 0;
  growth = 0;
  scc_no = 0;
  /* Hint predicates are pool allocated; return them before clearing.  */
  if (loop_iterations)
    {
      edge_predicate_pool.remove (loop_iterations);
      loop_iterations = NULL;
    }
  if (loop_stride)
    {
      edge_predicate_pool.remove (loop_stride);
      loop_stride = NULL;
    }
  if (array_index)
    {
      edge_predicate_pool.remove (array_index);
      array_index = NULL;
    }
  vec_free (conds);
  vec_free (size_time_table);
  /* Also reset summaries of all outgoing (direct and indirect) edges.  */
  for (e = node->callees; e; e = e->next_callee)
    ipa_call_summaries->get (e)->reset ();
  for (e = node->indirect_calls; e; e = e->next_callee)
    ipa_call_summaries->get (e)->reset ();
  fp_expressions = false;
}

/* Hook that is called by cgraph.c when a node is removed.  */

void
ipa_fn_summary_t::remove (cgraph_node *node, ipa_fn_summary *info)
{
  info->reset (node);
}

/* Same as remap_predicate_after_duplication but handle hint predicate *P.
   Additionally care about allocating new memory slot for updated predicate
   and set it to NULL when it becomes true or false (and thus
   uninteresting).  */

static void
remap_hint_predicate_after_duplication (predicate **p,
                                        clause_t possible_truths)
{
  predicate new_predicate;

  if (!*p)
    return;

  new_predicate = (*p)->remap_after_duplication (possible_truths);
  /* We do not want to free previous predicate; it is used by node
     origin.  */
  *p = NULL;
  set_hint_predicate (p, new_predicate);
}


/* Hook that is called by cgraph.c when a node is duplicated.  */

void
ipa_fn_summary_t::duplicate (cgraph_node *src,
                             cgraph_node *dst,
                             ipa_fn_summary *,
                             ipa_fn_summary *info)
{
  memcpy (info, ipa_fn_summaries->get (src), sizeof (ipa_fn_summary));
  /* TODO: as an optimization, we may avoid copying conditions
     that are known to be false or true.  */
  info->conds = vec_safe_copy (info->conds);

  /* When there are any replacements in the function body, see if we can
     figure out that something was optimized out.  */
  if (ipa_node_params_sum && dst->clone.tree_map)
    {
      vec<size_time_entry, va_gc> *entry = info->size_time_table;
      /* Use SRC parm info since it may not be copied yet.  */
      struct ipa_node_params *parms_info = IPA_NODE_REF (src);
      vec<tree> known_vals = vNULL;
      int count = ipa_get_param_count (parms_info);
      int i, j;
      clause_t possible_truths;
      predicate true_pred = true;
      size_time_entry *e;
      int optimized_out_size = 0;
      bool inlined_to_p = false;
      struct cgraph_edge *edge, *next;

      info->size_time_table = 0;
      known_vals.safe_grow_cleared (count);
      /* Collect the replacement constants per parameter from the clone's
         tree map.  */
      for (i = 0; i < count; i++)
        {
          struct ipa_replace_map *r;

          for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
            {
              if (((!r->old_tree && r->parm_num == i)
                   || (r->old_tree
                       && r->old_tree == ipa_get_param (parms_info, i)))
                  && r->replace_p && !r->ref_p)
                {
                  known_vals[i] = r->new_tree;
                  break;
                }
            }
        }
      evaluate_conditions_for_known_args (dst, false,
                                          known_vals,
                                          vNULL,
                                          &possible_truths,
                                          /* We are going to specialize,
                                             so ignore nonspec truths.  */
                                          NULL);
      known_vals.release ();

      info->account_size_time (0, 0, true_pred, true_pred);

      /* Remap size_time vectors.
         Simplify the predicate by pruning out alternatives that are known
         to be false.
         TODO: as an optimization, we can also eliminate conditions known
         to be true.  */
      for (i = 0; vec_safe_iterate (entry, i, &e); i++)
        {
          predicate new_exec_pred;
          predicate new_nonconst_pred;
          new_exec_pred = e->exec_predicate.remap_after_duplication
                                 (possible_truths);
          new_nonconst_pred
            = e->nonconst_predicate.remap_after_duplication
                                 (possible_truths);
          if (new_exec_pred == false || new_nonconst_pred == false)
            optimized_out_size += e->size;
          else
            info->account_size_time (e->size, e->time, new_exec_pred,
                                     new_nonconst_pred);
        }

      /* Remap edge predicates with the same simplification as above.
         Also copy constantness arrays.  */
      for (edge = dst->callees; edge; edge = next)
        {
          predicate new_predicate;
          struct ipa_call_summary *es = ipa_call_summaries->get (edge);
          next = edge->next_callee;

          if (!edge->inline_failed)
            inlined_to_p = true;
          if (!es->predicate)
            continue;
          new_predicate = es->predicate->remap_after_duplication
            (possible_truths);
          if (new_predicate == false && *es->predicate != false)
            optimized_out_size
              += es->call_stmt_size * ipa_fn_summary::size_scale;
          edge_set_predicate (edge, &new_predicate);
        }

      /* Remap indirect edge predicates with the same simplification as
         above.  Also copy constantness arrays.  */
      for (edge = dst->indirect_calls; edge; edge = next)
        {
          predicate new_predicate;
          struct ipa_call_summary *es = ipa_call_summaries->get (edge);
          next = edge->next_callee;

          gcc_checking_assert (edge->inline_failed);
          if (!es->predicate)
            continue;
          new_predicate = es->predicate->remap_after_duplication
                                 (possible_truths);
          if (new_predicate == false && *es->predicate != false)
            optimized_out_size
              += es->call_stmt_size * ipa_fn_summary::size_scale;
          edge_set_predicate (edge, &new_predicate);
        }
      remap_hint_predicate_after_duplication (&info->loop_iterations,
                                              possible_truths);
      remap_hint_predicate_after_duplication (&info->loop_stride,
                                              possible_truths);
      remap_hint_predicate_after_duplication (&info->array_index,
                                              possible_truths);

      /* If inliner or someone after inliner will ever start producing
         non-trivial clones, we will get trouble with lack of information
         about updating self sizes, because size vectors already contains
         sizes of the callees.  */
      gcc_assert (!inlined_to_p || !optimized_out_size);
    }
  else
    {
      /* No replacements: plain copy of the table and hint predicates.
         Hints must be re-allocated in the pool, not shared with SRC.  */
      info->size_time_table = vec_safe_copy (info->size_time_table);
      if (info->loop_iterations)
        {
          predicate p = *info->loop_iterations;
          info->loop_iterations = NULL;
          set_hint_predicate (&info->loop_iterations, p);
        }
      if (info->loop_stride)
        {
          predicate p = *info->loop_stride;
          info->loop_stride = NULL;
          set_hint_predicate (&info->loop_stride, p);
        }
      if (info->array_index)
        {
          predicate p = *info->array_index;
          info->array_index = NULL;
          set_hint_predicate (&info->array_index, p);
        }
    }
  if (!dst->global.inlined_to)
    ipa_update_overall_fn_summary (dst);
}


/* Hook that is called by cgraph.c when a node is duplicated.  */

void
ipa_call_summary_t::duplicate (struct cgraph_edge *src,
                               struct cgraph_edge *dst,
                               struct ipa_call_summary *srcinfo,
                               struct ipa_call_summary *info)
{
  *info = *srcinfo;
  info->predicate = NULL;
  edge_set_predicate (dst, srcinfo->predicate);
  info->param = srcinfo->param.copy ();
  /* An indirect edge turned direct becomes cheaper; subtract the
     difference in costs.  */
  if (!dst->indirect_unknown_callee && src->indirect_unknown_callee)
    {
      info->call_stmt_size -= (eni_size_weights.indirect_call_cost
                               - eni_size_weights.call_cost);
      info->call_stmt_time -= (eni_time_weights.indirect_call_cost
                               - eni_time_weights.call_cost);
    }
}

/* Keep edge cache consistent across edge removal.  */

void
ipa_call_summary_t::remove (struct cgraph_edge *,
                            struct ipa_call_summary *sum)
{
  sum->reset ();
}

/* Dump edge summaries associated to NODE and recursively to all clones.
   Indent by INDENT.  */

static void
dump_ipa_call_summary (FILE *f, int indent, struct cgraph_node *node,
                       struct ipa_fn_summary *info)
{
  struct cgraph_edge *edge;
  for (edge = node->callees; edge; edge = edge->next_callee)
    {
      struct ipa_call_summary *es = ipa_call_summaries->get (edge);
      struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
      int i;

      fprintf (f,
               "%*s%s/%i %s\n%*s loop depth:%2i freq:%4.2f size:%2i"
               " time: %2i callee size:%2i stack:%2i",
               indent, "", callee->name (), callee->order,
               !edge->inline_failed
               ? "inlined" : cgraph_inline_failed_string (edge-> inline_failed),
               indent, "", es->loop_depth, edge->sreal_frequency ().to_double (),
               es->call_stmt_size, es->call_stmt_time,
               (int) ipa_fn_summaries->get (callee)->size
                 / ipa_fn_summary::size_scale,
               (int) ipa_fn_summaries->get (callee)->estimated_stack_size);

      if (es->predicate)
        {
          fprintf (f, " predicate: ");
          es->predicate->dump (f, info->conds);
        }
      else
        fprintf (f, "\n");
      if (es->param.exists ())
        for (i = 0; i < (int) es->param.length (); i++)
          {
            int prob = es->param[i].change_prob;

            if (!prob)
              fprintf (f, "%*s op%i is compile time invariant\n",
                       indent + 2, "", i);
            else if (prob != REG_BR_PROB_BASE)
              fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i,
                       prob * 100.0 / REG_BR_PROB_BASE);
          }
      /* Inlined edges: dump the callee's subtree recursively.  */
      if (!edge->inline_failed)
        {
          fprintf (f, "%*sStack frame offset %i, callee self size %i,"
                   " callee size %i\n",
                   indent + 2, "",
                   (int) ipa_fn_summaries->get (callee)->stack_frame_offset,
                   (int) ipa_fn_summaries->get (callee)->estimated_self_stack_size,
                   (int) ipa_fn_summaries->get (callee)->estimated_stack_size);
          dump_ipa_call_summary (f, indent + 2, callee, info);
        }
    }
  for (edge = node->indirect_calls; edge; edge = edge->next_callee)
    {
      struct ipa_call_summary *es = ipa_call_summaries->get (edge);
      fprintf (f, "%*sindirect call loop depth:%2i freq:%4.2f size:%2i"
               " time: %2i",
               indent, "",
               es->loop_depth,
               edge->sreal_frequency ().to_double (), es->call_stmt_size,
               es->call_stmt_time);
      if (es->predicate)
        {
          fprintf (f, "predicate: ");
          es->predicate->dump (f, info->conds);
        }
      else
        fprintf (f, "\n");
    }
}


/* Dump function summary of NODE to file F.  */

void
ipa_dump_fn_summary (FILE *f, struct cgraph_node *node)
{
  if (node->definition)
    {
      struct ipa_fn_summary *s = ipa_fn_summaries->get (node);
      size_time_entry *e;
      int i;
      fprintf (f, "IPA function summary for %s/%i", node->name (),
               node->order);
      if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
        fprintf (f, " always_inline");
      if (s->inlinable)
        fprintf (f, " inlinable");
      if (s->fp_expressions)
        fprintf (f, " fp_expression");
      fprintf (f, "\n global time: %f\n", s->time.to_double ());
      fprintf (f, " self size: %i\n", s->self_size);
      fprintf (f, " global size: %i\n", s->size);
      fprintf (f, " min size: %i\n", s->min_size);
      fprintf (f, " self stack: %i\n",
               (int) s->estimated_self_stack_size);
      fprintf (f, " global stack: %i\n", (int) s->estimated_stack_size);
      if (s->growth)
        fprintf (f, " estimated growth:%i\n", (int) s->growth);
      if (s->scc_no)
        fprintf (f, " In SCC: %i\n", (int) s->scc_no);
      for (i = 0; vec_safe_iterate (s->size_time_table, i, &e); i++)
        {
          fprintf (f, " size:%f, time:%f",
                   (double) e->size / ipa_fn_summary::size_scale,
                   e->time.to_double ());
          if (e->exec_predicate != true)
            {
              fprintf (f, ", executed if:");
              e->exec_predicate.dump (f, s->conds, 0);
            }
          if (e->exec_predicate != e->nonconst_predicate)
            {
              fprintf (f, ", nonconst if:");
              e->nonconst_predicate.dump (f, s->conds, 0);
            }
          fprintf (f, "\n");
        }
      if (s->loop_iterations)
        {
          fprintf (f, " loop iterations:");
          s->loop_iterations->dump (f, s->conds);
        }
      if (s->loop_stride)
        {
          fprintf (f, " loop stride:");
          s->loop_stride->dump (f, s->conds);
        }
      if (s->array_index)
        {
          fprintf (f, " array index:");
          s->array_index->dump (f, s->conds);
        }
      fprintf (f, " calls:\n");
      dump_ipa_call_summary (f, 4, node, s);
      fprintf (f, "\n");
    }
}

DEBUG_FUNCTION void
ipa_debug_fn_summary (struct cgraph_node *node)
{
  ipa_dump_fn_summary (stderr, node);
}

/* Dump summaries of all defined non-inlined functions to F.  */

void
ipa_dump_fn_summaries (FILE *f)
{
  struct cgraph_node *node;

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to)
      ipa_dump_fn_summary (f, node);
}

/* Callback of walk_aliased_vdefs.  Flags that it has been invoked to the
   boolean variable pointed to by DATA.  */

static bool
mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
               void *data)
{
  bool *b = (bool *) data;
  *b = true;
  return true;
}

/* If OP refers to value of function parameter, return the corresponding
   parameter.  If non-NULL, the size of the memory load (or the SSA_NAME of
   the PARM_DECL) will be stored to *SIZE_P in that case too.
*/

static tree
unmodified_parm_1 (gimple *stmt, tree op, HOST_WIDE_INT *size_p)
{
  /* SSA_NAME referring to parm default def?  */
  if (TREE_CODE (op) == SSA_NAME
      && SSA_NAME_IS_DEFAULT_DEF (op)
      && TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
    {
      if (size_p)
        *size_p = tree_to_shwi (TYPE_SIZE (TREE_TYPE (op)));
      return SSA_NAME_VAR (op);
    }
  /* Non-SSA parm reference?  */
  if (TREE_CODE (op) == PARM_DECL)
    {
      bool modified = false;

      ao_ref refd;
      ao_ref_init (&refd, op);
      /* Walk virtual definitions reaching STMT; mark_modified sets the
         flag on any aliasing store.  */
      walk_aliased_vdefs (&refd, gimple_vuse (stmt), mark_modified, &modified,
                          NULL);
      if (!modified)
        {
          if (size_p)
            *size_p = tree_to_shwi (TYPE_SIZE (TREE_TYPE (op)));
          return op;
        }
    }
  return NULL_TREE;
}

/* If OP refers to value of function parameter, return the corresponding
   parameter.  Also traverse chains of SSA register assignments.  If non-NULL,
   the size of the memory load (or the SSA_NAME of the PARM_DECL) will be
   stored to *SIZE_P in that case too.  */

static tree
unmodified_parm (gimple *stmt, tree op, HOST_WIDE_INT *size_p)
{
  tree res = unmodified_parm_1 (stmt, op, size_p);
  if (res)
    return res;

  /* Follow single-rhs SSA copies back toward the parameter.  */
  if (TREE_CODE (op) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (op)
      && gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
    return unmodified_parm (SSA_NAME_DEF_STMT (op),
                            gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)),
                            size_p);
  return NULL_TREE;
}

/* If OP refers to a value of a function parameter or value loaded from an
   aggregate passed to a parameter (either by value or reference), return
   TRUE and store the number of the parameter to *INDEX_P, the access size
   into *SIZE_P, and information whether and how it has been loaded from an
   aggregate into *AGGPOS.  INFO describes the function parameters, STMT is
   the statement in which OP is used or loaded.
*/

static bool
unmodified_parm_or_parm_agg_item (struct ipa_func_body_info *fbi,
                                  gimple *stmt, tree op, int *index_p,
                                  HOST_WIDE_INT *size_p,
                                  struct agg_position_info *aggpos)
{
  tree res = unmodified_parm_1 (stmt, op, size_p);

  gcc_checking_assert (aggpos);
  if (res)
    {
      /* Direct (scalar) parameter reference.  */
      *index_p = ipa_get_param_decl_index (fbi->info, res);
      if (*index_p < 0)
        return false;
      aggpos->agg_contents = false;
      aggpos->by_ref = false;
      return true;
    }

  if (TREE_CODE (op) == SSA_NAME)
    {
      if (SSA_NAME_IS_DEFAULT_DEF (op)
          || !gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
        return false;
      stmt = SSA_NAME_DEF_STMT (op);
      op = gimple_assign_rhs1 (stmt);
      /* Non-reference rhs: keep chasing the SSA copy chain.  */
      if (!REFERENCE_CLASS_P (op))
        return unmodified_parm_or_parm_agg_item (fbi, stmt, op, index_p,
                                                 size_p, aggpos);
    }

  /* OP is a memory reference; see if it loads from a parameter-passed
     aggregate.  */
  aggpos->agg_contents = true;
  return ipa_load_from_parm_agg (fbi, fbi->info->descriptors,
                                 stmt, op, index_p, &aggpos->offset,
                                 size_p, &aggpos->by_ref);
}

/* See if statement might disappear after inlining.
   0 - means not eliminated
   1 - half of statements goes away
   2 - for sure it is eliminated.
   We are not terribly sophisticated, basically looking for simple
   abstraction penalty wrappers.  */

static int
eliminated_by_inlining_prob (gimple *stmt)
{
  enum gimple_code code = gimple_code (stmt);
  enum tree_code rhs_code;

  if (!optimize)
    return 0;

  switch (code)
    {
    case GIMPLE_RETURN:
      return 2;
    case GIMPLE_ASSIGN:
      if (gimple_num_ops (stmt) != 2)
        return 0;

      rhs_code = gimple_assign_rhs_code (stmt);

      /* Casts of parameters, loads from parameters passed by reference
         and stores to return value or parameters are often free after
         inlining due to SRA and further combining.
         Assume that half of statements goes away.  */
      if (CONVERT_EXPR_CODE_P (rhs_code)
          || rhs_code == VIEW_CONVERT_EXPR
          || rhs_code == ADDR_EXPR
          || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
        {
          tree rhs = gimple_assign_rhs1 (stmt);
          tree lhs = gimple_assign_lhs (stmt);
          tree inner_rhs = get_base_address (rhs);
          tree inner_lhs = get_base_address (lhs);
          bool rhs_free = false;
          bool lhs_free = false;

          if (!inner_rhs)
            inner_rhs = rhs;
          if (!inner_lhs)
            inner_lhs = lhs;

          /* Reads of parameter are expected to be free.  */
          if (unmodified_parm (stmt, inner_rhs, NULL))
            rhs_free = true;
          /* Match expressions of form &this->field.  Those will most
             likely combine with something upstream after inlining.  */
          else if (TREE_CODE (inner_rhs) == ADDR_EXPR)
            {
              tree op = get_base_address (TREE_OPERAND (inner_rhs, 0));
              if (TREE_CODE (op) == PARM_DECL)
                rhs_free = true;
              else if (TREE_CODE (op) == MEM_REF
                       && unmodified_parm (stmt, TREE_OPERAND (op, 0),
                                           NULL))
                rhs_free = true;
            }

          /* When parameter is not SSA register because its address is taken
             and it is just copied into one, the statement will be completely
             free after inlining (we will copy propagate backward).  */
          if (rhs_free && is_gimple_reg (lhs))
            return 2;

          /* Reads of parameters passed by reference
             expected to be free (i.e. optimized out after inlining).  */
          if (TREE_CODE (inner_rhs) == MEM_REF
              && unmodified_parm (stmt, TREE_OPERAND (inner_rhs, 0), NULL))
            rhs_free = true;

          /* Copying parameter passed by reference into gimple register is
             probably also going to copy propagate, but we can't be quite
             sure.  */
          if (rhs_free && is_gimple_reg (lhs))
            lhs_free = true;

          /* Writes to parameters, parameters passed by value and return
             value (either directly or passed via invisible reference) are
             free.

             TODO: We ought to handle testcase like
             struct a {int a,b;};
             struct a
             retrurnsturct (void)
             {
             struct a a ={1,2};
             return a;
             }

             This translate into:

             retrurnsturct ()
             {
             int a$b;
             int a$a;
             struct a a;
             struct a D.2739;

             <bb 2>:
             D.2739.a = 1;
             D.2739.b = 2;
             return D.2739;

             }
             For that we either need to copy ipa-split logic detecting
             writes to return value.  */
          if (TREE_CODE (inner_lhs) == PARM_DECL
              || TREE_CODE (inner_lhs) == RESULT_DECL
              || (TREE_CODE (inner_lhs) == MEM_REF
                  && (unmodified_parm (stmt, TREE_OPERAND (inner_lhs, 0),
                                       NULL)
                      || (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME
                          && SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0))
                          && TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
                                                      (inner_lhs,
                                                       0))) == RESULT_DECL))))
            lhs_free = true;
          if (lhs_free
              && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
            rhs_free = true;
          if (lhs_free && rhs_free)
            return 1;
        }
      return 0;
    default:
      return 0;
    }
}

/* If BB ends by a conditional we can turn into predicates, attach
   corresponding predicates to the CFG edges.  */

static void
set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
                                   struct ipa_fn_summary *summary,
                                   basic_block bb)
{
  gimple *last;
  tree op;
  int index;
  HOST_WIDE_INT size;
  struct agg_position_info aggpos;
  enum tree_code code, inverted_code;
  edge e;
  edge_iterator ei;
  gimple *set_stmt;
  tree op2;

  last = last_stmt (bb);
  if (!last || gimple_code (last) != GIMPLE_COND)
    return;
  /* Only conditions against an invariant rhs can become predicates.  */
  if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
    return;
  op = gimple_cond_lhs (last);
  /* TODO: handle conditionals like
     var = op0 < 4;
     if (var != 0).  */
  if (unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &size,
                                        &aggpos))
    {
      code = gimple_cond_code (last);
      inverted_code = invert_tree_comparison (code, HONOR_NANS (op));

      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
                                      ? code : inverted_code);
          /* invert_tree_comparison will return ERROR_MARK on FP
             comparisons that are not EQ/NE instead of returning proper
             unordered one.  Be sure it is not confused
             with NON_CONSTANT.  */
          if (this_code != ERROR_MARK)
            {
              predicate p
                = add_condition (summary, index, size, &aggpos, this_code,
                                 unshare_expr_without_location
                                 (gimple_cond_rhs (last)));
              e->aux = edge_predicate_pool.allocate ();
              *(predicate *) e->aux = p;
            }
        }
    }

  if (TREE_CODE (op) != SSA_NAME)
    return;
  /* Special case
     if (builtin_constant_p (op))
     constant_code
     else
     nonconstant_code.
     Here we can predicate nonconstant_code.  We can't
     really handle constant_code since we have no predicate
     for this and also the constant code is not known to be
     optimized away when inliner doesn't see operand is constant.
     Other optimizers might think otherwise.  */
  if (gimple_cond_code (last) != NE_EXPR
      || !integer_zerop (gimple_cond_rhs (last)))
    return;

  set_stmt = SSA_NAME_DEF_STMT (op);
  if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P)
      || gimple_call_num_args (set_stmt) != 1)
    return;
  op2 = gimple_call_arg (set_stmt, 0);
  if (!unmodified_parm_or_parm_agg_item (fbi, set_stmt, op2, &index, &size,
                                         &aggpos))
    return;
  /* Only the false edge (the nonconstant code) gets a predicate.  */
  FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALSE_VALUE)
    {
      predicate p = add_condition (summary, index, size, &aggpos,
                                   predicate::is_not_constant, NULL_TREE);
      e->aux = edge_predicate_pool.allocate ();
      *(predicate *) e->aux = p;
    }
}

/* If BB ends by a switch we can turn into predicates, attach corresponding
   predicates to the CFG edges.
*/

static void
set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
                                     struct ipa_fn_summary *summary,
                                     basic_block bb)
{
  gimple *lastg;
  tree op;
  int index;
  HOST_WIDE_INT size;
  struct agg_position_info aggpos;
  edge e;
  edge_iterator ei;
  size_t n;
  size_t case_idx;

  lastg = last_stmt (bb);
  if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
    return;
  gswitch *last = as_a <gswitch *> (lastg);
  op = gimple_switch_index (last);
  if (!unmodified_parm_or_parm_agg_item (fbi, last, op, &index, &size,
                                         &aggpos))
    return;

  /* Start all successor predicates at false; each case label below ORs in
     its range condition.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      e->aux = edge_predicate_pool.allocate ();
      *(predicate *) e->aux = false;
    }
  n = gimple_switch_num_labels (last);
  for (case_idx = 0; case_idx < n; ++case_idx)
    {
      tree cl = gimple_switch_label (last, case_idx);
      tree min, max;
      predicate p;

      e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
      min = CASE_LOW (cl);
      max = CASE_HIGH (cl);

      /* For default we might want to construct predicate that none
         of cases is met, but it is bit hard to do not having negations
         of conditionals handy.  */
      if (!min && !max)
        p = true;
      else if (!max)
        /* Single-value case label: exact equality.  */
        p = add_condition (summary, index, size, &aggpos, EQ_EXPR,
                           unshare_expr_without_location (min));
      else
        {
          /* Case range: min <= op && op <= max.  */
          predicate p1, p2;
          p1 = add_condition (summary, index, size, &aggpos, GE_EXPR,
                              unshare_expr_without_location (min));
          p2 = add_condition (summary, index, size, &aggpos, LE_EXPR,
                              unshare_expr_without_location (max));
          p = p1 & p2;
        }
      /* Multiple labels may target the same edge; OR their conditions.  */
      *(struct predicate *) e->aux
        = p.or_with (summary->conds, *(struct predicate *) e->aux);
    }
}

/* For each BB in NODE attach to its AUX pointer predicate under
   which it is executable.
*/

static void
compute_bb_predicates (struct ipa_func_body_info *fbi,
                       struct cgraph_node *node,
                       struct ipa_fn_summary *summary)
{
  struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
  bool done = false;
  basic_block bb;

  /* First seed edge predicates from conditionals and switches.  */
  FOR_EACH_BB_FN (bb, my_function)
    {
      set_cond_stmt_execution_predicate (fbi, summary, bb);
      set_switch_stmt_execution_predicate (fbi, summary, bb);
    }

  /* Entry block is always executable.  */
  ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux
    = edge_predicate_pool.allocate ();
  *(predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux = true;

  /* A simple dataflow propagation of predicates forward in the CFG.
     TODO: work in reverse postorder.  */
  while (!done)
    {
      done = true;
      FOR_EACH_BB_FN (bb, my_function)
        {
          predicate p = false;
          edge e;
          edge_iterator ei;
          /* A BB executes if any predecessor executes and the edge is
             taken: OR over preds of (pred predicate AND edge predicate).  */
          FOR_EACH_EDGE (e, ei, bb->preds)
            {
              if (e->src->aux)
                {
                  predicate this_bb_predicate
                    = *(predicate *) e->src->aux;
                  if (e->aux)
                    this_bb_predicate &= (*(struct predicate *) e->aux);
                  p = p.or_with (summary->conds, this_bb_predicate);
                  /* Saturated at "always executes"; no point continuing.  */
                  if (p == true)
                    break;
                }
            }
          if (p == false)
            gcc_checking_assert (!bb->aux);
          else
            {
              if (!bb->aux)
                {
                  /* First non-false value: another iteration is needed to
                     propagate it further.  */
                  done = false;
                  bb->aux = edge_predicate_pool.allocate ();
                  *((predicate *) bb->aux) = p;
                }
              else if (p != *(predicate *) bb->aux)
                {
                  /* This OR operation is needed to ensure monotonous data
                     flow in the case we hit the limit on number of clauses
                     and the and/or operations above give approximate
                     answers.  */
                  p = p.or_with (summary->conds, *(predicate *)bb->aux);
                  if (p != *(predicate *) bb->aux)
                    {
                      done = false;
                      *((predicate *) bb->aux) = p;
                    }
                }
            }
        }
    }
}

/* Return predicate specifying when the STMT might have result that is not
   a compile time constant.
*/

static predicate
will_be_nonconstant_expr_predicate (struct ipa_node_params *info,
                                    struct ipa_fn_summary *summary,
                                    tree expr,
                                    vec<predicate> nonconstant_names)
{
  tree parm;
  int index;
  HOST_WIDE_INT size;

  /* Strip unary wrappers; they do not affect constancy.  */
  while (UNARY_CLASS_P (expr))
    expr = TREE_OPERAND (expr, 0);

  parm = unmodified_parm (NULL, expr, &size);
  if (parm && (index = ipa_get_param_decl_index (info, parm)) >= 0)
    /* Parameter reference: nonconstant exactly when the parameter
       changed.  */
    return add_condition (summary, index, size, NULL, predicate::changed,
                          NULL_TREE);
  if (is_gimple_min_invariant (expr))
    return false;
  if (TREE_CODE (expr) == SSA_NAME)
    return nonconstant_names[SSA_NAME_VERSION (expr)];
  if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr))
    {
      /* Binary/comparison: nonconstant if either operand is.  */
      predicate p1
        = will_be_nonconstant_expr_predicate (info, summary,
                                              TREE_OPERAND (expr, 0),
                                              nonconstant_names);
      if (p1 == true)
        return p1;

      predicate p2;
      p2 = will_be_nonconstant_expr_predicate (info, summary,
                                               TREE_OPERAND (expr, 1),
                                               nonconstant_names);
      return p1.or_with (summary->conds, p2);
    }
  else if (TREE_CODE (expr) == COND_EXPR)
    {
      /* Ternary: nonconstant if any of the three operands is.  */
      predicate p1
        = will_be_nonconstant_expr_predicate (info, summary,
                                              TREE_OPERAND (expr, 0),
                                              nonconstant_names);
      if (p1 == true)
        return p1;

      predicate p2;
      p2 = will_be_nonconstant_expr_predicate (info, summary,
                                               TREE_OPERAND (expr, 1),
                                               nonconstant_names);
      if (p2 == true)
        return p2;
      p1 = p1.or_with (summary->conds, p2);
      p2 = will_be_nonconstant_expr_predicate (info, summary,
                                               TREE_OPERAND (expr, 2),
                                               nonconstant_names);
      return p2.or_with (summary->conds, p1);
    }
  else
    {
      debug_tree (expr);
      gcc_unreachable ();
    }
  return false;
}

/* Return predicate specifying when the STMT might have result that is not
   a compile time constant.
*/

static predicate
will_be_nonconstant_predicate (struct ipa_func_body_info *fbi,
                               struct ipa_fn_summary *summary,
                               gimple *stmt,
                               vec<predicate> nonconstant_names)
{
  predicate p = true;
  ssa_op_iter iter;
  tree use;
  predicate op_non_const;
  bool is_load;
  int base_index;
  HOST_WIDE_INT size;
  struct agg_position_info aggpos;

  /* What statements might be optimized away
     when their arguments are constant.  */
  if (gimple_code (stmt) != GIMPLE_ASSIGN
      && gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_SWITCH
      && (gimple_code (stmt) != GIMPLE_CALL
          || !(gimple_call_flags (stmt) & ECF_CONST)))
    return p;

  /* Stores will stay anyway.  */
  if (gimple_store_p (stmt))
    return p;

  is_load = gimple_assign_load_p (stmt);

  /* Loads can be optimized when the value is known.  */
  if (is_load)
    {
      tree op;
      gcc_assert (gimple_assign_single_p (stmt));
      op = gimple_assign_rhs1 (stmt);
      if (!unmodified_parm_or_parm_agg_item (fbi, stmt, op, &base_index,
                                             &size, &aggpos))
        return p;
    }
  else
    base_index = -1;

  /* See if we understand all operands before we start
     adding conditionals.  */
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      tree parm = unmodified_parm (stmt, use, NULL);
      /* For arguments we can build a condition.  */
      if (parm && ipa_get_param_decl_index (fbi->info, parm) >= 0)
        continue;
      if (TREE_CODE (use) != SSA_NAME)
        return p;
      /* If we know when operand is constant,
         we still can say something useful.  */
      if (nonconstant_names[SSA_NAME_VERSION (use)] != true)
        continue;
      return p;
    }

  if (is_load)
    op_non_const =
      add_condition (summary, base_index, size, &aggpos, predicate::changed,
                     NULL);
  else
    op_non_const = false;
  /* OR in nonconstancy of each operand.  */
  FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      HOST_WIDE_INT size;
      tree parm = unmodified_parm (stmt, use, &size);
      int index;

      if (parm && (index = ipa_get_param_decl_index (fbi->info, parm)) >= 0)
        {
          if (index != base_index)
            p = add_condition (summary, index, size, NULL,
                               predicate::changed, NULL_TREE);
          else
            continue;
        }
      else
        p = nonconstant_names[SSA_NAME_VERSION (use)];
      op_non_const = p.or_with (summary->conds, op_non_const);
    }
  /* Record the result predicate for the lhs so later uses can pick
     it up.  */
  if ((gimple_code (stmt) == GIMPLE_ASSIGN || gimple_code (stmt) == GIMPLE_CALL)
      && gimple_op (stmt, 0)
      && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
    nonconstant_names[SSA_NAME_VERSION (gimple_op (stmt, 0))]
      = op_non_const;
  return op_non_const;
}

/* Payload for the record_modified callback below.  */
struct record_modified_bb_info
{
  tree op;      /* Parameter being analyzed (for dumping).  */
  bitmap bb_set;  /* Indices of basic blocks where OP may be set.  */
  gimple *stmt;   /* The use statement itself, to be skipped.  */
};

/* Value is initialized in INIT_BB and used in USE_BB.  We want to compute
   probability how often it changes between USE_BB.
   INIT_BB->count/USE_BB->count is an estimate, but if INIT_BB
   is in different loop nest, we can do better.
   This is all just estimate.  In theory we look for minimal cut separating
   INIT_BB and USE_BB, but we only want to anticipate loop invariant motion
   anyway.  */

static basic_block
get_minimal_bb (basic_block init_bb, basic_block use_bb)
{
  struct loop *l = find_common_loop (init_bb->loop_father,
                                     use_bb->loop_father);
  /* Prefer the shared loop header when it is executed less often than
     INIT_BB.  */
  if (l && l->header->count < init_bb->count)
    return l->header;
  return init_bb;
}

/* Callback of walk_aliased_vdefs.  Records basic blocks where the value may
   be set except for info->stmt.
 */

static bool
record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data)
{
  struct record_modified_bb_info *info =
    (struct record_modified_bb_info *) data;
  /* Skip the statement we are computing the probability for.  */
  if (SSA_NAME_DEF_STMT (vdef) == info->stmt)
    return false;
  /* Clobbers do not really change the value.  */
  if (gimple_clobber_p (SSA_NAME_DEF_STMT (vdef)))
    return false;
  bitmap_set_bit (info->bb_set,
		  SSA_NAME_IS_DEFAULT_DEF (vdef)
		  ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index
		  : get_minimal_bb (gimple_bb (SSA_NAME_DEF_STMT (vdef)),
				    gimple_bb (info->stmt))->index);
  if (dump_file)
    {
      fprintf (dump_file, " Param ");
      print_generic_expr (dump_file, info->op, TDF_SLIM);
      fprintf (dump_file, " changed at bb %i, minimal: %i stmt: ",
	       gimple_bb (SSA_NAME_DEF_STMT (vdef))->index,
	       get_minimal_bb (gimple_bb (SSA_NAME_DEF_STMT (vdef)),
			       gimple_bb (info->stmt))->index);
      print_gimple_stmt (dump_file, SSA_NAME_DEF_STMT (vdef), 0);
    }
  /* Returning false keeps the walk going over all aliased vdefs.  */
  return false;
}

/* Return probability (based on REG_BR_PROB_BASE) that I-th parameter of STMT
   will change since last invocation of STMT.

   Value 0 is reserved for compile time invariants.
   For common parameters it is REG_BR_PROB_BASE.  For loop invariants it
   ought to be REG_BR_PROB_BASE / estimated_iters.  */

static int
param_change_prob (gimple *stmt, int i)
{
  tree op = gimple_call_arg (stmt, i);
  basic_block bb = gimple_bb (stmt);

  if (TREE_CODE (op) == WITH_SIZE_EXPR)
    op = TREE_OPERAND (op, 0);

  tree base = get_base_address (op);

  /* Global invariants never change.  */
  if (is_gimple_min_invariant (base))
    return 0;

  /* We would have to do non-trivial analysis to really work out what
     is the probability of value to change (i.e. when init statement
     is in a sibling loop of the call).

     We do a conservative estimate: when call is executed N times more often
     than the statement defining value, we take the frequency 1/N.  */
  if (TREE_CODE (base) == SSA_NAME)
    {
      profile_count init_count;

      if (!bb->count.nonzero_p ())
	return REG_BR_PROB_BASE;

      if (SSA_NAME_IS_DEFAULT_DEF (base))
	init_count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
      else
	init_count = get_minimal_bb (gimple_bb (SSA_NAME_DEF_STMT (base)),
				     gimple_bb (stmt))->count;

      if (init_count < bb->count)
	return MAX ((init_count.to_sreal_scale (bb->count)
		     * REG_BR_PROB_BASE).to_int (), 1);
      return REG_BR_PROB_BASE;
    }
  else
    {
      /* Memory-based value: walk aliased vdefs to find where it may be
	 modified and compare the hottest modification with the use.  */
      ao_ref refd;
      profile_count max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
      struct record_modified_bb_info info;
      tree init = ctor_for_folding (base);

      /* A known constructor means the value is a compile time invariant.  */
      if (init != error_mark_node)
	return 0;
      if (!bb->count.nonzero_p ())
	return REG_BR_PROB_BASE;
      if (dump_file)
	{
	  fprintf (dump_file, " Analyzing param change probablity of ");
	  print_generic_expr (dump_file, op, TDF_SLIM);
	  fprintf (dump_file, "\n");
	}
      ao_ref_init (&refd, op);
      info.op = op;
      info.stmt = stmt;
      info.bb_set = BITMAP_ALLOC (NULL);
      walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info,
			  NULL);
      if (bitmap_bit_p (info.bb_set, bb->index))
	{
	  if (dump_file)
	    fprintf (dump_file, " Set in same BB as used.\n");
	  BITMAP_FREE (info.bb_set);
	  return REG_BR_PROB_BASE;
	}

      bitmap_iterator bi;
      unsigned index;
      /* Lookup the most frequent update of the value and believe that
	 it dominates all the other; precise analysis here is difficult.  */
      EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi)
	max = max.max (BASIC_BLOCK_FOR_FN (cfun, index)->count);
      if (dump_file)
	{
	  fprintf (dump_file, " Set with count ");
	  max.dump (dump_file);
	  fprintf (dump_file, " and used with count ");
	  bb->count.dump (dump_file);
	  fprintf (dump_file, " freq %f\n",
		   max.to_sreal_scale (bb->count).to_double ());
	}

      BITMAP_FREE (info.bb_set);
      if (max < bb->count)
	return MAX ((max.to_sreal_scale (bb->count)
		     * REG_BR_PROB_BASE).to_int (), 1);
      return REG_BR_PROB_BASE;
    }
}

/* Find whether a basic block BB is the final block of a (half) diamond CFG
   sub-graph and if the predicate the condition depends on is known.
   If so, return true and store the predicate in *P.  */

static bool
phi_result_unknown_predicate (struct ipa_node_params *info,
			      ipa_fn_summary *summary, basic_block bb,
			      predicate *p,
			      vec<predicate> nonconstant_names)
{
  edge e;
  edge_iterator ei;
  basic_block first_bb = NULL;
  gimple *stmt;

  /* A single predecessor means the PHI is degenerate; the result is
     trivially known (predicate false = "never nonconstant").  */
  if (single_pred_p (bb))
    {
      *p = false;
      return true;
    }

  /* Require all predecessors to funnel from one common block FIRST_BB,
     i.e. BB closes a (half) diamond rooted at FIRST_BB.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (single_succ_p (e->src))
	{
	  if (!single_pred_p (e->src))
	    return false;
	  if (!first_bb)
	    first_bb = single_pred (e->src);
	  else if (single_pred (e->src) != first_bb)
	    return false;
	}
      else
	{
	  if (!first_bb)
	    first_bb = e->src;
	  else if (e->src != first_bb)
	    return false;
	}
    }

  if (!first_bb)
    return false;

  stmt = last_stmt (first_bb);
  if (!stmt
      || gimple_code (stmt) != GIMPLE_COND
      || !is_gimple_ip_invariant (gimple_cond_rhs (stmt)))
    return false;

  *p = will_be_nonconstant_expr_predicate (info, summary,
					   gimple_cond_lhs (stmt),
					   nonconstant_names);
  if (*p == true)
    return false;
  else
    return true;
}

/* Given a PHI statement in a function described by inline properties SUMMARY
   and *P being the predicate describing whether the selected PHI argument is
   known, store a predicate for the result of the PHI statement into
   NONCONSTANT_NAMES, if possible.  */

static void
predicate_for_phi_result (struct ipa_fn_summary *summary, gphi *phi,
			  predicate *p,
			  vec<predicate> nonconstant_names)
{
  unsigned i;

  /* OR in the predicates of all non-invariant arguments; bail out early
     once the combined predicate degenerates to "always nonconstant".  */
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg (phi, i)->def;
      if (!is_gimple_min_invariant (arg))
	{
	  gcc_assert (TREE_CODE (arg) == SSA_NAME);
	  *p = p->or_with (summary->conds,
			   nonconstant_names[SSA_NAME_VERSION (arg)]);
	  if (*p == true)
	    return;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\t\tphi predicate: ");
      p->dump (dump_file, summary->conds);
    }
  nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p;
}

/* Return predicate specifying when array index in access OP becomes
   non-constant.
 */

static predicate
array_index_predicate (ipa_fn_summary *info,
		       vec<predicate> nonconstant_names, tree op)
{
  predicate p = false;
  /* Walk down the component reference chain and OR in the predicates of
     all SSA names used as array indices.  */
  while (handled_component_p (op))
    {
      if (TREE_CODE (op) == ARRAY_REF || TREE_CODE (op) == ARRAY_RANGE_REF)
	{
	  if (TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME)
	    p = p.or_with (info->conds,
			   nonconstant_names[SSA_NAME_VERSION
					     (TREE_OPERAND (op, 1))]);
	}
      op = TREE_OPERAND (op, 0);
    }
  return p;
}

/* For a typical usage of __builtin_expect (a<b, 1), we
   may introduce an extra relation stmt:
   With the builtin, we have
     t1 = a <= b;
     t2 = (long int) t1;
     t3 = __builtin_expect (t2, 1);
     if (t3 != 0)
       goto ...
   Without the builtin, we have
     if (a<=b)
       goto...
   This affects the size/time estimation and may have
   an impact on the earlier inlining.
   Here find this pattern and fix it up later.  */

static gimple *
find_foldable_builtin_expect (basic_block bb)
{
  gimple_stmt_iterator bsi;

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    {
      gimple *stmt = gsi_stmt (bsi);
      if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT)
	  || gimple_call_internal_p (stmt, IFN_BUILTIN_EXPECT))
	{
	  tree var = gimple_call_lhs (stmt);
	  tree arg = gimple_call_arg (stmt, 0);
	  use_operand_p use_p;
	  gimple *use_stmt;
	  bool match = false;
	  bool done = false;

	  if (!var || !arg)
	    continue;
	  gcc_assert (TREE_CODE (var) == SSA_NAME);

	  /* Walk back through conversions to find the comparison feeding
	     the builtin's first argument.  */
	  while (TREE_CODE (arg) == SSA_NAME)
	    {
	      gimple *stmt_tmp = SSA_NAME_DEF_STMT (arg);
	      if (!is_gimple_assign (stmt_tmp))
		break;
	      switch (gimple_assign_rhs_code (stmt_tmp))
		{
		case LT_EXPR:
		case LE_EXPR:
		case GT_EXPR:
		case GE_EXPR:
		case EQ_EXPR:
		case NE_EXPR:
		  match = true;
		  done = true;
		  break;
		CASE_CONVERT:
		  break;
		default:
		  done = true;
		  break;
		}
	      if (done)
		break;
	      arg = gimple_assign_rhs1 (stmt_tmp);
	    }

	  if (match && single_imm_use (var, &use_p, &use_stmt)
	      && gimple_code (use_stmt) == GIMPLE_COND)
	    return use_stmt;
	}
    }
  return NULL;
}

/* Return true when the basic blocks contains only clobbers followed by RESX.
   Such BBs are kept around to make removal of dead stores possible with
   presence of EH and will be optimized out by optimize_clobbers later in the
   game.

   NEED_EH is used to recurse in case the clobber has non-EH predecessors
   that can be clobber only, too.  When it is false, the RESX is not necessary
   on the end of basic block.  */

static bool
clobber_only_eh_bb_p (basic_block bb, bool need_eh = true)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  edge_iterator ei;
  edge e;

  if (need_eh)
    {
      if (gsi_end_p (gsi))
	return false;
      if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX)
	return false;
      gsi_prev (&gsi);
    }
  else if (!single_succ_p (bb))
    return false;

  /* Scan backwards: anything other than debug stmts, clobbers or a
     leading label disqualifies the block.  */
  for (; !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_clobber_p (stmt))
	continue;
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      return false;
    }

  /* See if all predecessors are either throws or clobber only BBs.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (!(e->flags & EDGE_EH)
	&& !clobber_only_eh_bb_p (e->src, false))
      return false;

  return true;
}

/* Return true if STMT computes a floating point expression that may be
   affected by -ffast-math and similar flags.  */

static bool
fp_expression_p (gimple *stmt)
{
  ssa_op_iter i;
  tree op;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF|SSA_OP_USE)
    if (FLOAT_TYPE_P (TREE_TYPE (op)))
      return true;
  return false;
}

/* Analyze function body for NODE.
   EARLY indicates run from early optimization pipeline.  */

static void
analyze_function_body (struct cgraph_node *node, bool early)
{
  sreal time = 0;
  /* Estimate static overhead for function prologue/epilogue and alignment.  */
  int size = 2;
  /* Benefits are scaled by probability of elimination that is in range
     <0,2>.  */
  basic_block bb;
  struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
  sreal freq;
  struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
  predicate bb_predicate;
  struct ipa_func_body_info fbi;
  vec<predicate> nonconstant_names = vNULL;
  int nblocks, n;
  int *order;
  predicate array_index = true;
  gimple *fix_builtin_expect_stmt;

  gcc_assert (my_function && my_function->cfg);
  gcc_assert (cfun == my_function);

  memset(&fbi, 0, sizeof(fbi));
  info->conds = NULL;
  info->size_time_table = NULL;

  /* When optimizing and analyzing for IPA inliner, initialize loop optimizer
     so we can produce proper inline hints.

     When optimizing and analyzing for early inliner, initialize node params
     so we can produce correct BB predicates.  */

  if (opt_for_fn (node->decl, optimize))
    {
      calculate_dominance_info (CDI_DOMINATORS);
      if (!early)
        loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
      else
	{
	  ipa_check_create_node_params ();
	  ipa_initialize_node_params (node);
	}

      if (ipa_node_params_sum)
	{
	  fbi.node = node;
	  fbi.info = IPA_NODE_REF (node);
	  fbi.bb_infos = vNULL;
	  fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
	  fbi.param_count = count_formal_params(node->decl);
	  nonconstant_names.safe_grow_cleared
	    (SSANAMES (my_function)->length ());
	}
    }

  if (dump_file)
    fprintf (dump_file, "\nAnalyzing function body size: %s\n",
	     node->name ());

  /* When we run into maximal number of entries, we assign everything to the
     constant truth case.  Be sure to have it in list.  */
  bb_predicate = true;
  info->account_size_time (0, 0, bb_predicate, bb_predicate);

  bb_predicate = predicate::not_inlined ();
  info->account_size_time (2 * ipa_fn_summary::size_scale, 0, bb_predicate,
			   bb_predicate);

  if (fbi.info)
    compute_bb_predicates (&fbi, node, info);
  order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
  nblocks = pre_and_rev_post_order_compute (NULL, order, false);
  for (n = 0; n < nblocks; n++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, order[n]);
      freq = bb->count.to_sreal_scale (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count);
      if (clobber_only_eh_bb_p (bb))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "\n Ignoring BB %i;"
		     " it will be optimized away by cleanup_clobbers\n",
		     bb->index);
	  continue;
	}

      /* TODO: Obviously predicates can be propagated down across CFG.  */
      if (fbi.info)
	{
	  if (bb->aux)
	    bb_predicate = *(predicate *) bb->aux;
	  else
	    bb_predicate = false;
	}
      else
	bb_predicate = true;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "\n BB %i predicate:", bb->index);
	  bb_predicate.dump (dump_file, info->conds);
	}

      if (fbi.info && nonconstant_names.exists ())
	{
	  predicate phi_predicate;
	  bool first_phi = true;

	  for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi);
	       gsi_next (&bsi))
	    {
	      /* The unknown-predicate of the controlling condition is the
		 same for all PHIs in the block; compute it only once.  */
	      if (first_phi
		  && !phi_result_unknown_predicate (fbi.info, info, bb,
						    &phi_predicate,
						    nonconstant_names))
		break;
	      first_phi = false;
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, " ");
		  print_gimple_stmt (dump_file, gsi_stmt (bsi), 0);
		}
	      predicate_for_phi_result (info, bsi.phi (), &phi_predicate,
					nonconstant_names);
	    }
	}

      fix_builtin_expect_stmt = find_foldable_builtin_expect (bb);

      for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi);
	   gsi_next (&bsi))
	{
	  gimple *stmt = gsi_stmt (bsi);
	  int this_size = estimate_num_insns (stmt, &eni_size_weights);
	  int this_time = estimate_num_insns (stmt, &eni_time_weights);
	  int prob;
	  predicate will_be_nonconstant;

	  /* This relation stmt should be folded after we remove
	     builtin_expect call.  Adjust the cost here.  */
	  if (stmt == fix_builtin_expect_stmt)
	    {
	      this_size--;
	      this_time--;
	    }

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, " ");
	      print_gimple_stmt (dump_file, stmt, 0);
	      fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n",
		       freq.to_double (), this_size,
		       this_time);
	    }

	  if (gimple_assign_load_p (stmt) && nonconstant_names.exists ())
	    {
	      predicate this_array_index;
	      this_array_index =
		array_index_predicate (info, nonconstant_names,
				       gimple_assign_rhs1 (stmt));
	      if (this_array_index != false)
		array_index &= this_array_index;
	    }
	  if (gimple_store_p (stmt) && nonconstant_names.exists ())
	    {
	      predicate this_array_index;
	      this_array_index =
		array_index_predicate (info, nonconstant_names,
				       gimple_get_lhs (stmt));
	      if (this_array_index != false)
		array_index &= this_array_index;
	    }

	  if (is_gimple_call (stmt)
	      && !gimple_call_internal_p (stmt))
	    {
	      struct cgraph_edge *edge = node->get_edge (stmt);
	      struct ipa_call_summary *es = ipa_call_summaries->get (edge);

	      /* Special case: results of BUILT_IN_CONSTANT_P will be always
	         resolved as constant.  We however don't want to optimize
	         out the cgraph edges.  */
	      if (nonconstant_names.exists ()
		  && gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)
		  && gimple_call_lhs (stmt)
		  && TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME)
		{
		  predicate false_p = false;
		  nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))]
		    = false_p;
		}
	      if (ipa_node_params_sum)
		{
		  int count = gimple_call_num_args (stmt);
		  int i;

		  if (count)
		    es->param.safe_grow_cleared (count);
		  for (i = 0; i < count; i++)
		    {
		      int prob = param_change_prob (stmt, i);
		      gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
		      es->param[i].change_prob = prob;
		    }
		}

	      es->call_stmt_size = this_size;
	      es->call_stmt_time = this_time;
	      es->loop_depth = bb_loop_depth (bb);
	      edge_set_predicate (edge, &bb_predicate);
	    }

	  /* TODO: When conditional jump or switch is known to be constant, but
	     we did not translate it into the predicates, we really can account
	     just maximum of the possible paths.  */
	  if (fbi.info)
	    will_be_nonconstant
	      = will_be_nonconstant_predicate (&fbi, info,
					       stmt, nonconstant_names);
	  else
	    will_be_nonconstant = true;
	  if (this_time || this_size)
	    {
	      sreal final_time = (sreal)this_time * freq;

	      prob = eliminated_by_inlining_prob (stmt);
	      if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 "\t\t50%% will be eliminated by inlining\n");
	      if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "\t\tWill be eliminated by inlining\n");

	      struct predicate p = bb_predicate & will_be_nonconstant;

	      /* We can ignore statement when we proved it is never going
		 to happen, but we can not do that for call statements
		 because edges are accounted specially.  */
	      if (*(is_gimple_call (stmt) ? &bb_predicate : &p) != false)
		{
		  time += final_time;
		  size += this_size;
		}

	      /* We account everything but the calls.  Calls have their own
	         size/time info attached to cgraph edges.  This is necessary
	         in order to make the cost disappear after inlining.  */
	      if (!is_gimple_call (stmt))
		{
		  if (prob)
		    {
		      predicate ip = bb_predicate & predicate::not_inlined ();
		      info->account_size_time (this_size * prob,
					       (this_time * prob) / 2, ip,
					       p);
		    }
		  if (prob != 2)
		    info->account_size_time (this_size * (2 - prob),
					     (this_time * (2 - prob) / 2),
					     bb_predicate,
					     p);
		}

	      if (!info->fp_expressions && fp_expression_p (stmt))
		{
		  info->fp_expressions = true;
		  if (dump_file)
		    fprintf (dump_file, " fp_expression set\n");
		}

	      gcc_assert (time >= 0);
	      gcc_assert (size >= 0);
	    }
	}
    }
  set_hint_predicate (&ipa_fn_summaries->get (node)->array_index, array_index);
  free (order);

  if (nonconstant_names.exists () && !early)
    {
      struct loop *loop;
      predicate loop_iterations = true;
      predicate loop_stride = true;

      if (dump_file && (dump_flags & TDF_DETAILS))
	flow_loops_dump (dump_file, NULL, 0);
      scev_initialize ();
      /* Compute the predicate under which the number of iterations of each
	 loop is non-constant; a known predicate enables the
	 loop_iterations inline hint.  */
      FOR_EACH_LOOP (loop, 0)
	{
	  vec<edge> exits;
	  edge ex;
	  unsigned int j;
	  struct tree_niter_desc niter_desc;
	  bb_predicate = *(predicate *) loop->header->aux;

	  exits = get_loop_exit_edges (loop);
	  FOR_EACH_VEC_ELT (exits, j, ex)
	    if (number_of_iterations_exit (loop, ex, &niter_desc, false)
		&& !is_gimple_min_invariant (niter_desc.niter))
	      {
		predicate will_be_nonconstant
		  = will_be_nonconstant_expr_predicate (fbi.info, info,
							niter_desc.niter,
							nonconstant_names);
		if (will_be_nonconstant != true)
		  will_be_nonconstant = bb_predicate & will_be_nonconstant;
		if (will_be_nonconstant != true
		    && will_be_nonconstant != false)
		  /* This is slightly imprecise.  We may want to represent
		     each loop with independent predicate.  */
		  loop_iterations &= will_be_nonconstant;
	      }
	  exits.release ();
	}

      /* To avoid quadratic behavior we analyze stride predicates only
         with respect to the containing loop.  Thus we simply iterate
	 over all defs in the outermost loop body.  */
      for (loop = loops_for_fn (cfun)->tree_root->inner;
	   loop != NULL; loop = loop->next)
	{
	  basic_block *body = get_loop_body (loop);
	  for (unsigned i = 0; i < loop->num_nodes; i++)
	    {
	      gimple_stmt_iterator gsi;
	      bb_predicate = *(predicate *) body[i]->aux;
	      for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi);
		   gsi_next (&gsi))
		{
		  gimple *stmt = gsi_stmt (gsi);

		  if (!is_gimple_assign (stmt))
		    continue;

		  tree def = gimple_assign_lhs (stmt);
		  if (TREE_CODE (def) != SSA_NAME)
		    continue;

		  affine_iv iv;
		  if (!simple_iv (loop_containing_stmt (stmt),
				  loop_containing_stmt (stmt),
				  def, &iv, true)
		      || is_gimple_min_invariant (iv.step))
		    continue;

		  predicate will_be_nonconstant
		    = will_be_nonconstant_expr_predicate (fbi.info, info,
							  iv.step,
							  nonconstant_names);
		  if (will_be_nonconstant != true)
		    will_be_nonconstant = bb_predicate & will_be_nonconstant;
		  if (will_be_nonconstant != true
		      && will_be_nonconstant != false)
		    /* This is slightly imprecise.  We may want to represent
		       each loop with independent predicate.  */
		    loop_stride = loop_stride & will_be_nonconstant;
		}
	    }
	  free (body);
	}
      set_hint_predicate (&ipa_fn_summaries->get (node)->loop_iterations,
			  loop_iterations);
      set_hint_predicate (&ipa_fn_summaries->get (node)->loop_stride,
			  loop_stride);
      scev_finalize ();
    }
  /* Release the per-BB/per-edge predicates stored in the aux fields.  */
  FOR_ALL_BB_FN (bb, my_function)
    {
      edge e;
      edge_iterator ei;

      if (bb->aux)
	edge_predicate_pool.remove ((predicate *)bb->aux);
      bb->aux = NULL;
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (e->aux)
	    edge_predicate_pool.remove ((predicate *) e->aux);
	  e->aux = NULL;
	}
    }
  ipa_fn_summaries->get (node)->time = time;
  ipa_fn_summaries->get (node)->self_size = size;
  nonconstant_names.release ();
  ipa_release_body_info (&fbi);
  if (opt_for_fn (node->decl, optimize))
    {
      if (!early)
        loop_optimizer_finalize ();
      else if (!ipa_edge_args_sum)
	ipa_free_all_node_params ();
      free_dominance_info (CDI_DOMINATORS);
    }
  if (dump_file)
    {
      fprintf (dump_file, "\n");
      ipa_dump_fn_summary (dump_file, node);
    }
}

/* Compute function summary.
   EARLY is true when we compute parameters during early opts.  */

void
compute_fn_summary (struct cgraph_node *node, bool early)
{
  HOST_WIDE_INT self_stack_size;
  struct cgraph_edge *e;
  struct ipa_fn_summary *info;

  gcc_assert (!node->global.inlined_to);

  if (!ipa_fn_summaries)
    ipa_fn_summary_alloc ();

  info = ipa_fn_summaries->get (node);
  info->reset (node);

  /* Estimate the stack size for the function if we're optimizing.  */
  self_stack_size = optimize && !node->thunk.thunk_p
		    ? estimated_stack_frame_size (node) : 0;
  info->estimated_self_stack_size = self_stack_size;
  info->estimated_stack_size = self_stack_size;
  info->stack_frame_offset = 0;

  if (node->thunk.thunk_p)
    {
      /* Thunks get a fixed synthetic summary instead of body analysis.  */
      struct ipa_call_summary *es = ipa_call_summaries->get (node->callees);
      predicate t = true;

      node->local.can_change_signature = false;
      es->call_stmt_size = eni_size_weights.call_cost;
      es->call_stmt_time = eni_time_weights.call_cost;
      info->account_size_time (ipa_fn_summary::size_scale * 2, 2, t, t);
      t = predicate::not_inlined ();
      info->account_size_time (2 * ipa_fn_summary::size_scale, 0, t, t);
      ipa_update_overall_fn_summary (node);
      info->self_size = info->size;
      /* We can not inline instrumentation clones.  */
      if (node->thunk.add_pointer_bounds_args)
	{
          info->inlinable = false;
          node->callees->inline_failed = CIF_CHKP;
	}
      else if (stdarg_p (TREE_TYPE (node->decl)))
	{
	  info->inlinable = false;
	  node->callees->inline_failed = CIF_VARIADIC_THUNK;
	}
      else
        info->inlinable = true;
    }
  else
    {
       /* Even is_gimple_min_invariant relies on current_function_decl.  */
       push_cfun (DECL_STRUCT_FUNCTION (node->decl));

       /* Can this function be inlined at all?  */
       if (!opt_for_fn (node->decl, optimize)
	   && !lookup_attribute ("always_inline",
				 DECL_ATTRIBUTES (node->decl)))
	 info->inlinable = false;
       else
	 info->inlinable = tree_inlinable_function_p (node->decl);

       /* Type attributes can use parameter indices to describe them.  */
       if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl))
	   /* Likewise for #pragma omp declare simd functions or functions
	      with simd attribute.  */
	   || lookup_attribute ("omp declare simd",
				DECL_ATTRIBUTES (node->decl)))
	 node->local.can_change_signature = false;
       else
	 {
	   /* Otherwise, inlinable functions always can change signature.  */
	   if (info->inlinable)
	     node->local.can_change_signature = true;
	   else
	     {
	       /* Functions calling builtin_apply can not change signature.  */
	       for (e = node->callees; e; e = e->next_callee)
		 {
		   tree cdecl = e->callee->decl;
		   if (DECL_BUILT_IN (cdecl)
		       && DECL_BUILT_IN_CLASS (cdecl) == BUILT_IN_NORMAL
		       && (DECL_FUNCTION_CODE (cdecl) == BUILT_IN_APPLY_ARGS
			   || DECL_FUNCTION_CODE (cdecl) == BUILT_IN_VA_START))
		     break;
		 }
	       node->local.can_change_signature = !e;
	     }
	 }
       /* Functions called by instrumentation thunk can't change signature
	  because instrumentation thunk modification is not supported.  */
       if (node->local.can_change_signature)
	 for (e = node->callers; e; e = e->next_caller)
	   if (e->caller->thunk.thunk_p
	       && e->caller->thunk.add_pointer_bounds_args)
	     {
	       node->local.can_change_signature = false;
	       break;
	     }
       analyze_function_body (node, early);
       pop_cfun ();
     }
  for (e = node->callees; e; e = e->next_callee)
    if (e->callee->comdat_local_p ())
      break;
  node->calls_comdat_local = (e != NULL);

  /* Inlining characteristics are maintained by the cgraph_mark_inline.  */
  info->size = info->self_size;
  info->stack_frame_offset = 0;
  info->estimated_stack_size = info->estimated_self_stack_size;

  /* Code above should compute exactly the same result as
     ipa_update_overall_fn_summary but because computation happens in
     different order the roundoff errors result in slight changes.  */
  ipa_update_overall_fn_summary (node);
  gcc_assert (info->size == info->self_size);
}

/* Compute parameters of functions used by inliner using
   current_function_decl.  */

static unsigned int
compute_fn_summary_for_current (void)
{
  compute_fn_summary (cgraph_node::get (current_function_decl), true);
  return 0;
}

/* Estimate benefit devirtualizing indirect edge IE, provided KNOWN_VALS,
   KNOWN_CONTEXTS and KNOWN_AGGS.
 */

static bool
estimate_edge_devirt_benefit (struct cgraph_edge *ie,
			      int *size, int *time,
			      vec<tree> known_vals,
			      vec<ipa_polymorphic_call_context> known_contexts,
			      vec<ipa_agg_jump_function_p> known_aggs)
{
  tree target;
  struct cgraph_node *callee;
  struct ipa_fn_summary *isummary;
  enum availability avail;
  bool speculative;

  /* Without any known context there is nothing to devirtualize.  */
  if (!known_vals.exists () && !known_contexts.exists ())
    return false;
  if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
    return false;

  target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts,
					 known_aggs, &speculative);
  if (!target || speculative)
    return false;

  /* Account for difference in cost between indirect and direct calls.  */
  *size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost);
  *time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost);
  gcc_checking_assert (*time >= 0);
  gcc_checking_assert (*size >= 0);

  callee = cgraph_node::get (target);
  if (!callee || !callee->definition)
    return false;
  callee = callee->function_symbol (&avail);
  if (avail < AVAIL_AVAILABLE)
    return false;
  isummary = ipa_fn_summaries->get (callee);
  return isummary->inlinable;
}

/* Increase SIZE, MIN_SIZE (if non-NULL) and TIME for size and time needed to
   handle edge E with probability PROB.
   Set HINTS if edge may be devirtualized.
   KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS describe context of the call
   site.
 */

static inline void
estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
			     sreal *time,
			     int prob,
			     vec<tree> known_vals,
			     vec<ipa_polymorphic_call_context> known_contexts,
			     vec<ipa_agg_jump_function_p> known_aggs,
			     ipa_hints *hints)
{
  struct ipa_call_summary *es = ipa_call_summaries->get (e);
  int call_size = es->call_stmt_size;
  int call_time = es->call_stmt_time;
  int cur_size;

  /* For indirect edges try to devirtualize; success may shrink the call's
     cost and sets the indirect_call inline hint on hot edges.  */
  if (!e->callee
      && estimate_edge_devirt_benefit (e, &call_size, &call_time,
				       known_vals, known_contexts, known_aggs)
      && hints && e->maybe_hot_p ())
    *hints |= INLINE_HINT_indirect_call;
  cur_size = call_size * ipa_fn_summary::size_scale;
  *size += cur_size;
  if (min_size)
    *min_size += cur_size;
  if (prob == REG_BR_PROB_BASE)
    *time += ((sreal)call_time) * e->sreal_frequency ();
  else
    *time += ((sreal)call_time * prob) * e->sreal_frequency ();
}

/* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
   calls in NODE.  POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
   describe context of the call site.  */

static void
estimate_calls_size_and_time (struct cgraph_node *node, int *size,
			      int *min_size, sreal *time,
			      ipa_hints *hints,
			      clause_t possible_truths,
			      vec<tree> known_vals,
			      vec<ipa_polymorphic_call_context> known_contexts,
			      vec<ipa_agg_jump_function_p> known_aggs)
{
  struct cgraph_edge *e;
  for (e = node->callees; e; e = e->next_callee)
    {
      struct ipa_call_summary *es = ipa_call_summaries->get (e);

      /* Do not care about zero sized builtins.  */
      if (e->inline_failed && !es->call_stmt_size)
	{
	  gcc_checking_assert (!es->call_stmt_time);
	  continue;
	}
      if (!es->predicate
	  || es->predicate->evaluate (possible_truths))
	{
	  if (e->inline_failed)
	    {
	      /* Predicates of calls shall not use NOT_CHANGED codes,
	         so we do not need to compute probabilities.  */
	      estimate_edge_size_and_time (e, size,
					   es->predicate ? NULL : min_size,
					   time, REG_BR_PROB_BASE,
					   known_vals, known_contexts,
					   known_aggs, hints);
	    }
	  else
	    /* Already-inlined edge: recurse into the inlined callee's
	       calls.  */
	    estimate_calls_size_and_time (e->callee, size, min_size, time,
					  hints,
					  possible_truths,
					  known_vals, known_contexts,
					  known_aggs);
	}
    }
  for (e = node->indirect_calls; e; e = e->next_callee)
    {
      struct ipa_call_summary *es = ipa_call_summaries->get (e);
      if (!es->predicate
	  || es->predicate->evaluate (possible_truths))
	estimate_edge_size_and_time (e, size,
				     es->predicate ? NULL : min_size,
				     time, REG_BR_PROB_BASE,
				     known_vals, known_contexts, known_aggs,
				     hints);
    }
}

/* Estimate size and time needed to execute NODE assuming
   POSSIBLE_TRUTHS clause, and KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
   information about NODE's arguments.  If non-NULL use also probability
   information present in INLINE_PARAM_SUMMARY vector.
   Additionally determine hints determined by the context.  Finally compute
   minimal size needed for the call that is independent on the call context and
   can be used for fast estimates.  Return the values in RET_SIZE,
   RET_MIN_SIZE, RET_TIME and RET_HINTS.
 */

void
estimate_node_size_and_time (struct cgraph_node *node,
			     clause_t possible_truths,
			     clause_t nonspec_possible_truths,
			     vec<tree> known_vals,
			     vec<ipa_polymorphic_call_context> known_contexts,
			     vec<ipa_agg_jump_function_p> known_aggs,
			     int *ret_size, int *ret_min_size,
			     sreal *ret_time,
			     sreal *ret_nonspecialized_time,
			     ipa_hints *ret_hints,
			     vec<inline_param_summary>
			     inline_param_summary)
{
  struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
  size_time_entry *e;
  int size = 0;
  sreal time = 0;
  int min_size = 0;
  ipa_hints hints = 0;
  int i;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      bool found = false;
      fprintf (dump_file, " Estimating body: %s/%i\n"
	       " Known to be false: ", node->name (),
	       node->order);

      for (i = predicate::not_inlined_condition;
	   i < (predicate::first_dynamic_condition
		+ (int) vec_safe_length (info->conds)); i++)
	if (!(possible_truths & (1 << i)))
	  {
	    if (found)
	      fprintf (dump_file, ", ");
	    found = true;
	    dump_condition (dump_file, info->conds, i);
	  }
    }

  estimate_calls_size_and_time (node, &size, &min_size, &time, &hints,
				possible_truths,
				known_vals, known_contexts, known_aggs);
  sreal nonspecialized_time = time;

  for (i = 0; vec_safe_iterate (info->size_time_table, i, &e); i++)
    {
      bool exec = e->exec_predicate.evaluate (nonspec_possible_truths);

      /* Because predicates are conservative, it can happen that nonconst is 1
	 but exec is 0.  */
      if (exec)
        {
          bool nonconst = e->nonconst_predicate.evaluate (possible_truths);

	  gcc_checking_assert (e->time >= 0);
	  gcc_checking_assert (time >= 0);

	  /* We compute specialized size only because size of nonspecialized
	     copy is context independent.

	     The difference between nonspecialized execution and specialized is
	     that nonspecialized is not going to have optimized out computations
	     known to be constant in a specialized setting.  */
	  if (nonconst)
	    size += e->size;
	  nonspecialized_time += e->time;
	  if (!nonconst)
	    ;
	  else if (!inline_param_summary.exists ())
	    {
	      if (nonconst)
	        time += e->time;
	    }
	  else
	    {
	      /* Scale the entry's time by the probability that its
		 nonconst predicate holds given the parameter summaries.  */
	      int prob = e->nonconst_predicate.probability
					       (info->conds, possible_truths,
					        inline_param_summary);
	      gcc_checking_assert (prob >= 0);
	      gcc_checking_assert (prob <= REG_BR_PROB_BASE);
	      time += e->time * prob / REG_BR_PROB_BASE;
	    }
	  gcc_checking_assert (time >= 0);
        }
     }
  gcc_checking_assert ((*info->size_time_table)[0].exec_predicate == true);
  gcc_checking_assert ((*info->size_time_table)[0].nonconst_predicate == true);
  min_size = (*info->size_time_table)[0].size;
  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);
  /* nonspecialized_time should be always bigger than specialized time.
     Roundoff issues however may get into the way.  */
  gcc_checking_assert ((nonspecialized_time - time * 0.99) >= -1);

  /* Roundoff issues may make specialized time bigger than nonspecialized
     time.  We do not really want that to happen because some heuristics may
     get confused by seeing negative speedups.  */
  if (time > nonspecialized_time)
    time = nonspecialized_time;

  if (info->loop_iterations
      && !info->loop_iterations->evaluate (possible_truths))
    hints |= INLINE_HINT_loop_iterations;
  if (info->loop_stride
      && !info->loop_stride->evaluate (possible_truths))
    hints |= INLINE_HINT_loop_stride;
  if (info->array_index
      && !info->array_index->evaluate (possible_truths))
    hints |= INLINE_HINT_array_index;
  if (info->scc_no)
    hints |= INLINE_HINT_in_scc;
  if (DECL_DECLARED_INLINE_P (node->decl))
    hints |= INLINE_HINT_declared_inline;

  size = RDIV (size, ipa_fn_summary::size_scale);
  min_size = RDIV (min_size, ipa_fn_summary::size_scale);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n size:%i time:%f nonspec time:%f\n", (int) size,
	     time.to_double (), nonspecialized_time.to_double ());
  if (ret_time)
    *ret_time = time;
  if (ret_nonspecialized_time)
    *ret_nonspecialized_time = nonspecialized_time;
  if (ret_size)
    *ret_size = size;
  if (ret_min_size)
    *ret_min_size = min_size;
  if (ret_hints)
    *ret_hints = hints;
  return;
}

/* Estimate size and time needed to execute callee of EDGE assuming that
   parameters known to be constant at caller of EDGE are propagated.
   KNOWN_VALS and KNOWN_CONTEXTS are vectors of assumed known constant values
   and types for parameters.  */

void
estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
				   vec<tree> known_vals,
				   vec<ipa_polymorphic_call_context>
				   known_contexts,
				   vec<ipa_agg_jump_function_p> known_aggs,
				   int *ret_size, sreal *ret_time,
				   sreal *ret_nonspec_time,
				   ipa_hints *hints)
{
  clause_t clause, nonspec_clause;

  evaluate_conditions_for_known_args (node, false, known_vals, known_aggs,
				      &clause, &nonspec_clause);
  estimate_node_size_and_time (node, clause, nonspec_clause,
			       known_vals, known_contexts,
			       known_aggs, ret_size, NULL, ret_time,
			       ret_nonspec_time, hints, vNULL);
}

/* Update summary information of inline clones after inlining.
   Compute peak stack usage.
*/

static void
inline_update_callee_summaries (struct cgraph_node *node, int depth)
{
  struct cgraph_edge *e;
  struct ipa_fn_summary *callee_info = ipa_fn_summaries->get (node);
  struct ipa_fn_summary *caller_info
    = ipa_fn_summaries->get (node->callers->caller);
  HOST_WIDE_INT peak;

  /* The inlined body's frame sits on top of the caller's frame; record
     its offset and see whether the combined depth is a new peak for the
     function everything was ultimately inlined to.  */
  callee_info->stack_frame_offset
    = caller_info->stack_frame_offset
      + caller_info->estimated_self_stack_size;
  peak = callee_info->stack_frame_offset
	 + callee_info->estimated_self_stack_size;
  if (ipa_fn_summaries->get (node->global.inlined_to)->estimated_stack_size
      < peak)
    ipa_fn_summaries->get (node->global.inlined_to)->estimated_stack_size
      = peak;
  ipa_propagate_frequency (node);
  /* Recurse into already-inlined callees and shift loop depths of all
     outgoing edges by DEPTH (the loop depth of the inlined call site).  */
  for (e = node->callees; e; e = e->next_callee)
    {
      if (!e->inline_failed)
	inline_update_callee_summaries (e->callee, depth);
      ipa_call_summaries->get (e)->loop_depth += depth;
    }
  for (e = node->indirect_calls; e; e = e->next_callee)
    ipa_call_summaries->get (e)->loop_depth += depth;
}

/* Update change_prob of EDGE after INLINED_EDGE has been inlined.
   When function A is inlined in B and A calls C with parameter that
   changes with probability PROB1 and C is known to be passthrough
   of argument of B that change with probability PROB2, the probability
   of change is now PROB1*PROB2.  */

static void
remap_edge_change_prob (struct cgraph_edge *inlined_edge,
			struct cgraph_edge *edge)
{
  if (ipa_node_params_sum)
    {
      int i;
      struct ipa_edge_args *args = IPA_EDGE_REF (edge);
      struct ipa_call_summary *es = ipa_call_summaries->get (edge);
      struct ipa_call_summary *inlined_es
	= ipa_call_summaries->get (inlined_edge);

      for (i = 0; i < ipa_get_cs_argument_count (args); i++)
	{
	  struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
	  if (jfunc->type == IPA_JF_PASS_THROUGH
	      || jfunc->type == IPA_JF_ANCESTOR)
	    {
	      /* Formal parameter of the (now inlined) caller that this
		 argument was derived from.  */
	      int id = jfunc->type == IPA_JF_PASS_THROUGH
		       ? ipa_get_jf_pass_through_formal_id (jfunc)
		       : ipa_get_jf_ancestor_formal_id (jfunc);
	      if (id < (int) inlined_es->param.length ())
		{
		  int prob1 = es->param[i].change_prob;
		  int prob2 = inlined_es->param[id].change_prob;
		  int prob = combine_probabilities (prob1, prob2);

		  /* Do not round a nonzero product down to "never
		     changes".  */
		  if (prob1 && prob2 && !prob)
		    prob = 1;

		  es->param[i].change_prob = prob;
		}
	    }
	}
    }
}

/* Update edge summaries of NODE after INLINED_EDGE has been inlined.
   Remap predicates of callees of NODE.  Rest of arguments match
   remap_predicate.  Also update change probabilities.  */

static void
remap_edge_summaries (struct cgraph_edge *inlined_edge,
		      struct cgraph_node *node,
		      struct ipa_fn_summary *info,
		      struct ipa_fn_summary *callee_info,
		      vec<int> operand_map,
		      vec<int> offset_map,
		      clause_t possible_truths,
		      predicate *toplev_predicate)
{
  struct cgraph_edge *e, *next;

  for (e = node->callees; e; e = next)
    {
      struct ipa_call_summary *es = ipa_call_summaries->get (e);
      predicate p;
      /* Remember the next edge up front; edge_set_predicate may
	 manipulate the current one.  */
      next = e->next_callee;

      if (e->inline_failed)
	{
	  /* Still-external call: remap its predicate into the caller's
	     parameter space.  */
	  remap_edge_change_prob (inlined_edge, e);

	  if (es->predicate)
	    {
	      p = es->predicate->remap_after_inlining
				     (info, callee_info, operand_map,
				      offset_map, possible_truths,
				      *toplev_predicate);
	      edge_set_predicate (e, &p);
	    }
	  else
	    edge_set_predicate (e, toplev_predicate);
	}
      else
	/* The callee was itself inlined; recurse into its body.  */
	remap_edge_summaries (inlined_edge, e->callee, info, callee_info,
			      operand_map, offset_map, possible_truths,
			      toplev_predicate);
    }
  for (e = node->indirect_calls; e; e = next)
    {
      struct ipa_call_summary *es = ipa_call_summaries->get (e);
      predicate p;
      next = e->next_callee;

      remap_edge_change_prob (inlined_edge, e);
      if (es->predicate)
	{
	  p = es->predicate->remap_after_inlining
				 (info, callee_info, operand_map, offset_map,
				  possible_truths, *toplev_predicate);
	  edge_set_predicate (e, &p);
	}
      else
	edge_set_predicate (e, toplev_predicate);
    }
}

/* Same as remap_predicate, but set result into hint *HINT.
*/

static void
remap_hint_predicate (struct ipa_fn_summary *info,
		      struct ipa_fn_summary *callee_info,
		      predicate **hint,
		      vec<int> operand_map,
		      vec<int> offset_map,
		      clause_t possible_truths,
		      predicate *toplev_predicate)
{
  predicate p;

  if (!*hint)
    return;
  p = (*hint)->remap_after_inlining
			 (info, callee_info,
			  operand_map, offset_map,
			  possible_truths, *toplev_predicate);
  /* Only store non-trivial predicates; hints that became constant
     true/false are left untouched.  */
  if (p != false && p != true)
    {
      /* NOTE: *HINT is known non-NULL here (early return above), so in
	 practice this always takes the &= branch.  */
      if (!*hint)
	set_hint_predicate (hint, p);
      else
	**hint &= p;
    }
}

/* We inlined EDGE.  Update summary of the function we inlined into.  */

void
ipa_merge_fn_summary_after_inlining (struct cgraph_edge *edge)
{
  struct ipa_fn_summary *callee_info = ipa_fn_summaries->get (edge->callee);
  /* TO is the function whose body physically receives the inlined code:
     either the caller itself or whatever it was already inlined to.  */
  struct cgraph_node *to = (edge->caller->global.inlined_to
			    ? edge->caller->global.inlined_to : edge->caller);
  struct ipa_fn_summary *info = ipa_fn_summaries->get (to);
  clause_t clause = 0;	/* not_inline is known to be false.  */
  size_time_entry *e;
  vec<int> operand_map = vNULL;
  vec<int> offset_map = vNULL;
  int i;
  predicate toplev_predicate;
  predicate true_p = true;
  struct ipa_call_summary *es = ipa_call_summaries->get (edge);

  if (es->predicate)
    toplev_predicate = *es->predicate;
  else
    toplev_predicate = true;

  info->fp_expressions |= callee_info->fp_expressions;

  if (callee_info->conds)
    evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL,
				  NULL);
  if (ipa_node_params_sum && callee_info->conds)
    {
      /* Build OPERAND_MAP/OFFSET_MAP translating the callee's formal
	 parameter indices into the surviving function's parameter
	 indices, based on the jump functions of the inlined edge.  */
      struct ipa_edge_args *args = IPA_EDGE_REF (edge);
      int count = ipa_get_cs_argument_count (args);
      int i;

      if (count)
	{
	  operand_map.safe_grow_cleared (count);
	  offset_map.safe_grow_cleared (count);
	}
      for (i = 0; i < count; i++)
	{
	  struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
	  int map = -1;

	  /* TODO: handle non-NOPs when merging.  */
	  if (jfunc->type == IPA_JF_PASS_THROUGH)
	    {
	      if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
		map = ipa_get_jf_pass_through_formal_id (jfunc);
	      if (!ipa_get_jf_pass_through_agg_preserved (jfunc))
		offset_map[i] = -1;
	    }
	  else if (jfunc->type == IPA_JF_ANCESTOR)
	    {
	      HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc);
	      if (offset >= 0 && offset < INT_MAX)
		{
		  map = ipa_get_jf_ancestor_formal_id (jfunc);
		  if (!ipa_get_jf_ancestor_agg_preserved (jfunc))
		    offset = -1;
		  offset_map[i] = offset;
		}
	    }
	  operand_map[i] = map;
	  gcc_assert (map < ipa_get_param_count (IPA_NODE_REF (to)));
	}
    }
  /* Merge every size/time entry of the callee into the caller's table,
     with predicates remapped and time scaled by call frequency.  */
  for (i = 0; vec_safe_iterate (callee_info->size_time_table, i, &e); i++)
    {
      predicate p;
      p = e->exec_predicate.remap_after_inlining
			     (info, callee_info, operand_map,
			      offset_map, clause,
			      toplev_predicate);
      predicate nonconstp;
      nonconstp = e->nonconst_predicate.remap_after_inlining
			     (info, callee_info, operand_map,
			      offset_map, clause,
			      toplev_predicate);
      if (p != false && nonconstp != false)
	{
	  sreal add_time = ((sreal)e->time * edge->sreal_frequency ());
	  int prob = e->nonconst_predicate.probability (callee_info->conds,
							clause, es->param);
	  add_time = add_time * prob / REG_BR_PROB_BASE;
	  if (prob != REG_BR_PROB_BASE
	      && dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t\tScaling time by probability:%f\n",
		       (double) prob / REG_BR_PROB_BASE);
	    }
	  info->account_size_time (e->size, add_time, p, nonconstp);
	}
    }
  /* Remap predicates of the callee's own call edges and hints into the
     surviving function's context.  */
  remap_edge_summaries (edge, edge->callee, info, callee_info, operand_map,
			offset_map, clause, &toplev_predicate);
  remap_hint_predicate (info, callee_info,
			&callee_info->loop_iterations,
			operand_map, offset_map, clause, &toplev_predicate);
  remap_hint_predicate (info, callee_info,
			&callee_info->loop_stride,
			operand_map, offset_map, clause, &toplev_predicate);
  remap_hint_predicate (info, callee_info,
			&callee_info->array_index,
			operand_map, offset_map, clause, &toplev_predicate);

  inline_update_callee_summaries (edge->callee,
				  ipa_call_summaries->get (edge)->loop_depth);

  /* We do not
maintain predicates of inlined edges, free it.  */
  edge_set_predicate (edge, &true_p);
  /* Similarly remove param summaries.  */
  es->param.release ();
  operand_map.release ();
  offset_map.release ();
}

/* For performance reasons ipa_merge_fn_summary_after_inlining is not
   updating overall size and time.  Recompute it.  */

void
ipa_update_overall_fn_summary (struct cgraph_node *node)
{
  struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
  size_time_entry *e;
  int i;

  /* Sum all size/time table entries unconditionally...  */
  info->size = 0;
  info->time = 0;
  for (i = 0; vec_safe_iterate (info->size_time_table, i, &e); i++)
    {
      info->size += e->size;
      info->time += e->time;
    }
  /* ...then add the cost of all calls, assuming everything except the
     false condition may be true.  */
  estimate_calls_size_and_time (node, &info->size, &info->min_size,
				&info->time, NULL,
				~(clause_t) (1 << predicate::false_condition),
				vNULL, vNULL, vNULL);
  /* Convert back from size_scale units, rounding to nearest.  */
  info->size
    = (info->size + ipa_fn_summary::size_scale / 2)
      / ipa_fn_summary::size_scale;
}


/* This function performs intraprocedural analysis in NODE that is required to
   inline indirect calls.  */

static void
inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
{
  ipa_analyze_node (node);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      ipa_print_node_params (dump_file, node);
      ipa_print_node_jump_functions (dump_file, node);
    }
}

/* Note function body size.  */

void
inline_analyze_function (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));

  if (dump_file)
    fprintf (dump_file, "\nAnalyzing function: %s/%u\n",
	     node->name (), node->order);
  /* Jump-function analysis is only needed (and valid) for optimized,
     non-thunk bodies.  */
  if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
    inline_indirect_intraprocedural_analysis (node);
  compute_fn_summary (node, false);
  if (!optimize)
    {
      /* Without optimization nothing may be inlined here; mark all call
	 edges accordingly.  */
      struct cgraph_edge *e;
      for (e = node->callees; e; e = e->next_callee)
	e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
      for (e = node->indirect_calls; e; e = e->next_callee)
	e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
    }

  pop_cfun ();
}

/* Called when new function is inserted to callgraph late.
*/

void
ipa_fn_summary_t::insert (struct cgraph_node *node, ipa_fn_summary *)
{
  inline_analyze_function (node);
}

/* Compute function summaries of all defined functions.  */

static void
ipa_fn_summary_generate (void)
{
  struct cgraph_node *node;

  FOR_EACH_DEFINED_FUNCTION (node)
    if (DECL_STRUCT_FUNCTION (node->decl))
      node->local.versionable = tree_versionable_function_p (node->decl);

  ipa_fn_summary_alloc ();

  ipa_fn_summaries->enable_insertion_hook ();

  ipa_register_cgraph_hooks ();

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->alias
	&& (flag_generate_lto || flag_generate_offload || flag_wpa
	    || opt_for_fn (node->decl, optimize)))
      inline_analyze_function (node);
}


/* Read summary of call edge E from input block IB.  Must mirror
   write_ipa_call_summary field for field.  */

static void
read_ipa_call_summary (struct lto_input_block *ib, struct cgraph_edge *e)
{
  struct ipa_call_summary *es = ipa_call_summaries->get (e);
  predicate p;
  int length, i;

  es->call_stmt_size = streamer_read_uhwi (ib);
  es->call_stmt_time = streamer_read_uhwi (ib);
  es->loop_depth = streamer_read_uhwi (ib);

  bitpack_d bp = streamer_read_bitpack (ib);
  es->is_return_callee_uncaptured = bp_unpack_value (&bp, 1);

  p.stream_in (ib);
  edge_set_predicate (e, &p);
  length = streamer_read_uhwi (ib);
  if (length)
    {
      es->param.safe_grow_cleared (length);
      for (i = 0; i < length; i++)
	es->param[i].change_prob = streamer_read_uhwi (ib);
    }
}

/* Stream in inline summaries from the section.
*/

static void
inline_read_section (struct lto_file_decl_data *file_data, const char *data,
		     size_t len)
{
  const struct lto_function_header *header
    = (const struct lto_function_header *) data;
  const int cfg_offset = sizeof (struct lto_function_header);
  const int main_offset = cfg_offset + header->cfg_size;
  const int string_offset = main_offset + header->main_size;
  struct data_in *data_in;
  unsigned int i, count2, j;
  unsigned int f_count;

  lto_input_block ib ((const char *) data + main_offset, header->main_size,
		      file_data->mode_table);

  data_in
    = lto_data_in_create (file_data, (const char *) data + string_offset,
			  header->string_size, vNULL);
  f_count = streamer_read_uhwi (&ib);
  for (i = 0; i < f_count; i++)
    {
      unsigned int index;
      struct cgraph_node *node;
      struct ipa_fn_summary *info;
      lto_symtab_encoder_t encoder;
      struct bitpack_d bp;
      struct cgraph_edge *e;
      predicate p;

      /* Node reference, then the fields in exactly the order
	 ipa_fn_summary_write emits them.  */
      index = streamer_read_uhwi (&ib);
      encoder = file_data->symtab_node_encoder;
      node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
								index));
      info = ipa_fn_summaries->get (node);

      info->estimated_stack_size
	= info->estimated_self_stack_size = streamer_read_uhwi (&ib);
      info->size = info->self_size = streamer_read_uhwi (&ib);
      info->time = sreal::stream_in (&ib);

      bp = streamer_read_bitpack (&ib);
      info->inlinable = bp_unpack_value (&bp, 1);
      info->fp_expressions = bp_unpack_value (&bp, 1);

      /* Conditions referenced by the predicates below.  */
      count2 = streamer_read_uhwi (&ib);
      gcc_assert (!info->conds);
      for (j = 0; j < count2; j++)
	{
	  struct condition c;
	  c.operand_num = streamer_read_uhwi (&ib);
	  c.size = streamer_read_uhwi (&ib);
	  c.code = (enum tree_code) streamer_read_uhwi (&ib);
	  c.val = stream_read_tree (&ib, data_in);
	  bp = streamer_read_bitpack (&ib);
	  c.agg_contents = bp_unpack_value (&bp, 1);
	  c.by_ref = bp_unpack_value (&bp, 1);
	  if (c.agg_contents)
	    c.offset = streamer_read_uhwi (&ib);
	  vec_safe_push (info->conds, c);
	}
      /* Predicated size/time table.  */
      count2 = streamer_read_uhwi (&ib);
      gcc_assert (!info->size_time_table);
      for (j = 0; j < count2; j++)
	{
	  struct size_time_entry e;

	  e.size = streamer_read_uhwi (&ib);
	  e.time = sreal::stream_in (&ib);
	  e.exec_predicate.stream_in (&ib);
	  e.nonconst_predicate.stream_in (&ib);

	  vec_safe_push (info->size_time_table, e);
	}

      /* The three hint predicates, then per-edge summaries.  */
      p.stream_in (&ib);
      set_hint_predicate (&info->loop_iterations, p);
      p.stream_in (&ib);
      set_hint_predicate (&info->loop_stride, p);
      p.stream_in (&ib);
      set_hint_predicate (&info->array_index, p);
      for (e = node->callees; e; e = e->next_callee)
	read_ipa_call_summary (&ib, e);
      for (e = node->indirect_calls; e; e = e->next_callee)
	read_ipa_call_summary (&ib, e);
    }

  lto_free_section_data (file_data, LTO_section_ipa_fn_summary, NULL, data,
			 len);
  lto_data_in_delete (data_in);
}


/* Read inline summary.  Jump functions are shared among ipa-cp
   and inliner, so when ipa-cp is active, we don't need to write them
   twice.  */

static void
ipa_fn_summary_read (void)
{
  struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data ();
  struct lto_file_decl_data *file_data;
  unsigned int j = 0;

  ipa_fn_summary_alloc ();

  while ((file_data = file_data_vec[j++]))
    {
      size_t len;
      const char *data = lto_get_section_data (file_data,
					       LTO_section_ipa_fn_summary,
					       NULL, &len);
      if (data)
	inline_read_section (file_data, data, len);
      else
	/* Fatal error here.  We do not want to support compiling ltrans units
	   with different version of compiler or different flags than the WPA
	   unit, so this should never happen.  */
	fatal_error (input_location,
		     "ipa inline summary is missing in input file");
    }
  ipa_register_cgraph_hooks ();
  if (!flag_ipa_cp)
    ipa_prop_read_jump_functions ();

  gcc_assert (ipa_fn_summaries);
  ipa_fn_summaries->enable_insertion_hook ();
}

/* Write inline summary for edge E to OB.
*/

static void
write_ipa_call_summary (struct output_block *ob, struct cgraph_edge *e)
{
  struct ipa_call_summary *es = ipa_call_summaries->get (e);
  int i;

  /* Field order must mirror read_ipa_call_summary.  */
  streamer_write_uhwi (ob, es->call_stmt_size);
  streamer_write_uhwi (ob, es->call_stmt_time);
  streamer_write_uhwi (ob, es->loop_depth);

  bitpack_d bp = bitpack_create (ob->main_stream);
  bp_pack_value (&bp, es->is_return_callee_uncaptured, 1);
  streamer_write_bitpack (&bp);

  /* A missing predicate is encoded as 0.  */
  if (es->predicate)
    es->predicate->stream_out (ob);
  else
    streamer_write_uhwi (ob, 0);
  streamer_write_uhwi (ob, es->param.length ());
  for (i = 0; i < (int) es->param.length (); i++)
    streamer_write_uhwi (ob, es->param[i].change_prob);
}


/* Write inline summary for node in SET.
   Jump functions are shared among ipa-cp and inliner, so when ipa-cp is
   active, we don't need to write them twice.  */

static void
ipa_fn_summary_write (void)
{
  struct output_block *ob = create_output_block (LTO_section_ipa_fn_summary);
  lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
  unsigned int count = 0;
  int i;

  /* First pass: count the nodes we will emit so the reader knows how
     many records to expect.  */
  for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
    {
      symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
      cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
      if (cnode && cnode->definition && !cnode->alias)
	count++;
    }
  streamer_write_uhwi (ob, count);

  for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
    {
      symtab_node *snode = lto_symtab_encoder_deref (encoder, i);
      cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
      if (cnode && cnode->definition && !cnode->alias)
	{
	  struct ipa_fn_summary *info = ipa_fn_summaries->get (cnode);
	  struct bitpack_d bp;
	  struct cgraph_edge *edge;
	  int i;
	  size_time_entry *e;
	  struct condition *c;

	  /* Field order must mirror inline_read_section.  */
	  streamer_write_uhwi (ob,
			       lto_symtab_encoder_encode (encoder, cnode));
	  streamer_write_hwi (ob, info->estimated_self_stack_size);
	  streamer_write_hwi (ob, info->self_size);
	  info->time.stream_out (ob);
	  bp = bitpack_create (ob->main_stream);
	  bp_pack_value (&bp, info->inlinable, 1);
	  /* Placeholder bit kept for stream-format compatibility;
	     the reader skips it.  */
	  bp_pack_value (&bp, false, 1);
	  bp_pack_value (&bp, info->fp_expressions, 1);
	  streamer_write_bitpack (&bp);
	  streamer_write_uhwi (ob, vec_safe_length (info->conds));
	  for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
	    {
	      streamer_write_uhwi (ob, c->operand_num);
	      streamer_write_uhwi (ob, c->size);
	      streamer_write_uhwi (ob, c->code);
	      stream_write_tree (ob, c->val, true);
	      bp = bitpack_create (ob->main_stream);
	      bp_pack_value (&bp, c->agg_contents, 1);
	      bp_pack_value (&bp, c->by_ref, 1);
	      streamer_write_bitpack (&bp);
	      if (c->agg_contents)
		streamer_write_uhwi (ob, c->offset);
	    }
	  streamer_write_uhwi (ob, vec_safe_length (info->size_time_table));
	  for (i = 0; vec_safe_iterate (info->size_time_table, i, &e); i++)
	    {
	      streamer_write_uhwi (ob, e->size);
	      e->time.stream_out (ob);
	      e->exec_predicate.stream_out (ob);
	      e->nonconst_predicate.stream_out (ob);
	    }
	  /* Hint predicates; a missing one is encoded as 0.  */
	  if (info->loop_iterations)
	    info->loop_iterations->stream_out (ob);
	  else
	    streamer_write_uhwi (ob, 0);
	  if (info->loop_stride)
	    info->loop_stride->stream_out (ob);
	  else
	    streamer_write_uhwi (ob, 0);
	  if (info->array_index)
	    info->array_index->stream_out (ob);
	  else
	    streamer_write_uhwi (ob, 0);
	  for (edge = cnode->callees; edge; edge = edge->next_callee)
	    write_ipa_call_summary (ob, edge);
	  for (edge = cnode->indirect_calls; edge; edge = edge->next_callee)
	    write_ipa_call_summary (ob, edge);
	}
    }
  streamer_write_char_stream (ob->main_stream, 0);
  produce_asm (ob, NULL);
  destroy_output_block (ob);

  if (!flag_ipa_cp)
    ipa_prop_write_jump_functions ();
}

/* Release inline summary.
*/

void
ipa_free_fn_summary (void)
{
  struct cgraph_node *node;
  /* Already released (or never allocated); nothing to do.  */
  if (!ipa_call_summaries)
    return;
  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->alias)
      ipa_fn_summaries->get (node)->reset (node);
  ipa_fn_summaries->release ();
  ipa_fn_summaries = NULL;
  ipa_call_summaries->release ();
  delete ipa_call_summaries;
  ipa_call_summaries = NULL;
  edge_predicate_pool.release ();
}

namespace {

/* Pass computing the local (per-function) part of the summaries.  */
const pass_data pass_data_local_fn_summary =
{
  GIMPLE_PASS, /* type */
  "local-fnsummary", /* name */
  OPTGROUP_INLINE, /* optinfo_flags */
  TV_INLINE_PARAMETERS, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_local_fn_summary : public gimple_opt_pass
{
public:
  pass_local_fn_summary (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_local_fn_summary, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_local_fn_summary (m_ctxt); }
  virtual unsigned int execute (function *)
    {
      return compute_fn_summary_for_current ();
    }

}; // class pass_local_fn_summary

} // anon namespace

gimple_opt_pass *
make_pass_local_fn_summary (gcc::context *ctxt)
{
  return new pass_local_fn_summary (ctxt);
}


/* Free inline summary.  */

namespace {

const pass_data pass_data_ipa_free_fn_summary =
{
  SIMPLE_IPA_PASS, /* type */
  "free-fnsummary", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_IPA_FREE_INLINE_SUMMARY, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_free_fn_summary : public simple_ipa_opt_pass
{
public:
  pass_ipa_free_fn_summary (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_free_fn_summary, ctxt),
      small_p (false)
  {}

  /* opt_pass methods: */
  opt_pass *clone () { return new pass_ipa_free_fn_summary (m_ctxt); }
  /* SMALL_P selects between the "early" instance of this pass and the
     late one; it is set by the pass manager.  */
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      small_p = param;
    }
  virtual bool gate (function *) { return small_p || !flag_wpa; }
  virtual unsigned int execute (function *)
    {
      ipa_free_fn_summary ();
      /* Early optimizations may make function unreachable.  We can not
	 remove unreachable functions as part of the early opts pass because
	 TODOs are run before subpasses.  Do it here.  */
      return small_p ? TODO_remove_functions | TODO_dump_symtab : 0;
    }

private:
  bool small_p;
}; // class pass_ipa_free_fn_summary

} // anon namespace

simple_ipa_opt_pass *
make_pass_ipa_free_fn_summary (gcc::context *ctxt)
{
  return new pass_ipa_free_fn_summary (ctxt);
}

namespace {

/* Main IPA pass: generate, stream and read the function summaries.  */
const pass_data pass_data_ipa_fn_summary =
{
  IPA_PASS, /* type */
  "fnsummary", /* name */
  OPTGROUP_INLINE, /* optinfo_flags */
  TV_IPA_FNSUMMARY, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_dump_symtab ), /* todo_flags_finish */
};

class pass_ipa_fn_summary : public ipa_opt_pass_d
{
public:
  pass_ipa_fn_summary (gcc::context *ctxt)
    : ipa_opt_pass_d (pass_data_ipa_fn_summary, ctxt,
		      ipa_fn_summary_generate, /* generate_summary */
		      ipa_fn_summary_write, /* write_summary */
		      ipa_fn_summary_read, /* read_summary */
		      NULL, /* write_optimization_summary */
		      NULL, /* read_optimization_summary */
		      NULL, /* stmt_fixup */
		      0, /* function_transform_todo_flags_start */
		      NULL, /* function_transform */
		      NULL) /* variable_transform */
  {}

  /* opt_pass methods: */
  /* All work happens through the summary hooks above.  */
  virtual unsigned int execute (function *) { return 0; }

}; // class pass_ipa_fn_summary

} // anon namespace

ipa_opt_pass_d *
make_pass_ipa_fn_summary (gcc::context *ctxt)
{
  return new pass_ipa_fn_summary (ctxt);
}

/* Reset all state within ipa-fnsummary.c so that we can rerun the compiler
   within the same process.  For use by toplev::finalize.  */

void
ipa_fnsummary_c_finalize (void)
{
  ipa_free_fn_summary ();
}
FlopCounterFunctor.h
/**
 * @file FlopCounterFunctor.h
 *
 * @date 22 Jan 2018
 * @author tchipevn
 */

#pragma once

#include "autopas/pairwiseFunctors/Functor.h"
#include "autopas/utils/ArrayMath.h"

namespace autopas {
/**
 * This class helps in getting the number of performed floating point
 * operations. It is a functor that only calculated the amount of floating point
 * operations.
 * @todo this class currently is limited to the following case:
 *  - constant cutoff radius
 *  - constant amount of floating point operations for one kernel call (distance
 * < cutoff)
 * @tparam Particle
 * @tparam ParticleCell
 */
template <class Particle, class ParticleCell>
class FlopCounterFunctor : public Functor<Particle, ParticleCell, typename Particle::SoAArraysType,
                                          FlopCounterFunctor<Particle, ParticleCell>> {
 public:
  // Measurement-only functor: never a candidate during auto-tuning.
  bool isRelevantForTuning() override { return false; }

  // Pure counting is symmetric, so both Newton3 modes are acceptable.
  bool allowsNewton3() override { return true; }

  bool allowsNonNewton3() override { return true; }

  bool isAppropriateClusterSize(unsigned int clusterSize, DataLayoutOption::Value dataLayout) const override {
    return dataLayout == DataLayoutOption::aos;  // no support for clusters yet, unless aos.
  }

  /**
   * constructor of FlopCounterFunctor
   * @param cutoffRadius the cutoff radius
   */
  explicit FlopCounterFunctor<Particle, ParticleCell>(double cutoffRadius)
      : autopas::Functor<Particle, ParticleCell, typename Particle::SoAArraysType,
                         FlopCounterFunctor<Particle, ParticleCell>>(cutoffRadius),
        _cutoffSquare(cutoffRadius * cutoffRadius),
        _distanceCalculations(0ul),
        _kernelCalls(0ul) {}

  /**
   * Counts one distance calculation for the pair (i, j) and one kernel call
   * if the pair is within the cutoff. Counter updates are guarded by an OpenMP
   * critical section since this functor may be called concurrently.
   */
  void AoSFunctor(Particle &i, Particle &j, bool newton3) override {
    auto dr = utils::ArrayMath::sub(i.getR(), j.getR());
    double dr2 = utils::ArrayMath::dot(dr, dr);
#ifdef AUTOPAS_OPENMP
#pragma omp critical
#endif
    {
      ++_distanceCalculations;
      if (dr2 <= _cutoffSquare) ++_kernelCalls;
    };
  }

  /**
   * Counts distance calculations and kernel calls for all pairs within one
   * SoA (single-cell version). Per-row accumulators are reduced via
   * `omp simd reduction` and flushed to the member counters under a critical
   * section once per outer iteration.
   */
  void SoAFunctor(SoAView<typename Particle::SoAArraysType> soa, bool newton3) override {
    if (soa.getNumParticles() == 0) return;

    double *const __restrict__ x1ptr = soa.template begin<Particle::AttributeNames::posX>();
    double *const __restrict__ y1ptr = soa.template begin<Particle::AttributeNames::posY>();
    double *const __restrict__ z1ptr = soa.template begin<Particle::AttributeNames::posZ>();

    for (unsigned int i = 0; i < soa.getNumParticles(); ++i) {
      unsigned long distanceCalculationsAcc = 0;
      unsigned long kernelCallsAcc = 0;

      // icpc vectorizes this.
      // g++ only with -ffast-math or -funsafe-math-optimizations
#pragma omp simd reduction(+ : kernelCallsAcc, distanceCalculationsAcc)
      for (unsigned int j = i + 1; j < soa.getNumParticles(); ++j) {
        ++distanceCalculationsAcc;

        const double drx = x1ptr[i] - x1ptr[j];
        const double dry = y1ptr[i] - y1ptr[j];
        const double drz = z1ptr[i] - z1ptr[j];

        const double drx2 = drx * drx;
        const double dry2 = dry * dry;
        const double drz2 = drz * drz;

        const double dr2 = drx2 + dry2 + drz2;

        if (dr2 <= _cutoffSquare) ++kernelCallsAcc;
      }
#ifdef AUTOPAS_OPENMP
#pragma omp critical
#endif
      {
        _distanceCalculations += distanceCalculationsAcc;
        _kernelCalls += kernelCallsAcc;
      }
    }
  }

  /**
   * Counts distance calculations and kernel calls for all pairs between two
   * SoAs (cell-pair version). Same accumulation scheme as the single-SoA
   * overload.
   */
  void SoAFunctor(SoAView<typename Particle::SoAArraysType> soa1, SoAView<typename Particle::SoAArraysType> soa2,
                  bool newton3) override {
    double *const __restrict__ x1ptr = soa1.template begin<Particle::AttributeNames::posX>();
    double *const __restrict__ y1ptr = soa1.template begin<Particle::AttributeNames::posY>();
    double *const __restrict__ z1ptr = soa1.template begin<Particle::AttributeNames::posZ>();
    double *const __restrict__ x2ptr = soa2.template begin<Particle::AttributeNames::posX>();
    double *const __restrict__ y2ptr = soa2.template begin<Particle::AttributeNames::posY>();
    double *const __restrict__ z2ptr = soa2.template begin<Particle::AttributeNames::posZ>();

    for (unsigned int i = 0; i < soa1.getNumParticles(); ++i) {
      unsigned long distanceCalculationsAcc = 0;
      unsigned long kernelCallsAcc = 0;

      // icpc vectorizes this.
      // g++ only with -ffast-math or -funsafe-math-optimizations
#pragma omp simd reduction(+ : kernelCallsAcc, distanceCalculationsAcc)
      for (unsigned int j = 0; j < soa2.getNumParticles(); ++j) {
        ++distanceCalculationsAcc;

        const double drx = x1ptr[i] - x2ptr[j];
        const double dry = y1ptr[i] - y2ptr[j];
        const double drz = z1ptr[i] - z2ptr[j];

        const double drx2 = drx * drx;
        const double dry2 = dry * dry;
        const double drz2 = drz * drz;

        const double dr2 = drx2 + dry2 + drz2;

        if (dr2 <= _cutoffSquare) {
          ++kernelCallsAcc;
        }
      }
#ifdef AUTOPAS_OPENMP
#pragma omp critical
#endif
      {
        _distanceCalculations += distanceCalculationsAcc;
        _kernelCalls += kernelCallsAcc;
      }
    }
  }

  /**
   * Counts distance calculations and kernel calls for particles i in
   * [iFrom, iTo) against their Verlet neighbor lists. The bulk of each list
   * is processed in gathered chunks of `vecsize` entries so the compiler can
   * vectorize; the remainder is handled in a scalar tail loop.
   */
  void SoAFunctor(SoAView<typename Particle::SoAArraysType> soa,
                  const std::vector<std::vector<size_t, autopas::AlignedAllocator<size_t>>> &neighborList, size_t iFrom,
                  size_t iTo, bool newton3) override {
    auto numParts = soa.getNumParticles();

    if (numParts == 0) return;

    double *const __restrict__ xptr = soa.template begin<Particle::AttributeNames::posX>();
    double *const __restrict__ yptr = soa.template begin<Particle::AttributeNames::posY>();
    double *const __restrict__ zptr = soa.template begin<Particle::AttributeNames::posZ>();

    for (size_t i = iFrom; i < iTo; ++i) {
      const size_t listSizeI = neighborList[i].size();
      const size_t *const __restrict__ currentList = neighborList[i].data();

      // this is a magic number, that should correspond to at least
      // vectorization width*N have tested multiple sizes:
      // 4: small speedup compared to AoS
      // 8: small speedup compared to AoS
      // 12: small but best speedup compared to Aos
      // 16: smaller speedup
      // in theory this is a variable, we could auto-tune over...
#ifdef __AVX512F__
      // use a multiple of 8 for avx
      const size_t vecsize = 16;
#else
      // for everything else 12 is faster
      const size_t vecsize = 12;
#endif
      size_t joff = 0;

      // if the size of the verlet list is larger than the given size vecsize,
      // we will use a vectorized version.
      if (listSizeI >= vecsize) {
        alignas(64) std::array<double, vecsize> xtmp{}, ytmp{}, ztmp{}, xArr{}, yArr{}, zArr{};
        // broadcast of the position of particle i
        for (size_t tmpj = 0; tmpj < vecsize; tmpj++) {
          xtmp[tmpj] = xptr[i];
          ytmp[tmpj] = yptr[i];
          ztmp[tmpj] = zptr[i];
        }
        // loop over the verlet list from 0 to x*vecsize
        for (; joff < listSizeI - vecsize + 1; joff += vecsize) {
          unsigned long distanceCalculationsAcc = 0;
          unsigned long kernelCallsAcc = 0;
          // in each iteration we calculate the interactions of particle i with
          // vecsize particles in the neighborlist of particle i starting at
          // particle joff

          // gather position of particle j
#pragma omp simd safelen(vecsize)
          for (size_t tmpj = 0; tmpj < vecsize; tmpj++) {
            xArr[tmpj] = xptr[currentList[joff + tmpj]];
            yArr[tmpj] = yptr[currentList[joff + tmpj]];
            zArr[tmpj] = zptr[currentList[joff + tmpj]];
          }

          // do omp simd with reduction of the interaction
#pragma omp simd reduction(+ : kernelCallsAcc, distanceCalculationsAcc) safelen(vecsize)
          for (size_t j = 0; j < vecsize; j++) {
            ++distanceCalculationsAcc;

            const double drx = xtmp[j] - xArr[j];
            const double dry = ytmp[j] - yArr[j];
            const double drz = ztmp[j] - zArr[j];

            const double drx2 = drx * drx;
            const double dry2 = dry * dry;
            const double drz2 = drz * drz;

            const double dr2 = drx2 + dry2 + drz2;

            // branch-free cutoff test keeps the loop vectorizable
            const unsigned long mask = (dr2 <= _cutoffSquare) ? 1 : 0;

            kernelCallsAcc += mask;
          }
#ifdef AUTOPAS_OPENMP
#pragma omp critical
#endif
          {
            _distanceCalculations += distanceCalculationsAcc;
            _kernelCalls += kernelCallsAcc;
          }
        }
      }
      unsigned long distanceCalculationsAcc = 0;
      unsigned long kernelCallsAcc = 0;
      // this loop goes over the remainder and uses no optimizations
      for (size_t jNeighIndex = joff; jNeighIndex < listSizeI; ++jNeighIndex) {
        size_t j = neighborList[i][jNeighIndex];
        if (i == j) continue;

        ++distanceCalculationsAcc;

        const double drx = xptr[i] - xptr[j];
        const double dry = yptr[i] - yptr[j];
        const double drz = zptr[i] - zptr[j];

        const double drx2 = drx * drx;
        const double dry2 = dry * dry;
        const double drz2 = drz * drz;

        const double dr2 = drx2 + dry2 + drz2;

        if (dr2 <= _cutoffSquare) {
          ++kernelCallsAcc;
        }
      }
#ifdef AUTOPAS_OPENMP
#pragma omp critical
#endif
      {
        _distanceCalculations += distanceCalculationsAcc;
        _kernelCalls += kernelCallsAcc;
      }
    }
  }

  /**
   * Estimates flops for a single CUDA SoA: every particle pair (size*size,
   * including self pairs) is counted as both a distance calculation and a
   * kernel call — an upper-bound estimate, not an exact count.
   */
  void CudaFunctor(CudaSoA<typename Particle::CudaDeviceArraysType> &device_handle, bool newton3) override {
#if defined(AUTOPAS_CUDA)
    // estimate flops on GPU
    size_t size = device_handle.template get<Particle::AttributeNames::posX>().size();
    _distanceCalculations += size * size;
    _kernelCalls += size * size;
#else
    utils::ExceptionHandler::exception("AutoPas was compiled without CUDA support!");
#endif
  }

  /**
   * Estimates flops for a pair of CUDA SoAs; see the single-SoA overload for
   * the counting scheme (size1*size2 upper bound).
   */
  void CudaFunctor(CudaSoA<typename Particle::CudaDeviceArraysType> &device_handle1,
                   CudaSoA<typename Particle::CudaDeviceArraysType> &device_handle2, bool newton3) override {
#if defined(AUTOPAS_CUDA)
    // estimate flops on GPU
    size_t size1 = device_handle1.template get<Particle::AttributeNames::posX>().size();
    size_t size2 = device_handle2.template get<Particle::AttributeNames::posX>().size();
    _distanceCalculations += size1 * size2;
    _kernelCalls += size1 * size2;
#else
    utils::ExceptionHandler::exception("AutoPas was compiled without CUDA support!");
#endif
  }

  /**
   * @copydoc Functor::deviceSoALoader
   */
  void deviceSoALoader(::autopas::SoA<typename Particle::SoAArraysType> &soa,
                       CudaSoA<typename Particle::CudaDeviceArraysType> &device_handle) override {
#if defined(AUTOPAS_CUDA)
    size_t size = soa.getNumParticles();
    if (size == 0) return;

    device_handle.template get<Particle::AttributeNames::posX>().copyHostToDevice(
        size, soa.template begin<Particle::AttributeNames::posX>());
    device_handle.template get<Particle::AttributeNames::posY>().copyHostToDevice(
        size, soa.template begin<Particle::AttributeNames::posY>());
    device_handle.template get<Particle::AttributeNames::posZ>().copyHostToDevice(
        size, soa.template begin<Particle::AttributeNames::posZ>());

    device_handle.template get<Particle::AttributeNames::forceX>().copyHostToDevice(
        size, soa.template begin<Particle::AttributeNames::forceX>());
    device_handle.template get<Particle::AttributeNames::forceY>().copyHostToDevice(
        size, soa.template begin<Particle::AttributeNames::forceY>());
    device_handle.template get<Particle::AttributeNames::forceZ>().copyHostToDevice(
        size, soa.template begin<Particle::AttributeNames::forceZ>());
#else
    utils::ExceptionHandler::exception("AutoPas was compiled without CUDA support!");
#endif
  }

  /**
   * @copydoc Functor::deviceSoAExtractor
   */
  void deviceSoAExtractor(::autopas::SoA<typename Particle::SoAArraysType> &soa,
                          CudaSoA<typename Particle::CudaDeviceArraysType> &device_handle) override {
#if defined(AUTOPAS_CUDA)
    size_t size = soa.getNumParticles();
    if (size == 0) return;

    device_handle.template get<Particle::AttributeNames::forceX>().copyDeviceToHost(
        size, soa.template begin<Particle::AttributeNames::forceX>());
    device_handle.template get<Particle::AttributeNames::forceY>().copyDeviceToHost(
        size, soa.template begin<Particle::AttributeNames::forceY>());
    device_handle.template get<Particle::AttributeNames::forceZ>().copyDeviceToHost(
        size, soa.template begin<Particle::AttributeNames::forceZ>());
#else
    utils::ExceptionHandler::exception("AutoPas was compiled without CUDA support!");
#endif
  }

  /**
   * @copydoc Functor::getNeededAttr()
   */
  constexpr static const std::array<typename Particle::AttributeNames, 3> getNeededAttr() {
    return std::array<typename Particle::AttributeNames, 3>{
        Particle::AttributeNames::posX, Particle::AttributeNames::posY, Particle::AttributeNames::posZ};
  }

  /**
   * @copydoc Functor::getNeededAttr(std::false_type)
   */
  constexpr static const std::array<typename Particle::AttributeNames, 3> getNeededAttr(std::false_type) {
    return getNeededAttr();
  }

  /**
   * @copydoc Functor::getComputedAttr()
   */
  constexpr static const std::array<typename Particle::AttributeNames, 0> getComputedAttr() {
    return std::array<typename Particle::AttributeNames, 0>{/*Nothing*/};
  }

  /**
   * get the hit rate of the pair-wise interaction, i.e. the ratio of the number
   * of kernel calls compared to the number of distance calculations
   * @return the hit rate
   * @note yields NaN if no distance calculation has been performed yet
   * (0.0 / 0.0 in IEEE double arithmetic).
   */
  double getHitRate() { return static_cast<double>(_kernelCalls) / static_cast<double>(_distanceCalculations); }

  /**
   * get the total number of flops
   * @param numFlopsPerKernelCall
   * @return
   */
  double getFlops(unsigned long numFlopsPerKernelCall) const {
    const double distFlops = numFlopsPerDistanceCalculation * static_cast<double>(_distanceCalculations);
    const double kernFlops = numFlopsPerKernelCall * static_cast<double>(_kernelCalls);
    return distFlops + kernFlops;
  }

  /**
   * get the number of calculated distance operations
   * @return
   */
  unsigned long getDistanceCalculations() const { return _distanceCalculations; }

  /**
   * get the number of kernel calls, i.e. the number of pairs of particles with
   * a distance not larger than the cutoff
   * @return
   */
  unsigned long getKernelCalls() const { return _kernelCalls; }

  /**
   * number of flops for one distance calculation.
   * 3 sub + 3 square + 2 add
   */
  static constexpr double numFlopsPerDistanceCalculation = 8.0;

 private:
  // squared cutoff radius; pairs with dr2 <= this count as kernel calls
  double _cutoffSquare;
  // global counters, updated under `omp critical` in the functors above
  unsigned long _distanceCalculations, _kernelCalls;
};

}  // namespace autopas
lsh_index.h
/*********************************************************************** * Software License Agreement (BSD License) * * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. * * THE BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *************************************************************************/

/***********************************************************************
 * Author: Vincent Rabaud
 *************************************************************************/

#ifndef FLANN_LSH_INDEX_H_
#define FLANN_LSH_INDEX_H_

#include <algorithm>
#include <cassert>
#include <cstring>
#include <map>
#include <vector>

#include "flann/general.h"
#include "flann/algorithms/nn_index.h"
#include "flann/util/matrix.h"
#include "flann/util/result_set.h"
#include "flann/util/heap.h"
#include "flann/util/lsh_table.h"
#include "flann/util/allocator.h"
#include "flann/util/random.h"
#include "flann/util/saving.h"

namespace flann
{

/** Parameters for the LSH index. */
struct LshIndexParams : public IndexParams
{
    LshIndexParams(unsigned int table_number = 12, unsigned int key_size = 20, unsigned int multi_probe_level = 2)
    {
        (*this)["algorithm"] = FLANN_INDEX_LSH;
        // The number of hash tables to use
        (*this)["table_number"] = table_number;
        // The length of the key in the hash tables
        (*this)["key_size"] = key_size;
        // Number of levels to use in multi-probe (0 for standard LSH)
        (*this)["multi_probe_level"] = multi_probe_level;
    }
};

/**
 * Locality-sensitive hashing (LSH) index
 *
 * Contains the LSH hash tables and other information for indexing a set of
 * points for nearest-neighbor matching.
 */
template<typename Distance>
class LshIndex : public NNIndex<Distance>
{
public:
    typedef typename Distance::ElementType ElementType;
    typedef typename Distance::ResultType DistanceType;

    typedef NNIndex<Distance> BaseClass;

    /** Constructor
     * @param params parameters passed to the LSH algorithm
     * @param d the distance used
     */
    LshIndex(const IndexParams& params = LshIndexParams(), Distance d = Distance())
        : BaseClass(params, d)
    {
        table_number_ = get_param<unsigned int>(index_params_,"table_number",12);
        key_size_ = get_param<unsigned int>(index_params_,"key_size",20);
        multi_probe_level_ = get_param<unsigned int>(index_params_,"multi_probe_level",2);

        // Precompute the neighboring-bucket masks for multi-probe LSH.
        fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_);
    }

    /** Constructor
     * @param input_data dataset with the input features
     * @param params parameters passed to the LSH algorithm
     * @param d the distance used
     */
    LshIndex(const Matrix<ElementType>& input_data, const IndexParams& params = LshIndexParams(), Distance d = Distance())
        : BaseClass(params, d)
    {
        table_number_ = get_param<unsigned int>(index_params_,"table_number",12);
        key_size_ = get_param<unsigned int>(index_params_,"key_size",20);
        multi_probe_level_ = get_param<unsigned int>(index_params_,"multi_probe_level",2);

        fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_);

        setDataset(input_data);
    }

    /** Copy constructor — copies tables, parameters and masks. */
    LshIndex(const LshIndex& other)
        : BaseClass(other),
          tables_(other.tables_),
          table_number_(other.table_number_),
          key_size_(other.key_size_),
          multi_probe_level_(other.multi_probe_level_),
          xor_masks_(other.xor_masks_)
    {
    }

    /** Copy-and-swap assignment. */
    LshIndex& operator=(LshIndex other)
    {
        this->swap(other);
        return *this;
    }

    virtual ~LshIndex()
    {
        freeIndex();
    }

    /** Polymorphic copy. */
    BaseClass* clone() const
    {
        return new LshIndex(*this);
    }

    using BaseClass::buildIndex;

    /** Adds points to the index; rebuilds from scratch once the dataset has
     * grown by more than rebuild_threshold since the last build, otherwise
     * incrementally inserts the new points into every table.
     */
    void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2)
    {
        assert(points.cols==veclen_);
        size_t old_size = size_;

        extendDataset(points);

        if (rebuild_threshold>1 && size_at_build_*rebuild_threshold<size_) {
            buildIndex();
        }
        else {
            for (unsigned int i = 0; i < table_number_; ++i) {
                lsh::LshTable<ElementType>& table = tables_[i];
                // NOTE(review): the inner 'i' shadows the table counter above;
                // it indexes the newly added points, not the tables.
                for (size_t i=old_size;i<size_;++i) {
                    table.add(i, points_[i]);
                }
            }
        }
    }

    flann_algorithm_t getType() const
    {
        return FLANN_INDEX_LSH;
    }

    /** (De)serializes the index; on load, mirrors the members back into
     * index_params_ so they stay consistent.
     */
    template<typename Archive>
    void serialize(Archive& ar)
    {
        ar.setObject(this);

        ar & *static_cast<NNIndex<Distance>*>(this);

        ar & table_number_;
        ar & key_size_;
        ar & multi_probe_level_;

        ar & xor_masks_;
        ar & tables_;

        if (Archive::is_loading::value) {
            index_params_["algorithm"] = getType();
            index_params_["table_number"] = table_number_;
            index_params_["key_size"] = key_size_;
            index_params_["multi_probe_level"] = multi_probe_level_;
        }
    }

    void saveIndex(FILE* stream)
    {
        serialization::SaveArchive sa(stream);
        sa & *this;
    }

    void loadIndex(FILE* stream)
    {
        serialization::LoadArchive la(stream);
        la & *this;
    }

    /**
     * Computes the index memory usage
     * NOTE(review): presumably an approximation — this ignores the storage of
     * the hash tables themselves; confirm against the other index types.
     * Returns: memory used by the index
     */
    int usedMemory() const
    {
        return size_ * sizeof(int);
    }

    /**
     * \brief Perform k-nearest neighbor search
     * \param[in] queries The query points for which to find the nearest neighbors
     * \param[out] indices The indices of the nearest neighbors found
     * \param[out] dists Distances to the nearest neighbors found
     * \param[in] knn Number of nearest neighbors to return
     * \param[in] params Search parameters
     */
    int knnSearch(const Matrix<ElementType>& queries,
                  Matrix<size_t>& indices,
                  Matrix<DistanceType>& dists,
                  size_t knn,
                  const SearchParams& params) const
    {
        assert(queries.cols == veclen_);
        assert(indices.rows >= queries.rows);
        assert(dists.rows >= queries.rows);
        assert(indices.cols >= knn);
        assert(dists.cols >= knn);

        int count = 0;
        if (params.use_heap==FLANN_True) {
            // One result set per thread; the parallel for splits the queries.
#pragma omp parallel num_threads(params.cores)
            {
                KNNUniqueResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    resultSet.copy(indices[i], dists[i], n, params.sorted);
                    indices_to_ids(indices[i], indices[i], n);
                    count += n;
                }
            }
        }
        else {
#pragma omp parallel num_threads(params.cores)
            {
                KNNResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    resultSet.copy(indices[i], dists[i], n, params.sorted);
                    indices_to_ids(indices[i], indices[i], n);
                    count += n;
                }
            }
        }

        return count;
    }

    /**
     * \brief Perform k-nearest neighbor search
     * \param[in] queries The query points for which to find the nearest neighbors
     * \param[out] indices The indices of the nearest neighbors found
     * \param[out] dists Distances to the nearest neighbors found
     * \param[in] knn Number of nearest neighbors to return
     * \param[in] params Search parameters
     */
    int knnSearch(const Matrix<ElementType>& queries,
                  std::vector< std::vector<size_t> >& indices,
                  std::vector<std::vector<DistanceType> >& dists,
                  size_t knn,
                  const SearchParams& params) const
    {
        assert(queries.cols == veclen_);
        if (indices.size() < queries.rows ) indices.resize(queries.rows);
        if (dists.size() < queries.rows ) dists.resize(queries.rows);

        int count = 0;
        if (params.use_heap==FLANN_True) {
#pragma omp parallel num_threads(params.cores)
            {
                KNNUniqueResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n > 0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                    count += n;
                }
            }
        }
        else {
#pragma omp parallel num_threads(params.cores)
            {
                KNNResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n > 0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                    count += n;
                }
            }
        }

        return count;
    }

    /**
     * Find set of nearest neighbors to vec. Their indices are stored inside
     * the result object.
     *
     * Params:
     *     result = the result object in which the indices of the nearest-neighbors are stored
     *     vec = the vector for which to search the nearest neighbors
     * (the search parameters are currently unused by the LSH search)
     */
    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& /*searchParams*/) const
    {
        getNeighbors(vec, result);
    }

protected:

    /**
     * Builds the index: one LshTable per table_number_, each fed all points.
     */
    void buildIndexImpl()
    {
        tables_.resize(table_number_);
        std::vector<std::pair<size_t,ElementType*> > features;
        features.reserve(points_.size());
        for (size_t i=0;i<points_.size();++i) {
            features.push_back(std::make_pair(i, points_[i]));
        }
        for (unsigned int i = 0; i < table_number_; ++i) {
            lsh::LshTable<ElementType>& table = tables_[i];
            table = lsh::LshTable<ElementType>(veclen_, key_size_);

            // Add the features to the table
            table.add(features);
        }
    }

    void freeIndex()
    {
        /* nothing to do here */
    }

private:
    /** Defines the comparator on score and index */
    typedef std::pair<float, unsigned int> ScoreIndexPair;
    struct SortScoreIndexPairOnSecond
    {
        bool operator()(const ScoreIndexPair& left, const ScoreIndexPair& right) const
        {
            return left.second < right.second;
        }
    };

    /** Fills the different xor masks to use when getting the neighbors in multi-probe LSH
     * @param key the key we build neighbors from
     * @param lowest_index the lowest index of the bit set
     * @param level the multi-probe level we are at
     * @param xor_masks all the xor mask
     */
    void fill_xor_mask(lsh::BucketKey key, int lowest_index, unsigned int level, std::vector<lsh::BucketKey>& xor_masks)
    {
        xor_masks.push_back(key);
        if (level == 0) return;
        for (int index = lowest_index - 1; index >= 0; --index) {
            // Create a new key
            lsh::BucketKey new_key = key | (1 << index);
            fill_xor_mask(new_key, index, level - 1, xor_masks);
        }
    }

    /** Performs the approximate nearest-neighbor search.
     * NOTE(review): the 'static' local heap makes this method non-reentrant
     * and its contents persist across calls; 'do_radius' and
     * 'checked_average' are never read in the body. The heap pushes
     * 'training_index' (an iterator) where an index is expected — looks
     * suspect; confirm intended before relying on this overload.
     * @param vec the feature to analyze
     * @param do_radius flag indicating if we check the radius too
     * @param radius the radius if it is a radius search
     * @param do_k flag indicating if we limit the number of nn
     * @param k_nn the number of nearest neighbors
     * @param checked_average used for debugging
     */
    void getNeighbors(const ElementType* vec, bool do_radius, float radius, bool do_k, unsigned int k_nn,
                      float& checked_average)
    {
        static std::vector<ScoreIndexPair> score_index_heap;

        if (do_k) {
            unsigned int worst_score = std::numeric_limits<unsigned int>::max();
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
            for (; table != table_end; ++table) {
                size_t key = table->getKey(vec);
                std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
                std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
                for (; xor_mask != xor_mask_end; ++xor_mask) {
                    size_t sub_key = key ^ (*xor_mask);
                    const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
                    if (bucket == 0) continue;

                    // Go over each descriptor index
                    std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
                    std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
                    DistanceType hamming_distance;

                    // Process the rest of the candidates
                    for (; training_index < last_training_index; ++training_index) {
                        if (removed_ && removed_points_.test(*training_index)) continue;
                        hamming_distance = distance_(vec, points_[*training_index].point, veclen_);

                        if (hamming_distance < worst_score) {
                            // Insert the new element
                            score_index_heap.push_back(ScoreIndexPair(hamming_distance, training_index));
                            std::push_heap(score_index_heap.begin(), score_index_heap.end());

                            if (score_index_heap.size() > (unsigned int)k_nn) {
                                // Remove the highest distance value as we have too many elements
                                std::pop_heap(score_index_heap.begin(), score_index_heap.end());
                                score_index_heap.pop_back();
                                // Keep track of the worst score
                                worst_score = score_index_heap.front().first;
                            }
                        }
                    }
                }
            }
        }
        else {
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
            for (; table != table_end; ++table) {
                size_t key = table->getKey(vec);
                std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
                std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
                for (; xor_mask != xor_mask_end; ++xor_mask) {
                    size_t sub_key = key ^ (*xor_mask);
                    const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
                    if (bucket == 0) continue;

                    // Go over each descriptor index
                    std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
                    std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
                    DistanceType hamming_distance;

                    // Process the rest of the candidates
                    for (; training_index < last_training_index; ++training_index) {
                        if (removed_ && removed_points_.test(*training_index)) continue;
                        // Compute the Hamming distance
                        hamming_distance = distance_(vec, points_[*training_index].point, veclen_);
                        if (hamming_distance < radius) score_index_heap.push_back(ScoreIndexPair(hamming_distance, training_index));
                    }
                }
            }
        }
    }

    /** Performs the approximate nearest-neighbor search.
     * This is a slower version than the above as it uses the ResultSet
     * @param vec the feature to analyze
     */
    void getNeighbors(const ElementType* vec, ResultSet<DistanceType>& result) const
    {
        typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
        typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
        for (; table != table_end; ++table) {
            size_t key = table->getKey(vec);
            std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
            std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
            for (; xor_mask != xor_mask_end; ++xor_mask) {
                size_t sub_key = key ^ (*xor_mask);
                const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
                if (bucket == 0) continue;

                // Go over each descriptor index
                std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
                std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
                DistanceType hamming_distance;

                // Process the rest of the candidates
                for (; training_index < last_training_index; ++training_index) {
                    if (removed_ && removed_points_.test(*training_index)) continue;
                    // Compute the Hamming distance
                    hamming_distance = distance_(vec, points_[*training_index], veclen_);
                    result.addPoint(hamming_distance, *training_index);
                }
            }
        }
    }

    /** Swaps all state with another index (used by copy-and-swap assignment). */
    void swap(LshIndex& other)
    {
        BaseClass::swap(other);
        std::swap(tables_, other.tables_);
        std::swap(size_at_build_, other.size_at_build_);
        std::swap(table_number_, other.table_number_);
        std::swap(key_size_, other.key_size_);
        std::swap(multi_probe_level_, other.multi_probe_level_);
        std::swap(xor_masks_, other.xor_masks_);
    }

    /** The different hash tables */
    std::vector<lsh::LshTable<ElementType> > tables_;

    /** table number */
    unsigned int table_number_;
    /** key size */
    unsigned int key_size_;
    /** How far should we look for neighbors in multi-probe LSH */
    unsigned int multi_probe_level_;

    /** The XOR masks to apply to a key to get the neighboring buckets */
    std::vector<lsh::BucketKey> xor_masks_;

    USING_BASECLASS_SYMBOLS
};
}

#endif //FLANN_LSH_INDEX_H_
fft.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ /* * Original code from the Cilk project * * Copyright (c) 2000 Massachusetts Institute of Technology * Copyright (c) 2000 Matteo Frigo */ #include <stdio.h> #include <math.h> #include <stdlib.h> #include <string.h> #include "bots.h" #include "app-desc.h" /* Definitions and operations for complex numbers */ /* * compute the W coefficients (that is, powers of the root of 1) * and store them into an array. 
 */
/* Parallel (task-based) computation of W[a..b]: recursively splits the range
 * into OpenMP tasks until it is small enough (< 128) to fill directly.
 * Exploits symmetry by writing both W[k] and W[n - k] per iteration. */
void compute_w_coefficients(int n, int a, int b, COMPLEX * W)
{
     register double twoPiOverN;
     register int k;
     register REAL s, c;

     if (b - a < 128) {
	  twoPiOverN = 2.0 * 3.1415926535897932384626434 / n;
	  for (k = a; k <= b; ++k) {
	       c = cos(twoPiOverN * k);
	       c_re(W[k]) = c_re(W[n - k]) = c;
	       s = sin(twoPiOverN * k);
	       c_im(W[k]) = -s;
	       c_im(W[n - k]) = s;
	  }
     } else {
	  int ab = (a + b) / 2;
	  /* Each half becomes an independent task; taskwait joins both. */
#pragma omp task
	  compute_w_coefficients(n, a, ab, W);
#pragma omp task
	  compute_w_coefficients(n, ab + 1, b, W);
#pragma omp taskwait
     }
}

/* Sequential reference version of compute_w_coefficients (identical math,
 * plain recursion instead of OpenMP tasks). */
void compute_w_coefficients_seq(int n, int a, int b, COMPLEX * W)
{
     register double twoPiOverN;
     register int k;
     register REAL s, c;

     if (b - a < 128) {
	  twoPiOverN = 2.0 * 3.1415926535897932384626434 / n;
	  for (k = a; k <= b; ++k) {
	       c = cos(twoPiOverN * k);
	       c_re(W[k]) = c_re(W[n - k]) = c;
	       s = sin(twoPiOverN * k);
	       c_im(W[k]) = -s;
	       c_im(W[n - k]) = s;
	  }
     } else {
	  int ab = (a + b) / 2;
	  compute_w_coefficients_seq(n, a, ab, W);
	  compute_w_coefficients_seq(n, ab + 1, b, W);
     }
}

/*
 * Determine (in a stupid way) if n is divisible by eight, then by four, else
 * find the smallest prime factor of n.
*/ int factor(int n) { int r; if (n < 2) return 1; if (n == 64 || n == 128 || n == 256 || n == 1024 || n == 2048 || n == 4096) return 8; if ((n & 15) == 0) return 16; if ((n & 7) == 0) return 8; if ((n & 3) == 0) return 4; if ((n & 1) == 0) return 2; /* try odd numbers up to n (computing the sqrt may be slower) */ for (r = 3; r < n; r += 2) if (n % r == 0) return r; /* n is prime */ return n; } void unshuffle(int a, int b, COMPLEX * in, COMPLEX * out, int r, int m) { int i, j; int r4 = r & (~0x3); const COMPLEX *ip; COMPLEX *jp; if (b - a < 16) { ip = in + a * r; for (i = a; i < b; ++i) { jp = out + i; for (j = 0; j < r4; j += 4) { jp[0] = ip[0]; jp[m] = ip[1]; jp[2 * m] = ip[2]; jp[3 * m] = ip[3]; jp += 4 * m; ip += 4; } for (; j < r; ++j) { *jp = *ip; ip++; jp += m; } } } else { int ab = (a + b) / 2; #pragma omp task unshuffle(a, ab, in, out, r, m); #pragma omp task unshuffle(ab, b, in, out, r, m); #pragma omp taskwait } } void unshuffle_seq(int a, int b, COMPLEX * in, COMPLEX * out, int r, int m) { int i, j; int r4 = r & (~0x3); const COMPLEX *ip; COMPLEX *jp; if (b - a < 16) { ip = in + a * r; for (i = a; i < b; ++i) { jp = out + i; for (j = 0; j < r4; j += 4) { jp[0] = ip[0]; jp[m] = ip[1]; jp[2 * m] = ip[2]; jp[3 * m] = ip[3]; jp += 4 * m; ip += 4; } for (; j < r; ++j) { *jp = *ip; ip++; jp += m; } } } else { int ab = (a + b) / 2; unshuffle_seq(a, ab, in, out, r, m); unshuffle_seq(ab, b, in, out, r, m); } } void fft_twiddle_gen1(COMPLEX * in, COMPLEX * out, COMPLEX * W, int r, int m, int nW, int nWdnti, int nWdntm) { int j, k; COMPLEX *jp, *kp; for (k = 0, kp = out; k < r; ++k, kp += m) { REAL r0, i0, rt, it, rw, iw; int l1 = nWdnti + nWdntm * k; int l0; r0 = i0 = 0.0; for (j = 0, jp = in, l0 = 0; j < r; ++j, jp += m) { rw = c_re(W[l0]); iw = c_im(W[l0]); rt = c_re(*jp); it = c_im(*jp); r0 += rt * rw - it * iw; i0 += rt * iw + it * rw; l0 += l1; if (l0 > nW) l0 -= nW; } c_re(*kp) = r0; c_im(*kp) = i0; } } void fft_twiddle_gen(int i, int i1, COMPLEX * in, 
COMPLEX * out, COMPLEX * W, int nW, int nWdn, int r, int m) { if (i == i1 - 1) { #pragma omp task fft_twiddle_gen1(in + i, out + i, W, r, m, nW, nWdn * i, nWdn * m); } else { int i2 = (i + i1) / 2; #pragma omp task fft_twiddle_gen(i, i2, in, out, W, nW, nWdn, r, m); #pragma omp task fft_twiddle_gen(i2, i1, in, out, W, nW, nWdn, r, m); } #pragma omp taskwait } void fft_twiddle_gen_seq(int i, int i1, COMPLEX * in, COMPLEX * out, COMPLEX * W, int nW, int nWdn, int r, int m) { if (i == i1 - 1) { fft_twiddle_gen1(in + i, out + i, W, r, m, nW, nWdn * i, nWdn * m); } else { int i2 = (i + i1) / 2; fft_twiddle_gen_seq(i, i2, in, out, W, nW, nWdn, r, m); fft_twiddle_gen_seq(i2, i1, in, out, W, nW, nWdn, r, m); } } /* machine-generated code begins here */ void fft_base_2(COMPLEX * in, COMPLEX * out) { REAL r1_0, i1_0; REAL r1_1, i1_1; r1_0 = c_re(in[0]); i1_0 = c_im(in[0]); r1_1 = c_re(in[1]); i1_1 = c_im(in[1]); c_re(out[0]) = (r1_0 + r1_1); c_im(out[0]) = (i1_0 + i1_1); c_re(out[1]) = (r1_0 - r1_1); c_im(out[1]) = (i1_0 - i1_1); } void fft_twiddle_2(int a, int b, COMPLEX * in, COMPLEX * out, COMPLEX * W, int nW, int nWdn, int m) { int l1, i; COMPLEX *jp, *kp; REAL tmpr, tmpi, wr, wi; if ((b - a) < 128) { for (i = a, l1 = nWdn * i, kp = out + i; i < b; i++, l1 += nWdn, kp++) { jp = in + i; { REAL r1_0, i1_0; REAL r1_1, i1_1; r1_0 = c_re(jp[0 * m]); i1_0 = c_im(jp[0 * m]); wr = c_re(W[1 * l1]); wi = c_im(W[1 * l1]); tmpr = c_re(jp[1 * m]); tmpi = c_im(jp[1 * m]); r1_1 = ((wr * tmpr) - (wi * tmpi)); i1_1 = ((wi * tmpr) + (wr * tmpi)); c_re(kp[0 * m]) = (r1_0 + r1_1); c_im(kp[0 * m]) = (i1_0 + i1_1); c_re(kp[1 * m]) = (r1_0 - r1_1); c_im(kp[1 * m]) = (i1_0 - i1_1); } } } else { int ab = (a + b) / 2; #pragma omp task fft_twiddle_2(a, ab, in, out, W, nW, nWdn, m); #pragma omp task fft_twiddle_2(ab, b, in, out, W, nW, nWdn, m); #pragma omp taskwait } } void fft_twiddle_2_seq(int a, int b, COMPLEX * in, COMPLEX * out, COMPLEX * W, int nW, int nWdn, int m) { int l1, i; COMPLEX *jp, 
*kp; REAL tmpr, tmpi, wr, wi; if ((b - a) < 128) { for (i = a, l1 = nWdn * i, kp = out + i; i < b; i++, l1 += nWdn, kp++) { jp = in + i; { REAL r1_0, i1_0; REAL r1_1, i1_1; r1_0 = c_re(jp[0 * m]); i1_0 = c_im(jp[0 * m]); wr = c_re(W[1 * l1]); wi = c_im(W[1 * l1]); tmpr = c_re(jp[1 * m]); tmpi = c_im(jp[1 * m]); r1_1 = ((wr * tmpr) - (wi * tmpi)); i1_1 = ((wi * tmpr) + (wr * tmpi)); c_re(kp[0 * m]) = (r1_0 + r1_1); c_im(kp[0 * m]) = (i1_0 + i1_1); c_re(kp[1 * m]) = (r1_0 - r1_1); c_im(kp[1 * m]) = (i1_0 - i1_1); } } } else { int ab = (a + b) / 2; fft_twiddle_2_seq(a, ab, in, out, W, nW, nWdn, m); fft_twiddle_2_seq(ab, b, in, out, W, nW, nWdn, m); } } void fft_unshuffle_2(int a, int b, COMPLEX * in, COMPLEX * out, int m) { int i; const COMPLEX *ip; COMPLEX *jp; if ((b - a) < 128) { ip = in + a * 2; for (i = a; i < b; ++i) { jp = out + i; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; } } else { int ab = (a + b) / 2; #pragma omp task fft_unshuffle_2(a, ab, in, out, m); #pragma omp task fft_unshuffle_2(ab, b, in, out, m); #pragma omp taskwait } } void fft_unshuffle_2_seq(int a, int b, COMPLEX * in, COMPLEX * out, int m) { int i; const COMPLEX *ip; COMPLEX *jp; if ((b - a) < 128) { ip = in + a * 2; for (i = a; i < b; ++i) { jp = out + i; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; } } else { int ab = (a + b) / 2; fft_unshuffle_2_seq(a, ab, in, out, m); fft_unshuffle_2_seq(ab, b, in, out, m); } } void fft_base_4(COMPLEX * in, COMPLEX * out) { REAL r1_0, i1_0; REAL r1_1, i1_1; REAL r1_2, i1_2; REAL r1_3, i1_3; { REAL r2_0, i2_0; REAL r2_2, i2_2; r2_0 = c_re(in[0]); i2_0 = c_im(in[0]); r2_2 = c_re(in[2]); i2_2 = c_im(in[2]); r1_0 = (r2_0 + r2_2); i1_0 = (i2_0 + i2_2); r1_2 = (r2_0 - r2_2); i1_2 = (i2_0 - i2_2); } { REAL r2_1, i2_1; REAL r2_3, i2_3; r2_1 = c_re(in[1]); i2_1 = c_im(in[1]); r2_3 = c_re(in[3]); i2_3 = c_im(in[3]); r1_1 = (r2_1 + r2_3); i1_1 = (i2_1 + i2_3); r1_3 = (r2_1 - r2_3); i1_3 = (i2_1 - i2_3); } c_re(out[0]) = (r1_0 + r1_1); c_im(out[0]) = (i1_0 + i1_1); 
c_re(out[2]) = (r1_0 - r1_1); c_im(out[2]) = (i1_0 - i1_1); c_re(out[1]) = (r1_2 + i1_3); c_im(out[1]) = (i1_2 - r1_3); c_re(out[3]) = (r1_2 - i1_3); c_im(out[3]) = (i1_2 + r1_3); } void fft_twiddle_4(int a, int b, COMPLEX * in, COMPLEX * out, COMPLEX * W, int nW, int nWdn, int m) { int l1, i; COMPLEX *jp, *kp; REAL tmpr, tmpi, wr, wi; if ((b - a) < 128) { for (i = a, l1 = nWdn * i, kp = out + i; i < b; i++, l1 += nWdn, kp++) { jp = in + i; { REAL r1_0, i1_0; REAL r1_1, i1_1; REAL r1_2, i1_2; REAL r1_3, i1_3; { REAL r2_0, i2_0; REAL r2_2, i2_2; r2_0 = c_re(jp[0 * m]); i2_0 = c_im(jp[0 * m]); wr = c_re(W[2 * l1]); wi = c_im(W[2 * l1]); tmpr = c_re(jp[2 * m]); tmpi = c_im(jp[2 * m]); r2_2 = ((wr * tmpr) - (wi * tmpi)); i2_2 = ((wi * tmpr) + (wr * tmpi)); r1_0 = (r2_0 + r2_2); i1_0 = (i2_0 + i2_2); r1_2 = (r2_0 - r2_2); i1_2 = (i2_0 - i2_2); } { REAL r2_1, i2_1; REAL r2_3, i2_3; wr = c_re(W[1 * l1]); wi = c_im(W[1 * l1]); tmpr = c_re(jp[1 * m]); tmpi = c_im(jp[1 * m]); r2_1 = ((wr * tmpr) - (wi * tmpi)); i2_1 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[3 * l1]); wi = c_im(W[3 * l1]); tmpr = c_re(jp[3 * m]); tmpi = c_im(jp[3 * m]); r2_3 = ((wr * tmpr) - (wi * tmpi)); i2_3 = ((wi * tmpr) + (wr * tmpi)); r1_1 = (r2_1 + r2_3); i1_1 = (i2_1 + i2_3); r1_3 = (r2_1 - r2_3); i1_3 = (i2_1 - i2_3); } c_re(kp[0 * m]) = (r1_0 + r1_1); c_im(kp[0 * m]) = (i1_0 + i1_1); c_re(kp[2 * m]) = (r1_0 - r1_1); c_im(kp[2 * m]) = (i1_0 - i1_1); c_re(kp[1 * m]) = (r1_2 + i1_3); c_im(kp[1 * m]) = (i1_2 - r1_3); c_re(kp[3 * m]) = (r1_2 - i1_3); c_im(kp[3 * m]) = (i1_2 + r1_3); } } } else { int ab = (a + b) / 2; #pragma omp task fft_twiddle_4(a, ab, in, out, W, nW, nWdn, m); #pragma omp task fft_twiddle_4(ab, b, in, out, W, nW, nWdn, m); #pragma omp taskwait } } void fft_twiddle_4_seq(int a, int b, COMPLEX * in, COMPLEX * out, COMPLEX * W, int nW, int nWdn, int m) { int l1, i; COMPLEX *jp, *kp; REAL tmpr, tmpi, wr, wi; if ((b - a) < 128) { for (i = a, l1 = nWdn * i, kp = out + i; i < b; i++, l1 += 
nWdn, kp++) { jp = in + i; { REAL r1_0, i1_0; REAL r1_1, i1_1; REAL r1_2, i1_2; REAL r1_3, i1_3; { REAL r2_0, i2_0; REAL r2_2, i2_2; r2_0 = c_re(jp[0 * m]); i2_0 = c_im(jp[0 * m]); wr = c_re(W[2 * l1]); wi = c_im(W[2 * l1]); tmpr = c_re(jp[2 * m]); tmpi = c_im(jp[2 * m]); r2_2 = ((wr * tmpr) - (wi * tmpi)); i2_2 = ((wi * tmpr) + (wr * tmpi)); r1_0 = (r2_0 + r2_2); i1_0 = (i2_0 + i2_2); r1_2 = (r2_0 - r2_2); i1_2 = (i2_0 - i2_2); } { REAL r2_1, i2_1; REAL r2_3, i2_3; wr = c_re(W[1 * l1]); wi = c_im(W[1 * l1]); tmpr = c_re(jp[1 * m]); tmpi = c_im(jp[1 * m]); r2_1 = ((wr * tmpr) - (wi * tmpi)); i2_1 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[3 * l1]); wi = c_im(W[3 * l1]); tmpr = c_re(jp[3 * m]); tmpi = c_im(jp[3 * m]); r2_3 = ((wr * tmpr) - (wi * tmpi)); i2_3 = ((wi * tmpr) + (wr * tmpi)); r1_1 = (r2_1 + r2_3); i1_1 = (i2_1 + i2_3); r1_3 = (r2_1 - r2_3); i1_3 = (i2_1 - i2_3); } c_re(kp[0 * m]) = (r1_0 + r1_1); c_im(kp[0 * m]) = (i1_0 + i1_1); c_re(kp[2 * m]) = (r1_0 - r1_1); c_im(kp[2 * m]) = (i1_0 - i1_1); c_re(kp[1 * m]) = (r1_2 + i1_3); c_im(kp[1 * m]) = (i1_2 - r1_3); c_re(kp[3 * m]) = (r1_2 - i1_3); c_im(kp[3 * m]) = (i1_2 + r1_3); } } } else { int ab = (a + b) / 2; fft_twiddle_4_seq(a, ab, in, out, W, nW, nWdn, m); fft_twiddle_4_seq(ab, b, in, out, W, nW, nWdn, m); } } void fft_unshuffle_4(int a, int b, COMPLEX * in, COMPLEX * out, int m) { int i; const COMPLEX *ip; COMPLEX *jp; if ((b - a) < 128) { ip = in + a * 4; for (i = a; i < b; ++i) { jp = out + i; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; } } else { int ab = (a + b) / 2; #pragma omp task fft_unshuffle_4(a, ab, in, out, m); #pragma omp task fft_unshuffle_4(ab, b, in, out, m); #pragma omp taskwait } } void fft_unshuffle_4_seq(int a, int b, COMPLEX * in, COMPLEX * out, int m) { int i; const COMPLEX *ip; COMPLEX *jp; if ((b - a) < 128) { ip = in + a * 4; for (i = a; i < b; ++i) { jp = out + i; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; 
/* (tail of fft_unshuffle_4_seq — its head lies above this chunk)
 * Serial divide-and-conquer unshuffle: small ranges run the copy loop above;
 * larger ranges split [a,b) at the midpoint and recurse on both halves. */
jp[m] = ip[1]; ip += 2; } } else { int ab = (a + b) / 2; fft_unshuffle_4_seq(a, ab, in, out, m); fft_unshuffle_4_seq(ab, b, in, out, m); } }
/*
 * fft_base_8: fully unrolled 8-point FFT base case.
 * Reads in[0..7] and writes out[0..7]; the r*_k/i*_k locals hold the real and
 * imaginary parts at each butterfly stage (stage 3 -> stage 2 -> stage 1).
 * 0.707106781187 = 1/sqrt(2) = cos(pi/4) = sin(pi/4), the only non-trivial
 * twiddle magnitude an 8-point transform needs.
 */
void fft_base_8(COMPLEX * in, COMPLEX * out)
{
     REAL tmpr, tmpi;
     {
	  REAL r1_0, i1_0; REAL r1_1, i1_1; REAL r1_2, i1_2; REAL r1_3, i1_3; REAL r1_4, i1_4; REAL r1_5, i1_5; REAL r1_6, i1_6; REAL r1_7, i1_7;
	  /* even inputs in[0],in[2],in[4],in[6] */
	  { REAL r2_0, i2_0; REAL r2_2, i2_2; REAL r2_4, i2_4; REAL r2_6, i2_6; { REAL r3_0, i3_0; REAL r3_4, i3_4; r3_0 = c_re(in[0]); i3_0 = c_im(in[0]); r3_4 = c_re(in[4]); i3_4 = c_im(in[4]); r2_0 = (r3_0 + r3_4); i2_0 = (i3_0 + i3_4); r2_4 = (r3_0 - r3_4); i2_4 = (i3_0 - i3_4); } { REAL r3_2, i3_2; REAL r3_6, i3_6; r3_2 = c_re(in[2]); i3_2 = c_im(in[2]); r3_6 = c_re(in[6]); i3_6 = c_im(in[6]); r2_2 = (r3_2 + r3_6); i2_2 = (i3_2 + i3_6); r2_6 = (r3_2 - r3_6); i2_6 = (i3_2 - i3_6); } r1_0 = (r2_0 + r2_2); i1_0 = (i2_0 + i2_2); r1_4 = (r2_0 - r2_2); i1_4 = (i2_0 - i2_2); r1_2 = (r2_4 + i2_6); i1_2 = (i2_4 - r2_6); r1_6 = (r2_4 - i2_6); i1_6 = (i2_4 + r2_6); }
	  /* odd inputs in[1],in[3],in[5],in[7] */
	  { REAL r2_1, i2_1; REAL r2_3, i2_3; REAL r2_5, i2_5; REAL r2_7, i2_7; { REAL r3_1, i3_1; REAL r3_5, i3_5; r3_1 = c_re(in[1]); i3_1 = c_im(in[1]); r3_5 = c_re(in[5]); i3_5 = c_im(in[5]); r2_1 = (r3_1 + r3_5); i2_1 = (i3_1 + i3_5); r2_5 = (r3_1 - r3_5); i2_5 = (i3_1 - i3_5); } { REAL r3_3, i3_3; REAL r3_7, i3_7; r3_3 = c_re(in[3]); i3_3 = c_im(in[3]); r3_7 = c_re(in[7]); i3_7 = c_im(in[7]); r2_3 = (r3_3 + r3_7); i2_3 = (i3_3 + i3_7); r2_7 = (r3_3 - r3_7); i2_7 = (i3_3 - i3_7); } r1_1 = (r2_1 + r2_3); i1_1 = (i2_1 + i2_3); r1_5 = (r2_1 - r2_3); i1_5 = (i2_1 - i2_3); r1_3 = (r2_5 + i2_7); i1_3 = (i2_5 - r2_7); r1_7 = (r2_5 - i2_7); i1_7 = (i2_5 + r2_7); }
	  /* final stage: combine even/odd halves into out[0..7] */
	  c_re(out[0]) = (r1_0 + r1_1); c_im(out[0]) = (i1_0 + i1_1); c_re(out[4]) = (r1_0 - r1_1); c_im(out[4]) = (i1_0 - i1_1); tmpr = (0.707106781187 * (r1_3 + i1_3)); tmpi = (0.707106781187 * (i1_3 - r1_3)); c_re(out[1]) = (r1_2 + tmpr); c_im(out[1]) = (i1_2 + tmpi); c_re(out[5]) = (r1_2 - tmpr); c_im(out[5]) = (i1_2 - tmpi); c_re(out[2]) = (r1_4 + i1_5); c_im(out[2]) = (i1_4 - r1_5); c_re(out[6]) = (r1_4 - i1_5); c_im(out[6]) = (i1_4 + r1_5); tmpr = (0.707106781187 * (i1_7 - r1_7)); tmpi = (0.707106781187 * (r1_7 + i1_7)); c_re(out[3]) = (r1_6 + tmpr); c_im(out[3]) = (i1_6 - tmpi); c_re(out[7]) = (r1_6 - tmpr); c_im(out[7]) = (i1_6 + tmpi);
     }
}
/*
 * fft_twiddle_8: twiddle-multiply + radix-8 butterfly over columns [a,b).
 * For each column i (with twiddle index l1 = nWdn * i), the strided inputs
 * jp[k*m] are multiplied by twiddle factors W[k*l1] — each
 * (wr*tmpr - wi*tmpi, wi*tmpr + wr*tmpi) pair is one complex multiply — and
 * the same unrolled 8-point butterfly as fft_base_8 writes the results to
 * kp[k*m].  Ranges of fewer than 128 columns run the loop serially; larger
 * ranges split at the midpoint and recurse as two OpenMP tasks (the task
 * spawn and taskwait appear on the next source line).
 */
void fft_twiddle_8(int a, int b, COMPLEX * in, COMPLEX * out, COMPLEX * W, int nW, int nWdn, int m)
{
     int l1, i;
     COMPLEX *jp, *kp;
     REAL tmpr, tmpi, wr, wi;
     if ((b - a) < 128) {
	  for (i = a, l1 = nWdn * i, kp = out + i; i < b; i++, l1 += nWdn, kp++) { jp = in + i; { REAL r1_0, i1_0; REAL r1_1, i1_1; REAL r1_2, i1_2; REAL r1_3, i1_3; REAL r1_4, i1_4; REAL r1_5, i1_5; REAL r1_6, i1_6; REAL r1_7, i1_7; { REAL r2_0, i2_0; REAL r2_2, i2_2; REAL r2_4, i2_4; REAL r2_6, i2_6; { REAL r3_0, i3_0; REAL r3_4, i3_4; r3_0 = c_re(jp[0 * m]); i3_0 = c_im(jp[0 * m]); wr = c_re(W[4 * l1]); wi = c_im(W[4 * l1]); tmpr = c_re(jp[4 * m]); tmpi = c_im(jp[4 * m]); r3_4 = ((wr * tmpr) - (wi * tmpi)); i3_4 = ((wi * tmpr) + (wr * tmpi)); r2_0 = (r3_0 + r3_4); i2_0 = (i3_0 + i3_4); r2_4 = (r3_0 - r3_4); i2_4 = (i3_0 - i3_4); } { REAL r3_2, i3_2; REAL r3_6, i3_6; wr = c_re(W[2 * l1]); wi = c_im(W[2 * l1]); tmpr = c_re(jp[2 * m]); tmpi = c_im(jp[2 * m]); r3_2 = ((wr * tmpr) - (wi * tmpi)); i3_2 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[6 * l1]); wi = c_im(W[6 * l1]); tmpr = c_re(jp[6 * m]); tmpi = c_im(jp[6 * m]); r3_6 = ((wr * tmpr) - (wi * tmpi)); i3_6 = ((wi * tmpr) + (wr * tmpi)); r2_2 = (r3_2 + r3_6); i2_2 = (i3_2 + i3_6); r2_6 = (r3_2 - r3_6); i2_6 = (i3_2 - i3_6); } r1_0 = (r2_0 + r2_2); i1_0 = (i2_0 + i2_2); r1_4 = (r2_0 - r2_2); i1_4 = (i2_0 - i2_2); r1_2 = (r2_4 + i2_6); i1_2 = (i2_4 - r2_6); r1_6 = (r2_4 - i2_6); i1_6 = (i2_4 + r2_6); } { REAL r2_1, i2_1; REAL r2_3, i2_3; REAL r2_5, i2_5; REAL r2_7, i2_7; { REAL r3_1, i3_1; REAL r3_5, i3_5; wr = c_re(W[1 * l1]); wi = c_im(W[1 * l1]); tmpr = c_re(jp[1 * m]); tmpi = c_im(jp[1 * m]); r3_1 = ((wr * tmpr) - (wi * tmpi)); i3_1 = ((wi *
tmpr) + (wr * tmpi)); wr = c_re(W[5 * l1]); wi = c_im(W[5 * l1]); tmpr = c_re(jp[5 * m]); tmpi = c_im(jp[5 * m]); r3_5 = ((wr * tmpr) - (wi * tmpi)); i3_5 = ((wi * tmpr) + (wr * tmpi)); r2_1 = (r3_1 + r3_5); i2_1 = (i3_1 + i3_5); r2_5 = (r3_1 - r3_5); i2_5 = (i3_1 - i3_5); } { REAL r3_3, i3_3; REAL r3_7, i3_7; wr = c_re(W[3 * l1]); wi = c_im(W[3 * l1]); tmpr = c_re(jp[3 * m]); tmpi = c_im(jp[3 * m]); r3_3 = ((wr * tmpr) - (wi * tmpi)); i3_3 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[7 * l1]); wi = c_im(W[7 * l1]); tmpr = c_re(jp[7 * m]); tmpi = c_im(jp[7 * m]); r3_7 = ((wr * tmpr) - (wi * tmpi)); i3_7 = ((wi * tmpr) + (wr * tmpi)); r2_3 = (r3_3 + r3_7); i2_3 = (i3_3 + i3_7); r2_7 = (r3_3 - r3_7); i2_7 = (i3_3 - i3_7); } r1_1 = (r2_1 + r2_3); i1_1 = (i2_1 + i2_3); r1_5 = (r2_1 - r2_3); i1_5 = (i2_1 - i2_3); r1_3 = (r2_5 + i2_7); i1_3 = (i2_5 - r2_7); r1_7 = (r2_5 - i2_7); i1_7 = (i2_5 + r2_7); }
	  /* write the 8 strided outputs of this column (same final stage as fft_base_8) */
	  c_re(kp[0 * m]) = (r1_0 + r1_1); c_im(kp[0 * m]) = (i1_0 + i1_1); c_re(kp[4 * m]) = (r1_0 - r1_1); c_im(kp[4 * m]) = (i1_0 - i1_1); tmpr = (0.707106781187 * (r1_3 + i1_3)); tmpi = (0.707106781187 * (i1_3 - r1_3)); c_re(kp[1 * m]) = (r1_2 + tmpr); c_im(kp[1 * m]) = (i1_2 + tmpi); c_re(kp[5 * m]) = (r1_2 - tmpr); c_im(kp[5 * m]) = (i1_2 - tmpi); c_re(kp[2 * m]) = (r1_4 + i1_5); c_im(kp[2 * m]) = (i1_4 - r1_5); c_re(kp[6 * m]) = (r1_4 - i1_5); c_im(kp[6 * m]) = (i1_4 + r1_5); tmpr = (0.707106781187 * (i1_7 - r1_7)); tmpi = (0.707106781187 * (r1_7 + i1_7)); c_re(kp[3 * m]) = (r1_6 + tmpr); c_im(kp[3 * m]) = (i1_6 - tmpi); c_re(kp[7 * m]) = (r1_6 - tmpr); c_im(kp[7 * m]) = (i1_6 + tmpi); } }
     } else {
	  /* parallel divide-and-conquer: each half becomes an OpenMP task,
	   * taskwait joins both before returning */
	  int ab = (a + b) / 2;
#pragma omp task
	  fft_twiddle_8(a, ab, in, out, W, nW, nWdn, m);
#pragma omp task
	  fft_twiddle_8(ab, b, in, out, W, nW, nWdn, m);
#pragma omp taskwait
     }
}
/*
 * fft_twiddle_8_seq: sequential variant of fft_twiddle_8.
 * Identical per-column arithmetic (twiddle complex-multiplies W[k*l1] into
 * jp[k*m], then the unrolled 8-point butterfly into kp[k*m]); the only
 * difference is plain recursion instead of OpenMP tasks above the 128-column
 * cutoff.
 */
void fft_twiddle_8_seq(int a, int b, COMPLEX * in, COMPLEX * out, COMPLEX * W, int nW, int nWdn, int m)
{
     int l1, i;
     COMPLEX *jp, *kp;
     REAL tmpr, tmpi, wr, wi;
     if ((b - a) < 128) {
	  for (i = a, l1 = nWdn * i, kp = out + i; i < b; i++, l1 += nWdn, kp++) { jp = in + i; { REAL r1_0, i1_0; REAL r1_1, i1_1; REAL r1_2, i1_2; REAL r1_3, i1_3; REAL r1_4, i1_4; REAL r1_5, i1_5; REAL r1_6, i1_6; REAL r1_7, i1_7; { REAL r2_0, i2_0; REAL r2_2, i2_2; REAL r2_4, i2_4; REAL r2_6, i2_6; { REAL r3_0, i3_0; REAL r3_4, i3_4; r3_0 = c_re(jp[0 * m]); i3_0 = c_im(jp[0 * m]); wr = c_re(W[4 * l1]); wi = c_im(W[4 * l1]); tmpr = c_re(jp[4 * m]); tmpi = c_im(jp[4 * m]); r3_4 = ((wr * tmpr) - (wi * tmpi)); i3_4 = ((wi * tmpr) + (wr * tmpi)); r2_0 = (r3_0 + r3_4); i2_0 = (i3_0 + i3_4); r2_4 = (r3_0 - r3_4); i2_4 = (i3_0 - i3_4); } { REAL r3_2, i3_2; REAL r3_6, i3_6; wr = c_re(W[2 * l1]); wi = c_im(W[2 * l1]); tmpr = c_re(jp[2 * m]); tmpi = c_im(jp[2 * m]); r3_2 = ((wr * tmpr) - (wi * tmpi)); i3_2 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[6 * l1]); wi = c_im(W[6 * l1]); tmpr = c_re(jp[6 * m]); tmpi = c_im(jp[6 * m]); r3_6 = ((wr * tmpr) - (wi * tmpi)); i3_6 = ((wi * tmpr) + (wr * tmpi)); r2_2 = (r3_2 + r3_6); i2_2 = (i3_2 + i3_6); r2_6 = (r3_2 - r3_6); i2_6 = (i3_2 - i3_6); } r1_0 = (r2_0 + r2_2); i1_0 = (i2_0 + i2_2); r1_4 = (r2_0 - r2_2); i1_4 = (i2_0 - i2_2); r1_2 = (r2_4 + i2_6); i1_2 = (i2_4 - r2_6); r1_6 = (r2_4 - i2_6); i1_6 = (i2_4 + r2_6); } { REAL r2_1, i2_1; REAL r2_3, i2_3; REAL r2_5, i2_5; REAL r2_7, i2_7; { REAL r3_1, i3_1; REAL r3_5, i3_5; wr = c_re(W[1 * l1]); wi = c_im(W[1 * l1]); tmpr = c_re(jp[1 * m]); tmpi = c_im(jp[1 * m]); r3_1 = ((wr * tmpr) - (wi * tmpi)); i3_1 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[5 * l1]); wi = c_im(W[5 * l1]); tmpr = c_re(jp[5 * m]); tmpi = c_im(jp[5 * m]); r3_5 = ((wr * tmpr) - (wi * tmpi)); i3_5 = ((wi * tmpr) + (wr * tmpi)); r2_1 = (r3_1 + r3_5); i2_1 = (i3_1 + i3_5); r2_5 = (r3_1 - r3_5); i2_5 = (i3_1 - i3_5); } { REAL r3_3, i3_3; REAL r3_7, i3_7; wr = c_re(W[3 * l1]); wi = c_im(W[3 * l1]); tmpr = c_re(jp[3 * m]); tmpi = c_im(jp[3 * m]); r3_3 = ((wr * tmpr) - (wi * tmpi)); i3_3 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[7 * l1]); tmpr = c_re(jp[7 * m]); tmpi = c_im(jp[7 * m]); r3_7 = ((wr * tmpr) - (wi * tmpi)); i3_7 = ((wi * tmpr) + (wr * tmpi)); r2_3 = (r3_3 + r3_7); i2_3 = (i3_3 + i3_7); r2_7 = (r3_3 - r3_7); i2_7 = (i3_3 - i3_7); } r1_1 = (r2_1 + r2_3); i1_1 = (i2_1 + i2_3); r1_5 = (r2_1 - r2_3); i1_5 = (i2_1 - i2_3); r1_3 = (r2_5 + i2_7); i1_3 = (i2_5 - r2_7); r1_7 = (r2_5 - i2_7); i1_7 = (i2_5 + r2_7); } c_re(kp[0 * m]) = (r1_0 + r1_1); c_im(kp[0 * m]) = (i1_0 + i1_1); c_re(kp[4 * m]) = (r1_0 - r1_1); c_im(kp[4 * m]) = (i1_0 - i1_1); tmpr = (0.707106781187 * (r1_3 + i1_3)); tmpi = (0.707106781187 * (i1_3 - r1_3)); c_re(kp[1 * m]) = (r1_2 + tmpr); c_im(kp[1 * m]) = (i1_2 + tmpi); c_re(kp[5 * m]) = (r1_2 - tmpr); c_im(kp[5 * m]) = (i1_2 - tmpi); c_re(kp[2 * m]) = (r1_4 + i1_5); c_im(kp[2 * m]) = (i1_4 - r1_5); c_re(kp[6 * m]) = (r1_4 - i1_5); c_im(kp[6 * m]) = (i1_4 + r1_5); tmpr = (0.707106781187 * (i1_7 - r1_7)); tmpi = (0.707106781187 * (r1_7 + i1_7)); c_re(kp[3 * m]) = (r1_6 + tmpr); c_im(kp[3 * m]) = (i1_6 - tmpi); c_re(kp[7 * m]) = (r1_6 - tmpr); c_im(kp[7 * m]) = (i1_6 + tmpi); } }
     } else {
	  /* plain recursion — no tasking in the _seq variant */
	  int ab = (a + b) / 2;
	  fft_twiddle_8_seq(a, ab, in, out, W, nW, nWdn, m);
	  fft_twiddle_8_seq(ab, b, in, out, W, nW, nWdn, m);
     }
}
/*
 * fft_unshuffle_8: radix-8 unshuffle, parallel version.
 * For each column i in [a,b), distributes the 8 consecutive inputs
 * in[8*i .. 8*i+7] to the strided outputs out[i + k*m], k = 0..7 (the loop
 * body copies two elements per step, advancing ip by 2 and jp by 2*m, four
 * times).  Ranges of 128+ columns split in half as two OpenMP tasks.
 */
void fft_unshuffle_8(int a, int b, COMPLEX * in, COMPLEX * out, int m)
{
     int i;
     const COMPLEX *ip;
     COMPLEX *jp;
     if ((b - a) < 128) {
	  ip = in + a * 8;
	  for (i = a; i < b; ++i) { jp = out + i; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; }
     } else {
	  int ab = (a + b) / 2;
#pragma omp task
	  fft_unshuffle_8(a, ab, in, out, m);
#pragma omp task
	  fft_unshuffle_8(ab, b, in, out, m);
#pragma omp taskwait
     }
}
/*
 * fft_unshuffle_8_seq: sequential variant of fft_unshuffle_8 — identical
 * copy pattern, plain recursion instead of OpenMP tasks.
 */
void fft_unshuffle_8_seq(int a, int b, COMPLEX * in, COMPLEX * out, int m)
{
     int i;
     const COMPLEX *ip;
     COMPLEX *jp;
     if ((b - a) < 128) {
	  ip = in + a * 8;
	  for (i = a; i < b; ++i) { jp = out + i; jp[0] = ip[0]; jp[m] = ip[1];
ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; }
     } else {
	  int ab = (a + b) / 2;
	  fft_unshuffle_8_seq(a, ab, in, out, m);
	  fft_unshuffle_8_seq(ab, b, in, out, m);
     }
}
/*
 * fft_base_16: fully unrolled 16-point FFT base case.
 * Reads in[0..15] and writes out[0..15] through four butterfly stages
 * (r4_*/i4_* -> r3_*/i3_* -> r2_*/i2_* -> r1_*/i1_*).  Twiddle constants:
 * 0.707106781187 = cos(pi/4), 0.923879532511 = cos(pi/8),
 * 0.382683432365 = sin(pi/8).
 */
void fft_base_16(COMPLEX * in, COMPLEX * out)
{
     REAL tmpr, tmpi;
     {
	  REAL r1_0, i1_0; REAL r1_1, i1_1; REAL r1_2, i1_2; REAL r1_3, i1_3; REAL r1_4, i1_4; REAL r1_5, i1_5; REAL r1_6, i1_6; REAL r1_7, i1_7; REAL r1_8, i1_8; REAL r1_9, i1_9; REAL r1_10, i1_10; REAL r1_11, i1_11; REAL r1_12, i1_12; REAL r1_13, i1_13; REAL r1_14, i1_14; REAL r1_15, i1_15;
	  /* even inputs in[0],in[2],...,in[14] */
	  { REAL r2_0, i2_0; REAL r2_2, i2_2; REAL r2_4, i2_4; REAL r2_6, i2_6; REAL r2_8, i2_8; REAL r2_10, i2_10; REAL r2_12, i2_12; REAL r2_14, i2_14; { REAL r3_0, i3_0; REAL r3_4, i3_4; REAL r3_8, i3_8; REAL r3_12, i3_12; { REAL r4_0, i4_0; REAL r4_8, i4_8; r4_0 = c_re(in[0]); i4_0 = c_im(in[0]); r4_8 = c_re(in[8]); i4_8 = c_im(in[8]); r3_0 = (r4_0 + r4_8); i3_0 = (i4_0 + i4_8); r3_8 = (r4_0 - r4_8); i3_8 = (i4_0 - i4_8); } { REAL r4_4, i4_4; REAL r4_12, i4_12; r4_4 = c_re(in[4]); i4_4 = c_im(in[4]); r4_12 = c_re(in[12]); i4_12 = c_im(in[12]); r3_4 = (r4_4 + r4_12); i3_4 = (i4_4 + i4_12); r3_12 = (r4_4 - r4_12); i3_12 = (i4_4 - i4_12); } r2_0 = (r3_0 + r3_4); i2_0 = (i3_0 + i3_4); r2_8 = (r3_0 - r3_4); i2_8 = (i3_0 - i3_4); r2_4 = (r3_8 + i3_12); i2_4 = (i3_8 - r3_12); r2_12 = (r3_8 - i3_12); i2_12 = (i3_8 + r3_12); } { REAL r3_2, i3_2; REAL r3_6, i3_6; REAL r3_10, i3_10; REAL r3_14, i3_14; { REAL r4_2, i4_2; REAL r4_10, i4_10; r4_2 = c_re(in[2]); i4_2 = c_im(in[2]); r4_10 = c_re(in[10]); i4_10 = c_im(in[10]); r3_2 = (r4_2 + r4_10); i3_2 = (i4_2 + i4_10); r3_10 = (r4_2 - r4_10); i3_10 = (i4_2 - i4_10); } { REAL r4_6, i4_6; REAL r4_14, i4_14; r4_6 = c_re(in[6]); i4_6 = c_im(in[6]); r4_14 = c_re(in[14]); i4_14 = c_im(in[14]); r3_6 = (r4_6 + r4_14); i3_6 = (i4_6 + i4_14); r3_14 = (r4_6 - r4_14); i3_14 = (i4_6 - i4_14); } r2_2 = (r3_2 + r3_6); i2_2 = (i3_2 + i3_6); r2_10 = (r3_2 - r3_6); i2_10 = (i3_2 - i3_6); r2_6 = (r3_10 + i3_14); i2_6 = (i3_10 - r3_14); r2_14 = (r3_10 - i3_14); i2_14 = (i3_10 + r3_14); } r1_0 = (r2_0 + r2_2); i1_0 = (i2_0 + i2_2); r1_8 = (r2_0 - r2_2); i1_8 = (i2_0 - i2_2); tmpr = (0.707106781187 * (r2_6 + i2_6)); tmpi = (0.707106781187 * (i2_6 - r2_6)); r1_2 = (r2_4 + tmpr); i1_2 = (i2_4 + tmpi); r1_10 = (r2_4 - tmpr); i1_10 = (i2_4 - tmpi); r1_4 = (r2_8 + i2_10); i1_4 = (i2_8 - r2_10); r1_12 = (r2_8 - i2_10); i1_12 = (i2_8 + r2_10); tmpr = (0.707106781187 * (i2_14 - r2_14)); tmpi = (0.707106781187 * (r2_14 + i2_14)); r1_6 = (r2_12 + tmpr); i1_6 = (i2_12 - tmpi); r1_14 = (r2_12 - tmpr); i1_14 = (i2_12 + tmpi); }
	  /* odd inputs in[1],in[3],...,in[15] */
	  { REAL r2_1, i2_1; REAL r2_3, i2_3; REAL r2_5, i2_5; REAL r2_7, i2_7; REAL r2_9, i2_9; REAL r2_11, i2_11; REAL r2_13, i2_13; REAL r2_15, i2_15; { REAL r3_1, i3_1; REAL r3_5, i3_5; REAL r3_9, i3_9; REAL r3_13, i3_13; { REAL r4_1, i4_1; REAL r4_9, i4_9; r4_1 = c_re(in[1]); i4_1 = c_im(in[1]); r4_9 = c_re(in[9]); i4_9 = c_im(in[9]); r3_1 = (r4_1 + r4_9); i3_1 = (i4_1 + i4_9); r3_9 = (r4_1 - r4_9); i3_9 = (i4_1 - i4_9); } { REAL r4_5, i4_5; REAL r4_13, i4_13; r4_5 = c_re(in[5]); i4_5 = c_im(in[5]); r4_13 = c_re(in[13]); i4_13 = c_im(in[13]); r3_5 = (r4_5 + r4_13); i3_5 = (i4_5 + i4_13); r3_13 = (r4_5 - r4_13); i3_13 = (i4_5 - i4_13); } r2_1 = (r3_1 + r3_5); i2_1 = (i3_1 + i3_5); r2_9 = (r3_1 - r3_5); i2_9 = (i3_1 - i3_5); r2_5 = (r3_9 + i3_13); i2_5 = (i3_9 - r3_13); r2_13 = (r3_9 - i3_13); i2_13 = (i3_9 + r3_13); } { REAL r3_3, i3_3; REAL r3_7, i3_7; REAL r3_11, i3_11; REAL r3_15, i3_15; { REAL r4_3, i4_3; REAL r4_11, i4_11; r4_3 = c_re(in[3]); i4_3 = c_im(in[3]); r4_11 = c_re(in[11]); i4_11 = c_im(in[11]); r3_3 = (r4_3 + r4_11); i3_3 = (i4_3 + i4_11); r3_11 = (r4_3 - r4_11); i3_11 = (i4_3 - i4_11); } { REAL r4_7, i4_7; REAL r4_15, i4_15; r4_7 = c_re(in[7]); i4_7 = c_im(in[7]); r4_15 = c_re(in[15]); i4_15 = c_im(in[15]); r3_7 = (r4_7 + r4_15); i3_7 = (i4_7 + i4_15); r3_15 = (r4_7 - r4_15); i3_15 = (i4_7 - i4_15); } r2_3 = (r3_3 + r3_7); i2_3 = (i3_3 + i3_7); r2_11 = (r3_3 - r3_7); i2_11 = (i3_3 - i3_7); r2_7 = (r3_11 + i3_15); i2_7 = (i3_11 - r3_15); r2_15 = (r3_11 - i3_15); i2_15 = (i3_11 + r3_15); } r1_1 = (r2_1 + r2_3); i1_1 = (i2_1 + i2_3); r1_9 = (r2_1 - r2_3); i1_9 = (i2_1 - i2_3); tmpr = (0.707106781187 * (r2_7 + i2_7)); tmpi = (0.707106781187 * (i2_7 - r2_7)); r1_3 = (r2_5 + tmpr); i1_3 = (i2_5 + tmpi); r1_11 = (r2_5 - tmpr); i1_11 = (i2_5 - tmpi); r1_5 = (r2_9 + i2_11); i1_5 = (i2_9 - r2_11); r1_13 = (r2_9 - i2_11); i1_13 = (i2_9 + r2_11); tmpr = (0.707106781187 * (i2_15 - r2_15)); tmpi = (0.707106781187 * (r2_15 + i2_15)); r1_7 = (r2_13 + tmpr); i1_7 = (i2_13 - tmpi); r1_15 = (r2_13 - tmpr); i1_15 = (i2_13 + tmpi); }
	  /* final stage: combine even/odd halves into out[0..15] */
	  c_re(out[0]) = (r1_0 + r1_1); c_im(out[0]) = (i1_0 + i1_1); c_re(out[8]) = (r1_0 - r1_1); c_im(out[8]) = (i1_0 - i1_1); tmpr = ((0.923879532511 * r1_3) + (0.382683432365 * i1_3)); tmpi = ((0.923879532511 * i1_3) - (0.382683432365 * r1_3)); c_re(out[1]) = (r1_2 + tmpr); c_im(out[1]) = (i1_2 + tmpi); c_re(out[9]) = (r1_2 - tmpr); c_im(out[9]) = (i1_2 - tmpi); tmpr = (0.707106781187 * (r1_5 + i1_5)); tmpi = (0.707106781187 * (i1_5 - r1_5)); c_re(out[2]) = (r1_4 + tmpr); c_im(out[2]) = (i1_4 + tmpi); c_re(out[10]) = (r1_4 - tmpr); c_im(out[10]) = (i1_4 - tmpi); tmpr = ((0.382683432365 * r1_7) + (0.923879532511 * i1_7)); tmpi = ((0.382683432365 * i1_7) - (0.923879532511 * r1_7)); c_re(out[3]) = (r1_6 + tmpr); c_im(out[3]) = (i1_6 + tmpi); c_re(out[11]) = (r1_6 - tmpr); c_im(out[11]) = (i1_6 - tmpi); c_re(out[4]) = (r1_8 + i1_9); c_im(out[4]) = (i1_8 - r1_9); c_re(out[12]) = (r1_8 - i1_9); c_im(out[12]) = (i1_8 + r1_9); tmpr = ((0.923879532511 * i1_11) - (0.382683432365 * r1_11)); tmpi = ((0.923879532511 * r1_11) + (0.382683432365 * i1_11)); c_re(out[5]) = (r1_10 + tmpr); c_im(out[5]) = (i1_10 - tmpi); c_re(out[13]) = (r1_10 - tmpr); c_im(out[13]) = (i1_10 + tmpi); tmpr = (0.707106781187 * (i1_13 - r1_13)); tmpi = (0.707106781187 * (r1_13 + i1_13)); c_re(out[6]) = (r1_12 + tmpr); c_im(out[6]) = (i1_12 - tmpi); c_re(out[14]) = (r1_12 - tmpr); c_im(out[14]) = (i1_12 + tmpi); tmpr = ((0.382683432365 * i1_15) - (0.923879532511 * r1_15)); tmpi = ((0.382683432365 * r1_15) + (0.923879532511 * i1_15)); c_re(out[7]) = (r1_14 + tmpr); c_im(out[7]) = (i1_14 - tmpi); c_re(out[15]) = (r1_14 - tmpr); c_im(out[15]) = (i1_14 + tmpi);
     }
}
/*
 * fft_twiddle_16: twiddle-multiply + radix-16 butterfly over columns [a,b).
 * Same structure as fft_twiddle_8 but with 16 strided inputs jp[k*m]
 * multiplied by twiddle factors W[k*l1] (complex multiplies) and a fully
 * unrolled 16-point butterfly into kp[k*m].  Below 128 columns: serial loop;
 * otherwise the range splits in half as two OpenMP tasks (spawned several
 * source lines below).
 */
void fft_twiddle_16(int a, int b, COMPLEX * in, COMPLEX * out, COMPLEX * W, int nW, int nWdn, int m)
{
     int l1, i;
     COMPLEX *jp, *kp;
     REAL tmpr, tmpi, wr, wi;
     if ((b - a) < 128) {
	  for (i = a, l1 = nWdn * i, kp = out + i; i < b; i++, l1 += nWdn, kp++) { jp = in + i; { REAL r1_0, i1_0; REAL r1_1, i1_1; REAL r1_2, i1_2; REAL r1_3, i1_3; REAL r1_4, i1_4; REAL r1_5, i1_5; REAL r1_6, i1_6; REAL r1_7, i1_7; REAL r1_8, i1_8; REAL r1_9, i1_9; REAL r1_10, i1_10; REAL r1_11, i1_11; REAL r1_12, i1_12; REAL r1_13, i1_13; REAL r1_14, i1_14; REAL r1_15, i1_15; { REAL r2_0, i2_0; REAL r2_2, i2_2; REAL r2_4, i2_4; REAL r2_6, i2_6; REAL r2_8, i2_8; REAL r2_10, i2_10; REAL r2_12, i2_12; REAL r2_14, i2_14; { REAL r3_0, i3_0; REAL r3_4, i3_4; REAL r3_8, i3_8; REAL r3_12, i3_12; { REAL r4_0, i4_0; REAL r4_8, i4_8; r4_0 = c_re(jp[0 * m]); i4_0 = c_im(jp[0 * m]); wr = c_re(W[8 * l1]); wi = c_im(W[8 * l1]); tmpr = c_re(jp[8 * m]); tmpi = c_im(jp[8 * m]); r4_8 = ((wr * tmpr) - (wi * tmpi)); i4_8 = ((wi * tmpr) + (wr * tmpi)); r3_0 = (r4_0 + r4_8); i3_0 = (i4_0 + i4_8); r3_8 = (r4_0 - r4_8); i3_8 = (i4_0 - i4_8); } { REAL r4_4, i4_4; REAL r4_12, i4_12; wr = c_re(W[4 * l1]); wi = c_im(W[4 * l1]); tmpr = c_re(jp[4 * m]); tmpi = c_im(jp[4 * m]); r4_4 = ((wr * tmpr) - (wi * tmpi)); i4_4 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[12 * l1]); wi = c_im(W[12 * l1]); tmpr = c_re(jp[12 * m]); tmpi = c_im(jp[12 * m]); r4_12 = ((wr * tmpr) - (wi * tmpi)); i4_12 = ((wi * tmpr) + (wr * tmpi)); r3_4 = (r4_4 + r4_12); i3_4 = (i4_4 + i4_12); r3_12 = (r4_4 - r4_12); i3_12 = (i4_4 - i4_12); } r2_0 =
(r3_0 + r3_4); i2_0 = (i3_0 + i3_4); r2_8 = (r3_0 - r3_4); i2_8 = (i3_0 - i3_4); r2_4 = (r3_8 + i3_12); i2_4 = (i3_8 - r3_12); r2_12 = (r3_8 - i3_12); i2_12 = (i3_8 + r3_12); } { REAL r3_2, i3_2; REAL r3_6, i3_6; REAL r3_10, i3_10; REAL r3_14, i3_14; { REAL r4_2, i4_2; REAL r4_10, i4_10; wr = c_re(W[2 * l1]); wi = c_im(W[2 * l1]); tmpr = c_re(jp[2 * m]); tmpi = c_im(jp[2 * m]); r4_2 = ((wr * tmpr) - (wi * tmpi)); i4_2 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[10 * l1]); wi = c_im(W[10 * l1]); tmpr = c_re(jp[10 * m]); tmpi = c_im(jp[10 * m]); r4_10 = ((wr * tmpr) - (wi * tmpi)); i4_10 = ((wi * tmpr) + (wr * tmpi)); r3_2 = (r4_2 + r4_10); i3_2 = (i4_2 + i4_10); r3_10 = (r4_2 - r4_10); i3_10 = (i4_2 - i4_10); } { REAL r4_6, i4_6; REAL r4_14, i4_14; wr = c_re(W[6 * l1]); wi = c_im(W[6 * l1]); tmpr = c_re(jp[6 * m]); tmpi = c_im(jp[6 * m]); r4_6 = ((wr * tmpr) - (wi * tmpi)); i4_6 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[14 * l1]); wi = c_im(W[14 * l1]); tmpr = c_re(jp[14 * m]); tmpi = c_im(jp[14 * m]); r4_14 = ((wr * tmpr) - (wi * tmpi)); i4_14 = ((wi * tmpr) + (wr * tmpi)); r3_6 = (r4_6 + r4_14); i3_6 = (i4_6 + i4_14); r3_14 = (r4_6 - r4_14); i3_14 = (i4_6 - i4_14); } r2_2 = (r3_2 + r3_6); i2_2 = (i3_2 + i3_6); r2_10 = (r3_2 - r3_6); i2_10 = (i3_2 - i3_6); r2_6 = (r3_10 + i3_14); i2_6 = (i3_10 - r3_14); r2_14 = (r3_10 - i3_14); i2_14 = (i3_10 + r3_14); } r1_0 = (r2_0 + r2_2); i1_0 = (i2_0 + i2_2); r1_8 = (r2_0 - r2_2); i1_8 = (i2_0 - i2_2); tmpr = (0.707106781187 * (r2_6 + i2_6)); tmpi = (0.707106781187 * (i2_6 - r2_6)); r1_2 = (r2_4 + tmpr); i1_2 = (i2_4 + tmpi); r1_10 = (r2_4 - tmpr); i1_10 = (i2_4 - tmpi); r1_4 = (r2_8 + i2_10); i1_4 = (i2_8 - r2_10); r1_12 = (r2_8 - i2_10); i1_12 = (i2_8 + r2_10); tmpr = (0.707106781187 * (i2_14 - r2_14)); tmpi = (0.707106781187 * (r2_14 + i2_14)); r1_6 = (r2_12 + tmpr); i1_6 = (i2_12 - tmpi); r1_14 = (r2_12 - tmpr); i1_14 = (i2_12 + tmpi); }
	  /* odd strided inputs jp[1*m],jp[3*m],...,jp[15*m] */
	  { REAL r2_1, i2_1; REAL r2_3, i2_3; REAL r2_5, i2_5; REAL r2_7, i2_7; REAL r2_9, i2_9; REAL r2_11, i2_11; REAL r2_13, i2_13; REAL r2_15, i2_15; { REAL r3_1, i3_1; REAL r3_5, i3_5; REAL r3_9, i3_9; REAL r3_13, i3_13; { REAL r4_1, i4_1; REAL r4_9, i4_9; wr = c_re(W[1 * l1]); wi = c_im(W[1 * l1]); tmpr = c_re(jp[1 * m]); tmpi = c_im(jp[1 * m]); r4_1 = ((wr * tmpr) - (wi * tmpi)); i4_1 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[9 * l1]); wi = c_im(W[9 * l1]); tmpr = c_re(jp[9 * m]); tmpi = c_im(jp[9 * m]); r4_9 = ((wr * tmpr) - (wi * tmpi)); i4_9 = ((wi * tmpr) + (wr * tmpi)); r3_1 = (r4_1 + r4_9); i3_1 = (i4_1 + i4_9); r3_9 = (r4_1 - r4_9); i3_9 = (i4_1 - i4_9); } { REAL r4_5, i4_5; REAL r4_13, i4_13; wr = c_re(W[5 * l1]); wi = c_im(W[5 * l1]); tmpr = c_re(jp[5 * m]); tmpi = c_im(jp[5 * m]); r4_5 = ((wr * tmpr) - (wi * tmpi)); i4_5 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[13 * l1]); wi = c_im(W[13 * l1]); tmpr = c_re(jp[13 * m]); tmpi = c_im(jp[13 * m]); r4_13 = ((wr * tmpr) - (wi * tmpi)); i4_13 = ((wi * tmpr) + (wr * tmpi)); r3_5 = (r4_5 + r4_13); i3_5 = (i4_5 + i4_13); r3_13 = (r4_5 - r4_13); i3_13 = (i4_5 - i4_13); } r2_1 = (r3_1 + r3_5); i2_1 = (i3_1 + i3_5); r2_9 = (r3_1 - r3_5); i2_9 = (i3_1 - i3_5); r2_5 = (r3_9 + i3_13); i2_5 = (i3_9 - r3_13); r2_13 = (r3_9 - i3_13); i2_13 = (i3_9 + r3_13); } { REAL r3_3, i3_3; REAL r3_7, i3_7; REAL r3_11, i3_11; REAL r3_15, i3_15; { REAL r4_3, i4_3; REAL r4_11, i4_11; wr = c_re(W[3 * l1]); wi = c_im(W[3 * l1]); tmpr = c_re(jp[3 * m]); tmpi = c_im(jp[3 * m]); r4_3 = ((wr * tmpr) - (wi * tmpi)); i4_3 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[11 * l1]); wi = c_im(W[11 * l1]); tmpr = c_re(jp[11 * m]); tmpi = c_im(jp[11 * m]); r4_11 = ((wr * tmpr) - (wi * tmpi)); i4_11 = ((wi * tmpr) + (wr * tmpi)); r3_3 = (r4_3 + r4_11); i3_3 = (i4_3 + i4_11); r3_11 = (r4_3 - r4_11); i3_11 = (i4_3 - i4_11); } { REAL r4_7, i4_7; REAL r4_15, i4_15; wr = c_re(W[7 * l1]); wi = c_im(W[7 * l1]); tmpr = c_re(jp[7 * m]); tmpi = c_im(jp[7 * m]); r4_7 = ((wr * tmpr) - (wi * tmpi)); i4_7 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[15 *
l1]); wi = c_im(W[15 * l1]); tmpr = c_re(jp[15 * m]); tmpi = c_im(jp[15 * m]); r4_15 = ((wr * tmpr) - (wi * tmpi)); i4_15 = ((wi * tmpr) + (wr * tmpi)); r3_7 = (r4_7 + r4_15); i3_7 = (i4_7 + i4_15); r3_15 = (r4_7 - r4_15); i3_15 = (i4_7 - i4_15); } r2_3 = (r3_3 + r3_7); i2_3 = (i3_3 + i3_7); r2_11 = (r3_3 - r3_7); i2_11 = (i3_3 - i3_7); r2_7 = (r3_11 + i3_15); i2_7 = (i3_11 - r3_15); r2_15 = (r3_11 - i3_15); i2_15 = (i3_11 + r3_15); } r1_1 = (r2_1 + r2_3); i1_1 = (i2_1 + i2_3); r1_9 = (r2_1 - r2_3); i1_9 = (i2_1 - i2_3); tmpr = (0.707106781187 * (r2_7 + i2_7)); tmpi = (0.707106781187 * (i2_7 - r2_7)); r1_3 = (r2_5 + tmpr); i1_3 = (i2_5 + tmpi); r1_11 = (r2_5 - tmpr); i1_11 = (i2_5 - tmpi); r1_5 = (r2_9 + i2_11); i1_5 = (i2_9 - r2_11); r1_13 = (r2_9 - i2_11); i1_13 = (i2_9 + r2_11); tmpr = (0.707106781187 * (i2_15 - r2_15)); tmpi = (0.707106781187 * (r2_15 + i2_15)); r1_7 = (r2_13 + tmpr); i1_7 = (i2_13 - tmpi); r1_15 = (r2_13 - tmpr); i1_15 = (i2_13 + tmpi); }
	  /* write the 16 strided outputs of this column (same final stage as fft_base_16) */
	  c_re(kp[0 * m]) = (r1_0 + r1_1); c_im(kp[0 * m]) = (i1_0 + i1_1); c_re(kp[8 * m]) = (r1_0 - r1_1); c_im(kp[8 * m]) = (i1_0 - i1_1); tmpr = ((0.923879532511 * r1_3) + (0.382683432365 * i1_3)); tmpi = ((0.923879532511 * i1_3) - (0.382683432365 * r1_3)); c_re(kp[1 * m]) = (r1_2 + tmpr); c_im(kp[1 * m]) = (i1_2 + tmpi); c_re(kp[9 * m]) = (r1_2 - tmpr); c_im(kp[9 * m]) = (i1_2 - tmpi); tmpr = (0.707106781187 * (r1_5 + i1_5)); tmpi = (0.707106781187 * (i1_5 - r1_5)); c_re(kp[2 * m]) = (r1_4 + tmpr); c_im(kp[2 * m]) = (i1_4 + tmpi); c_re(kp[10 * m]) = (r1_4 - tmpr); c_im(kp[10 * m]) = (i1_4 - tmpi); tmpr = ((0.382683432365 * r1_7) + (0.923879532511 * i1_7)); tmpi = ((0.382683432365 * i1_7) - (0.923879532511 * r1_7)); c_re(kp[3 * m]) = (r1_6 + tmpr); c_im(kp[3 * m]) = (i1_6 + tmpi); c_re(kp[11 * m]) = (r1_6 - tmpr); c_im(kp[11 * m]) = (i1_6 - tmpi); c_re(kp[4 * m]) = (r1_8 + i1_9); c_im(kp[4 * m]) = (i1_8 - r1_9); c_re(kp[12 * m]) = (r1_8 - i1_9); c_im(kp[12 * m]) = (i1_8 + r1_9); tmpr = ((0.923879532511 * i1_11) -
(0.382683432365 * r1_11)); tmpi = ((0.923879532511 * r1_11) + (0.382683432365 * i1_11)); c_re(kp[5 * m]) = (r1_10 + tmpr); c_im(kp[5 * m]) = (i1_10 - tmpi); c_re(kp[13 * m]) = (r1_10 - tmpr); c_im(kp[13 * m]) = (i1_10 + tmpi); tmpr = (0.707106781187 * (i1_13 - r1_13)); tmpi = (0.707106781187 * (r1_13 + i1_13)); c_re(kp[6 * m]) = (r1_12 + tmpr); c_im(kp[6 * m]) = (i1_12 - tmpi); c_re(kp[14 * m]) = (r1_12 - tmpr); c_im(kp[14 * m]) = (i1_12 + tmpi); tmpr = ((0.382683432365 * i1_15) - (0.923879532511 * r1_15)); tmpi = ((0.382683432365 * r1_15) + (0.923879532511 * i1_15)); c_re(kp[7 * m]) = (r1_14 + tmpr); c_im(kp[7 * m]) = (i1_14 - tmpi); c_re(kp[15 * m]) = (r1_14 - tmpr); c_im(kp[15 * m]) = (i1_14 + tmpi); } }
     } else {
	  /* parallel divide-and-conquer: two OpenMP tasks joined by taskwait */
	  int ab = (a + b) / 2;
#pragma omp task
	  fft_twiddle_16(a, ab, in, out, W, nW, nWdn, m);
#pragma omp task
	  fft_twiddle_16(ab, b, in, out, W, nW, nWdn, m);
#pragma omp taskwait
     }
}
/*
 * fft_twiddle_16_seq: sequential variant of fft_twiddle_16.
 * Identical per-column arithmetic (twiddle complex-multiplies W[k*l1] into
 * jp[k*m], unrolled 16-point butterfly into kp[k*m]); plain recursion
 * replaces the OpenMP tasks above the 128-column cutoff.
 */
void fft_twiddle_16_seq(int a, int b, COMPLEX * in, COMPLEX * out, COMPLEX * W, int nW, int nWdn, int m)
{
     int l1, i;
     COMPLEX *jp, *kp;
     REAL tmpr, tmpi, wr, wi;
     if ((b - a) < 128) {
	  for (i = a, l1 = nWdn * i, kp = out + i; i < b; i++, l1 += nWdn, kp++) { jp = in + i; { REAL r1_0, i1_0; REAL r1_1, i1_1; REAL r1_2, i1_2; REAL r1_3, i1_3; REAL r1_4, i1_4; REAL r1_5, i1_5; REAL r1_6, i1_6; REAL r1_7, i1_7; REAL r1_8, i1_8; REAL r1_9, i1_9; REAL r1_10, i1_10; REAL r1_11, i1_11; REAL r1_12, i1_12; REAL r1_13, i1_13; REAL r1_14, i1_14; REAL r1_15, i1_15; { REAL r2_0, i2_0; REAL r2_2, i2_2; REAL r2_4, i2_4; REAL r2_6, i2_6; REAL r2_8, i2_8; REAL r2_10, i2_10; REAL r2_12, i2_12; REAL r2_14, i2_14; { REAL r3_0, i3_0; REAL r3_4, i3_4; REAL r3_8, i3_8; REAL r3_12, i3_12; { REAL r4_0, i4_0; REAL r4_8, i4_8; r4_0 = c_re(jp[0 * m]); i4_0 = c_im(jp[0 * m]); wr = c_re(W[8 * l1]); wi = c_im(W[8 * l1]); tmpr = c_re(jp[8 * m]); tmpi = c_im(jp[8 * m]); r4_8 = ((wr * tmpr) - (wi * tmpi)); i4_8 = ((wi * tmpr) + (wr * tmpi)); r3_0 = (r4_0 + r4_8); i3_0 = (i4_0 + i4_8); r3_8 = (r4_0 - r4_8); i3_8 = (i4_0 - i4_8); } { REAL r4_4, i4_4; REAL r4_12, i4_12; wr = c_re(W[4 * l1]); wi = c_im(W[4 * l1]); tmpr = c_re(jp[4 * m]); tmpi = c_im(jp[4 * m]); r4_4 = ((wr * tmpr) - (wi * tmpi)); i4_4 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[12 * l1]); wi = c_im(W[12 * l1]); tmpr = c_re(jp[12 * m]); tmpi = c_im(jp[12 * m]); r4_12 = ((wr * tmpr) - (wi * tmpi)); i4_12 = ((wi * tmpr) + (wr * tmpi)); r3_4 = (r4_4 + r4_12); i3_4 = (i4_4 + i4_12); r3_12 = (r4_4 - r4_12); i3_12 = (i4_4 - i4_12); } r2_0 = (r3_0 + r3_4); i2_0 = (i3_0 + i3_4); r2_8 = (r3_0 - r3_4); i2_8 = (i3_0 - i3_4); r2_4 = (r3_8 + i3_12); i2_4 = (i3_8 - r3_12); r2_12 = (r3_8 - i3_12); i2_12 = (i3_8 + r3_12); } { REAL r3_2, i3_2; REAL r3_6, i3_6; REAL r3_10, i3_10; REAL r3_14, i3_14; { REAL r4_2, i4_2; REAL r4_10, i4_10; wr = c_re(W[2 * l1]); wi = c_im(W[2 * l1]); tmpr = c_re(jp[2 * m]); tmpi = c_im(jp[2 * m]); r4_2 = ((wr * tmpr) - (wi * tmpi)); i4_2 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[10 * l1]); wi = c_im(W[10 * l1]); tmpr = c_re(jp[10 * m]); tmpi = c_im(jp[10 * m]); r4_10 = ((wr * tmpr) - (wi * tmpi)); i4_10 = ((wi * tmpr) + (wr * tmpi)); r3_2 = (r4_2 + r4_10); i3_2 = (i4_2 + i4_10); r3_10 = (r4_2 - r4_10); i3_10 = (i4_2 - i4_10); } { REAL r4_6, i4_6; REAL r4_14, i4_14; wr = c_re(W[6 * l1]); wi = c_im(W[6 * l1]); tmpr = c_re(jp[6 * m]); tmpi = c_im(jp[6 * m]); r4_6 = ((wr * tmpr) - (wi * tmpi)); i4_6 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[14 * l1]); wi = c_im(W[14 * l1]); tmpr = c_re(jp[14 * m]); tmpi = c_im(jp[14 * m]); r4_14 = ((wr * tmpr) - (wi * tmpi)); i4_14 = ((wi * tmpr) + (wr * tmpi)); r3_6 = (r4_6 + r4_14); i3_6 = (i4_6 + i4_14); r3_14 = (r4_6 - r4_14); i3_14 = (i4_6 - i4_14); } r2_2 = (r3_2 + r3_6); i2_2 = (i3_2 + i3_6); r2_10 = (r3_2 - r3_6); i2_10 = (i3_2 - i3_6); r2_6 = (r3_10 + i3_14); i2_6 = (i3_10 - r3_14); r2_14 = (r3_10 - i3_14); i2_14 = (i3_10 + r3_14); } r1_0 = (r2_0 + r2_2); i1_0 = (i2_0 + i2_2); r1_8 = (r2_0 - r2_2); i1_8 = (i2_0 - i2_2); tmpr = (0.707106781187 * (r2_6 + i2_6)); tmpi = (0.707106781187 * (i2_6 - r2_6)); r1_2 = (r2_4 + tmpr); i1_2 = (i2_4 + tmpi); r1_10 = (r2_4 - tmpr); i1_10 = (i2_4 - tmpi); r1_4 = (r2_8 + i2_10); i1_4 = (i2_8 - r2_10); r1_12 = (r2_8 - i2_10); i1_12 = (i2_8 + r2_10); tmpr = (0.707106781187 * (i2_14 - r2_14)); tmpi = (0.707106781187 * (r2_14 + i2_14)); r1_6 = (r2_12 + tmpr); i1_6 = (i2_12 - tmpi); r1_14 = (r2_12 - tmpr); i1_14 = (i2_12 + tmpi); } { REAL r2_1, i2_1; REAL r2_3, i2_3; REAL r2_5, i2_5; REAL r2_7, i2_7; REAL r2_9, i2_9; REAL r2_11, i2_11; REAL r2_13, i2_13; REAL r2_15, i2_15; { REAL r3_1, i3_1; REAL r3_5, i3_5; REAL r3_9, i3_9; REAL r3_13, i3_13; { REAL r4_1, i4_1; REAL r4_9, i4_9; wr = c_re(W[1 * l1]); wi = c_im(W[1 * l1]); tmpr = c_re(jp[1 * m]); tmpi = c_im(jp[1 * m]); r4_1 = ((wr * tmpr) - (wi * tmpi)); i4_1 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[9 * l1]); wi = c_im(W[9 * l1]); tmpr = c_re(jp[9 * m]); tmpi = c_im(jp[9 * m]); r4_9 = ((wr * tmpr) - (wi * tmpi)); i4_9 = ((wi * tmpr) + (wr * tmpi)); r3_1 = (r4_1 + r4_9); i3_1 = (i4_1 + i4_9); r3_9 = (r4_1 - r4_9); i3_9 = (i4_1 - i4_9); } { REAL r4_5, i4_5; REAL r4_13, i4_13; wr = c_re(W[5 * l1]); wi = c_im(W[5 * l1]); tmpr = c_re(jp[5 * m]); tmpi = c_im(jp[5 * m]); r4_5 = ((wr * tmpr) - (wi * tmpi)); i4_5 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[13 * l1]); wi = c_im(W[13 * l1]); tmpr = c_re(jp[13 * m]); tmpi = c_im(jp[13 * m]); r4_13 = ((wr * tmpr) - (wi * tmpi)); i4_13 = ((wi * tmpr) + (wr * tmpi)); r3_5 = (r4_5 + r4_13); i3_5 = (i4_5 + i4_13); r3_13 = (r4_5 - r4_13); i3_13 = (i4_5 - i4_13); } r2_1 = (r3_1 + r3_5); i2_1 = (i3_1 + i3_5); r2_9 = (r3_1 - r3_5); i2_9 = (i3_1 - i3_5); r2_5 = (r3_9 + i3_13); i2_5 = (i3_9 - r3_13); r2_13 = (r3_9 - i3_13); i2_13 = (i3_9 + r3_13); } { REAL r3_3, i3_3; REAL r3_7, i3_7; REAL r3_11, i3_11; REAL r3_15, i3_15; { REAL r4_3, i4_3; REAL r4_11, i4_11; wr = c_re(W[3 * l1]); wi = c_im(W[3 * l1]); tmpr = c_re(jp[3 * m]); tmpi = c_im(jp[3 * m]); r4_3 = ((wr * tmpr) - (wi * tmpi)); i4_3 = ((wi * tmpr) + (wr * tmpi));
wr = c_re(W[11 * l1]); wi = c_im(W[11 * l1]); tmpr = c_re(jp[11 * m]); tmpi = c_im(jp[11 * m]); r4_11 = ((wr * tmpr) - (wi * tmpi)); i4_11 = ((wi * tmpr) + (wr * tmpi)); r3_3 = (r4_3 + r4_11); i3_3 = (i4_3 + i4_11); r3_11 = (r4_3 - r4_11); i3_11 = (i4_3 - i4_11); } { REAL r4_7, i4_7; REAL r4_15, i4_15; wr = c_re(W[7 * l1]); wi = c_im(W[7 * l1]); tmpr = c_re(jp[7 * m]); tmpi = c_im(jp[7 * m]); r4_7 = ((wr * tmpr) - (wi * tmpi)); i4_7 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[15 * l1]); wi = c_im(W[15 * l1]); tmpr = c_re(jp[15 * m]); tmpi = c_im(jp[15 * m]); r4_15 = ((wr * tmpr) - (wi * tmpi)); i4_15 = ((wi * tmpr) + (wr * tmpi)); r3_7 = (r4_7 + r4_15); i3_7 = (i4_7 + i4_15); r3_15 = (r4_7 - r4_15); i3_15 = (i4_7 - i4_15); } r2_3 = (r3_3 + r3_7); i2_3 = (i3_3 + i3_7); r2_11 = (r3_3 - r3_7); i2_11 = (i3_3 - i3_7); r2_7 = (r3_11 + i3_15); i2_7 = (i3_11 - r3_15); r2_15 = (r3_11 - i3_15); i2_15 = (i3_11 + r3_15); } r1_1 = (r2_1 + r2_3); i1_1 = (i2_1 + i2_3); r1_9 = (r2_1 - r2_3); i1_9 = (i2_1 - i2_3); tmpr = (0.707106781187 * (r2_7 + i2_7)); tmpi = (0.707106781187 * (i2_7 - r2_7)); r1_3 = (r2_5 + tmpr); i1_3 = (i2_5 + tmpi); r1_11 = (r2_5 - tmpr); i1_11 = (i2_5 - tmpi); r1_5 = (r2_9 + i2_11); i1_5 = (i2_9 - r2_11); r1_13 = (r2_9 - i2_11); i1_13 = (i2_9 + r2_11); tmpr = (0.707106781187 * (i2_15 - r2_15)); tmpi = (0.707106781187 * (r2_15 + i2_15)); r1_7 = (r2_13 + tmpr); i1_7 = (i2_13 - tmpi); r1_15 = (r2_13 - tmpr); i1_15 = (i2_13 + tmpi); } c_re(kp[0 * m]) = (r1_0 + r1_1); c_im(kp[0 * m]) = (i1_0 + i1_1); c_re(kp[8 * m]) = (r1_0 - r1_1); c_im(kp[8 * m]) = (i1_0 - i1_1); tmpr = ((0.923879532511 * r1_3) + (0.382683432365 * i1_3)); tmpi = ((0.923879532511 * i1_3) - (0.382683432365 * r1_3)); c_re(kp[1 * m]) = (r1_2 + tmpr); c_im(kp[1 * m]) = (i1_2 + tmpi); c_re(kp[9 * m]) = (r1_2 - tmpr); c_im(kp[9 * m]) = (i1_2 - tmpi); tmpr = (0.707106781187 * (r1_5 + i1_5)); tmpi = (0.707106781187 * (i1_5 - r1_5)); c_re(kp[2 * m]) = (r1_4 + tmpr); c_im(kp[2 * m]) = (i1_4 + tmpi);
c_re(kp[10 * m]) = (r1_4 - tmpr); c_im(kp[10 * m]) = (i1_4 - tmpi); tmpr = ((0.382683432365 * r1_7) + (0.923879532511 * i1_7)); tmpi = ((0.382683432365 * i1_7) - (0.923879532511 * r1_7)); c_re(kp[3 * m]) = (r1_6 + tmpr); c_im(kp[3 * m]) = (i1_6 + tmpi); c_re(kp[11 * m]) = (r1_6 - tmpr); c_im(kp[11 * m]) = (i1_6 - tmpi); c_re(kp[4 * m]) = (r1_8 + i1_9); c_im(kp[4 * m]) = (i1_8 - r1_9); c_re(kp[12 * m]) = (r1_8 - i1_9); c_im(kp[12 * m]) = (i1_8 + r1_9); tmpr = ((0.923879532511 * i1_11) - (0.382683432365 * r1_11)); tmpi = ((0.923879532511 * r1_11) + (0.382683432365 * i1_11)); c_re(kp[5 * m]) = (r1_10 + tmpr); c_im(kp[5 * m]) = (i1_10 - tmpi); c_re(kp[13 * m]) = (r1_10 - tmpr); c_im(kp[13 * m]) = (i1_10 + tmpi); tmpr = (0.707106781187 * (i1_13 - r1_13)); tmpi = (0.707106781187 * (r1_13 + i1_13)); c_re(kp[6 * m]) = (r1_12 + tmpr); c_im(kp[6 * m]) = (i1_12 - tmpi); c_re(kp[14 * m]) = (r1_12 - tmpr); c_im(kp[14 * m]) = (i1_12 + tmpi); tmpr = ((0.382683432365 * i1_15) - (0.923879532511 * r1_15)); tmpi = ((0.382683432365 * r1_15) + (0.923879532511 * i1_15)); c_re(kp[7 * m]) = (r1_14 + tmpr); c_im(kp[7 * m]) = (i1_14 - tmpi); c_re(kp[15 * m]) = (r1_14 - tmpr); c_im(kp[15 * m]) = (i1_14 + tmpi); } } } else { int ab = (a + b) / 2; fft_twiddle_16_seq(a, ab, in, out, W, nW, nWdn, m); fft_twiddle_16_seq(ab, b, in, out, W, nW, nWdn, m); } } void fft_unshuffle_16(int a, int b, COMPLEX * in, COMPLEX * out, int m) { int i; const COMPLEX *ip; COMPLEX *jp; if ((b - a) < 128) { ip = in + a * 16; for (i = a; i < b; ++i) { jp = out + i; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; } } else { int ab = (a + b) / 2; #pragma omp task 
fft_unshuffle_16(a, ab, in, out, m); #pragma omp task fft_unshuffle_16(ab, b, in, out, m); #pragma omp taskwait } } void fft_unshuffle_16_seq(int a, int b, COMPLEX * in, COMPLEX * out, int m) { int i; const COMPLEX *ip; COMPLEX *jp; if ((b - a) < 128) { ip = in + a * 16; for (i = a; i < b; ++i) { jp = out + i; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m; jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; } } else { int ab = (a + b) / 2; fft_unshuffle_16_seq(a, ab, in, out, m); fft_unshuffle_16_seq(ab, b, in, out, m); } } void fft_base_32(COMPLEX * in, COMPLEX * out) { REAL tmpr, tmpi; { REAL r1_0, i1_0; REAL r1_1, i1_1; REAL r1_2, i1_2; REAL r1_3, i1_3; REAL r1_4, i1_4; REAL r1_5, i1_5; REAL r1_6, i1_6; REAL r1_7, i1_7; REAL r1_8, i1_8; REAL r1_9, i1_9; REAL r1_10, i1_10; REAL r1_11, i1_11; REAL r1_12, i1_12; REAL r1_13, i1_13; REAL r1_14, i1_14; REAL r1_15, i1_15; REAL r1_16, i1_16; REAL r1_17, i1_17; REAL r1_18, i1_18; REAL r1_19, i1_19; REAL r1_20, i1_20; REAL r1_21, i1_21; REAL r1_22, i1_22; REAL r1_23, i1_23; REAL r1_24, i1_24; REAL r1_25, i1_25; REAL r1_26, i1_26; REAL r1_27, i1_27; REAL r1_28, i1_28; REAL r1_29, i1_29; REAL r1_30, i1_30; REAL r1_31, i1_31; { REAL r2_0, i2_0; REAL r2_2, i2_2; REAL r2_4, i2_4; REAL r2_6, i2_6; REAL r2_8, i2_8; REAL r2_10, i2_10; REAL r2_12, i2_12; REAL r2_14, i2_14; REAL r2_16, i2_16; REAL r2_18, i2_18; REAL r2_20, i2_20; REAL r2_22, i2_22; REAL r2_24, i2_24; REAL r2_26, i2_26; REAL r2_28, i2_28; REAL r2_30, i2_30; { REAL r3_0, i3_0; REAL r3_4, i3_4; REAL r3_8, i3_8; REAL r3_12, i3_12; REAL r3_16, i3_16; REAL r3_20, i3_20; REAL r3_24, i3_24; REAL r3_28, i3_28; { REAL r4_0, i4_0; REAL r4_8, i4_8; REAL r4_16, i4_16; REAL r4_24, 
/* fft_base_32 (continued): machine-generated, fully unrolled 32-point FFT
 * base case.  Statement order is preserved exactly — floating-point results
 * depend on this evaluation order.  The literal constants are twiddle-factor
 * components: 0.707106781187 ~ cos(pi/4) = 1/sqrt(2);
 * 0.923879532511 / 0.382683432365 ~ cos/sin(pi/8);
 * 0.980785280403 / 0.195090322016 ~ cos/sin(pi/16);
 * 0.831469612303 / 0.55557023302 ~ cos/sin(3*pi/16).
 * Nested scopes r5_* -> r4_* -> r3_* -> r2_* -> r1_* hold the intermediates
 * of the successive butterfly combination stages before the final stores
 * into out[0..31]. */
i4_24; { REAL r5_0, i5_0; REAL r5_16, i5_16; r5_0 = c_re(in[0]); i5_0 = c_im(in[0]); r5_16 = c_re(in[16]); i5_16 = c_im(in[16]); r4_0 = (r5_0 + r5_16); i4_0 = (i5_0 + i5_16); r4_16 = (r5_0 - r5_16); i4_16 = (i5_0 - i5_16); } { REAL r5_8, i5_8; REAL r5_24, i5_24; r5_8 = c_re(in[8]); i5_8 = c_im(in[8]); r5_24 = c_re(in[24]); i5_24 = c_im(in[24]); r4_8 = (r5_8 + r5_24); i4_8 = (i5_8 + i5_24); r4_24 = (r5_8 - r5_24); i4_24 = (i5_8 - i5_24); } r3_0 = (r4_0 + r4_8); i3_0 = (i4_0 + i4_8); r3_16 = (r4_0 - r4_8); i3_16 = (i4_0 - i4_8); r3_8 = (r4_16 + i4_24); i3_8 = (i4_16 - r4_24); r3_24 = (r4_16 - i4_24); i3_24 = (i4_16 + r4_24); } { REAL r4_4, i4_4; REAL r4_12, i4_12; REAL r4_20, i4_20; REAL r4_28, i4_28; { REAL r5_4, i5_4; REAL r5_20, i5_20; r5_4 = c_re(in[4]); i5_4 = c_im(in[4]); r5_20 = c_re(in[20]); i5_20 = c_im(in[20]); r4_4 = (r5_4 + r5_20); i4_4 = (i5_4 + i5_20); r4_20 = (r5_4 - r5_20); i4_20 = (i5_4 - i5_20); } { REAL r5_12, i5_12; REAL r5_28, i5_28; r5_12 = c_re(in[12]); i5_12 = c_im(in[12]); r5_28 = c_re(in[28]); i5_28 = c_im(in[28]); r4_12 = (r5_12 + r5_28); i4_12 = (i5_12 + i5_28); r4_28 = (r5_12 - r5_28); i4_28 = (i5_12 - i5_28); } r3_4 = (r4_4 + r4_12); i3_4 = (i4_4 + i4_12); r3_20 = (r4_4 - r4_12); i3_20 = (i4_4 - i4_12); r3_12 = (r4_20 + i4_28); i3_12 = (i4_20 - r4_28); r3_28 = (r4_20 - i4_28); i3_28 = (i4_20 + r4_28); } r2_0 = (r3_0 + r3_4); i2_0 = (i3_0 + i3_4); r2_16 = (r3_0 - r3_4); i2_16 = (i3_0 - i3_4); tmpr = (0.707106781187 * (r3_12 + i3_12)); tmpi = (0.707106781187 * (i3_12 - r3_12)); r2_4 = (r3_8 + tmpr); i2_4 = (i3_8 + tmpi); r2_20 = (r3_8 - tmpr); i2_20 = (i3_8 - tmpi); r2_8 = (r3_16 + i3_20); i2_8 = (i3_16 - r3_20); r2_24 = (r3_16 - i3_20); i2_24 = (i3_16 + r3_20); tmpr = (0.707106781187 * (i3_28 - r3_28)); tmpi = (0.707106781187 * (r3_28 + i3_28)); r2_12 = (r3_24 + tmpr); i2_12 = (i3_24 - tmpi); r2_28 = (r3_24 - tmpr); i2_28 = (i3_24 + tmpi); } { REAL r3_2, i3_2; REAL r3_6, i3_6; REAL r3_10, i3_10; REAL r3_14, i3_14; REAL r3_18, i3_18; REAL
r3_22, i3_22; REAL r3_26, i3_26; REAL r3_30, i3_30; { REAL r4_2, i4_2; REAL r4_10, i4_10; REAL r4_18, i4_18; REAL r4_26, i4_26; { REAL r5_2, i5_2; REAL r5_18, i5_18; r5_2 = c_re(in[2]); i5_2 = c_im(in[2]); r5_18 = c_re(in[18]); i5_18 = c_im(in[18]); r4_2 = (r5_2 + r5_18); i4_2 = (i5_2 + i5_18); r4_18 = (r5_2 - r5_18); i4_18 = (i5_2 - i5_18); } { REAL r5_10, i5_10; REAL r5_26, i5_26; r5_10 = c_re(in[10]); i5_10 = c_im(in[10]); r5_26 = c_re(in[26]); i5_26 = c_im(in[26]); r4_10 = (r5_10 + r5_26); i4_10 = (i5_10 + i5_26); r4_26 = (r5_10 - r5_26); i4_26 = (i5_10 - i5_26); } r3_2 = (r4_2 + r4_10); i3_2 = (i4_2 + i4_10); r3_18 = (r4_2 - r4_10); i3_18 = (i4_2 - i4_10); r3_10 = (r4_18 + i4_26); i3_10 = (i4_18 - r4_26); r3_26 = (r4_18 - i4_26); i3_26 = (i4_18 + r4_26); } { REAL r4_6, i4_6; REAL r4_14, i4_14; REAL r4_22, i4_22; REAL r4_30, i4_30; { REAL r5_6, i5_6; REAL r5_22, i5_22; r5_6 = c_re(in[6]); i5_6 = c_im(in[6]); r5_22 = c_re(in[22]); i5_22 = c_im(in[22]); r4_6 = (r5_6 + r5_22); i4_6 = (i5_6 + i5_22); r4_22 = (r5_6 - r5_22); i4_22 = (i5_6 - i5_22); } { REAL r5_14, i5_14; REAL r5_30, i5_30; r5_14 = c_re(in[14]); i5_14 = c_im(in[14]); r5_30 = c_re(in[30]); i5_30 = c_im(in[30]); r4_14 = (r5_14 + r5_30); i4_14 = (i5_14 + i5_30); r4_30 = (r5_14 - r5_30); i4_30 = (i5_14 - i5_30); } r3_6 = (r4_6 + r4_14); i3_6 = (i4_6 + i4_14); r3_22 = (r4_6 - r4_14); i3_22 = (i4_6 - i4_14); r3_14 = (r4_22 + i4_30); i3_14 = (i4_22 - r4_30); r3_30 = (r4_22 - i4_30); i3_30 = (i4_22 + r4_30); } r2_2 = (r3_2 + r3_6); i2_2 = (i3_2 + i3_6); r2_18 = (r3_2 - r3_6); i2_18 = (i3_2 - i3_6); tmpr = (0.707106781187 * (r3_14 + i3_14)); tmpi = (0.707106781187 * (i3_14 - r3_14)); r2_6 = (r3_10 + tmpr); i2_6 = (i3_10 + tmpi); r2_22 = (r3_10 - tmpr); i2_22 = (i3_10 - tmpi); r2_10 = (r3_18 + i3_22); i2_10 = (i3_18 - r3_22); r2_26 = (r3_18 - i3_22); i2_26 = (i3_18 + r3_22); tmpr = (0.707106781187 * (i3_30 - r3_30)); tmpi = (0.707106781187 * (r3_30 + i3_30)); r2_14 = (r3_26 + tmpr); i2_14 = (i3_26 - tmpi);
r2_30 = (r3_26 - tmpr); i2_30 = (i3_26 + tmpi); } r1_0 = (r2_0 + r2_2); i1_0 = (i2_0 + i2_2); r1_16 = (r2_0 - r2_2); i1_16 = (i2_0 - i2_2); tmpr = ((0.923879532511 * r2_6) + (0.382683432365 * i2_6)); tmpi = ((0.923879532511 * i2_6) - (0.382683432365 * r2_6)); r1_2 = (r2_4 + tmpr); i1_2 = (i2_4 + tmpi); r1_18 = (r2_4 - tmpr); i1_18 = (i2_4 - tmpi); tmpr = (0.707106781187 * (r2_10 + i2_10)); tmpi = (0.707106781187 * (i2_10 - r2_10)); r1_4 = (r2_8 + tmpr); i1_4 = (i2_8 + tmpi); r1_20 = (r2_8 - tmpr); i1_20 = (i2_8 - tmpi); tmpr = ((0.382683432365 * r2_14) + (0.923879532511 * i2_14)); tmpi = ((0.382683432365 * i2_14) - (0.923879532511 * r2_14)); r1_6 = (r2_12 + tmpr); i1_6 = (i2_12 + tmpi); r1_22 = (r2_12 - tmpr); i1_22 = (i2_12 - tmpi); r1_8 = (r2_16 + i2_18); i1_8 = (i2_16 - r2_18); r1_24 = (r2_16 - i2_18); i1_24 = (i2_16 + r2_18); tmpr = ((0.923879532511 * i2_22) - (0.382683432365 * r2_22)); tmpi = ((0.923879532511 * r2_22) + (0.382683432365 * i2_22)); r1_10 = (r2_20 + tmpr); i1_10 = (i2_20 - tmpi); r1_26 = (r2_20 - tmpr); i1_26 = (i2_20 + tmpi); tmpr = (0.707106781187 * (i2_26 - r2_26)); tmpi = (0.707106781187 * (r2_26 + i2_26)); r1_12 = (r2_24 + tmpr); i1_12 = (i2_24 - tmpi); r1_28 = (r2_24 - tmpr); i1_28 = (i2_24 + tmpi); tmpr = ((0.382683432365 * i2_30) - (0.923879532511 * r2_30)); tmpi = ((0.382683432365 * r2_30) + (0.923879532511 * i2_30)); r1_14 = (r2_28 + tmpr); i1_14 = (i2_28 - tmpi); r1_30 = (r2_28 - tmpr); i1_30 = (i2_28 + tmpi); } { REAL r2_1, i2_1; REAL r2_3, i2_3; REAL r2_5, i2_5; REAL r2_7, i2_7; REAL r2_9, i2_9; REAL r2_11, i2_11; REAL r2_13, i2_13; REAL r2_15, i2_15; REAL r2_17, i2_17; REAL r2_19, i2_19; REAL r2_21, i2_21; REAL r2_23, i2_23; REAL r2_25, i2_25; REAL r2_27, i2_27; REAL r2_29, i2_29; REAL r2_31, i2_31; { REAL r3_1, i3_1; REAL r3_5, i3_5; REAL r3_9, i3_9; REAL r3_13, i3_13; REAL r3_17, i3_17; REAL r3_21, i3_21; REAL r3_25, i3_25; REAL r3_29, i3_29; { REAL r4_1, i4_1; REAL r4_9, i4_9; REAL r4_17, i4_17; REAL r4_25, i4_25; { REAL r5_1,
i5_1; REAL r5_17, i5_17; r5_1 = c_re(in[1]); i5_1 = c_im(in[1]); r5_17 = c_re(in[17]); i5_17 = c_im(in[17]); r4_1 = (r5_1 + r5_17); i4_1 = (i5_1 + i5_17); r4_17 = (r5_1 - r5_17); i4_17 = (i5_1 - i5_17); } { REAL r5_9, i5_9; REAL r5_25, i5_25; r5_9 = c_re(in[9]); i5_9 = c_im(in[9]); r5_25 = c_re(in[25]); i5_25 = c_im(in[25]); r4_9 = (r5_9 + r5_25); i4_9 = (i5_9 + i5_25); r4_25 = (r5_9 - r5_25); i4_25 = (i5_9 - i5_25); } r3_1 = (r4_1 + r4_9); i3_1 = (i4_1 + i4_9); r3_17 = (r4_1 - r4_9); i3_17 = (i4_1 - i4_9); r3_9 = (r4_17 + i4_25); i3_9 = (i4_17 - r4_25); r3_25 = (r4_17 - i4_25); i3_25 = (i4_17 + r4_25); } { REAL r4_5, i4_5; REAL r4_13, i4_13; REAL r4_21, i4_21; REAL r4_29, i4_29; { REAL r5_5, i5_5; REAL r5_21, i5_21; r5_5 = c_re(in[5]); i5_5 = c_im(in[5]); r5_21 = c_re(in[21]); i5_21 = c_im(in[21]); r4_5 = (r5_5 + r5_21); i4_5 = (i5_5 + i5_21); r4_21 = (r5_5 - r5_21); i4_21 = (i5_5 - i5_21); } { REAL r5_13, i5_13; REAL r5_29, i5_29; r5_13 = c_re(in[13]); i5_13 = c_im(in[13]); r5_29 = c_re(in[29]); i5_29 = c_im(in[29]); r4_13 = (r5_13 + r5_29); i4_13 = (i5_13 + i5_29); r4_29 = (r5_13 - r5_29); i4_29 = (i5_13 - i5_29); } r3_5 = (r4_5 + r4_13); i3_5 = (i4_5 + i4_13); r3_21 = (r4_5 - r4_13); i3_21 = (i4_5 - i4_13); r3_13 = (r4_21 + i4_29); i3_13 = (i4_21 - r4_29); r3_29 = (r4_21 - i4_29); i3_29 = (i4_21 + r4_29); } r2_1 = (r3_1 + r3_5); i2_1 = (i3_1 + i3_5); r2_17 = (r3_1 - r3_5); i2_17 = (i3_1 - i3_5); tmpr = (0.707106781187 * (r3_13 + i3_13)); tmpi = (0.707106781187 * (i3_13 - r3_13)); r2_5 = (r3_9 + tmpr); i2_5 = (i3_9 + tmpi); r2_21 = (r3_9 - tmpr); i2_21 = (i3_9 - tmpi); r2_9 = (r3_17 + i3_21); i2_9 = (i3_17 - r3_21); r2_25 = (r3_17 - i3_21); i2_25 = (i3_17 + r3_21); tmpr = (0.707106781187 * (i3_29 - r3_29)); tmpi = (0.707106781187 * (r3_29 + i3_29)); r2_13 = (r3_25 + tmpr); i2_13 = (i3_25 - tmpi); r2_29 = (r3_25 - tmpr); i2_29 = (i3_25 + tmpi); } { REAL r3_3, i3_3; REAL r3_7, i3_7; REAL r3_11, i3_11; REAL r3_15, i3_15; REAL r3_19, i3_19; REAL r3_23, i3_23; REAL
r3_27, i3_27; REAL r3_31, i3_31; { REAL r4_3, i4_3; REAL r4_11, i4_11; REAL r4_19, i4_19; REAL r4_27, i4_27; { REAL r5_3, i5_3; REAL r5_19, i5_19; r5_3 = c_re(in[3]); i5_3 = c_im(in[3]); r5_19 = c_re(in[19]); i5_19 = c_im(in[19]); r4_3 = (r5_3 + r5_19); i4_3 = (i5_3 + i5_19); r4_19 = (r5_3 - r5_19); i4_19 = (i5_3 - i5_19); } { REAL r5_11, i5_11; REAL r5_27, i5_27; r5_11 = c_re(in[11]); i5_11 = c_im(in[11]); r5_27 = c_re(in[27]); i5_27 = c_im(in[27]); r4_11 = (r5_11 + r5_27); i4_11 = (i5_11 + i5_27); r4_27 = (r5_11 - r5_27); i4_27 = (i5_11 - i5_27); } r3_3 = (r4_3 + r4_11); i3_3 = (i4_3 + i4_11); r3_19 = (r4_3 - r4_11); i3_19 = (i4_3 - i4_11); r3_11 = (r4_19 + i4_27); i3_11 = (i4_19 - r4_27); r3_27 = (r4_19 - i4_27); i3_27 = (i4_19 + r4_27); } { REAL r4_7, i4_7; REAL r4_15, i4_15; REAL r4_23, i4_23; REAL r4_31, i4_31; { REAL r5_7, i5_7; REAL r5_23, i5_23; r5_7 = c_re(in[7]); i5_7 = c_im(in[7]); r5_23 = c_re(in[23]); i5_23 = c_im(in[23]); r4_7 = (r5_7 + r5_23); i4_7 = (i5_7 + i5_23); r4_23 = (r5_7 - r5_23); i4_23 = (i5_7 - i5_23); } { REAL r5_15, i5_15; REAL r5_31, i5_31; r5_15 = c_re(in[15]); i5_15 = c_im(in[15]); r5_31 = c_re(in[31]); i5_31 = c_im(in[31]); r4_15 = (r5_15 + r5_31); i4_15 = (i5_15 + i5_31); r4_31 = (r5_15 - r5_31); i4_31 = (i5_15 - i5_31); } r3_7 = (r4_7 + r4_15); i3_7 = (i4_7 + i4_15); r3_23 = (r4_7 - r4_15); i3_23 = (i4_7 - i4_15); r3_15 = (r4_23 + i4_31); i3_15 = (i4_23 - r4_31); r3_31 = (r4_23 - i4_31); i3_31 = (i4_23 + r4_31); } r2_3 = (r3_3 + r3_7); i2_3 = (i3_3 + i3_7); r2_19 = (r3_3 - r3_7); i2_19 = (i3_3 - i3_7); tmpr = (0.707106781187 * (r3_15 + i3_15)); tmpi = (0.707106781187 * (i3_15 - r3_15)); r2_7 = (r3_11 + tmpr); i2_7 = (i3_11 + tmpi); r2_23 = (r3_11 - tmpr); i2_23 = (i3_11 - tmpi); r2_11 = (r3_19 + i3_23); i2_11 = (i3_19 - r3_23); r2_27 = (r3_19 - i3_23); i2_27 = (i3_19 + r3_23); tmpr = (0.707106781187 * (i3_31 - r3_31)); tmpi = (0.707106781187 * (r3_31 + i3_31)); r2_15 = (r3_27 + tmpr); i2_15 = (i3_27 - tmpi); r2_31 = (r3_27 -
tmpr); i2_31 = (i3_27 + tmpi); } r1_1 = (r2_1 + r2_3); i1_1 = (i2_1 + i2_3); r1_17 = (r2_1 - r2_3); i1_17 = (i2_1 - i2_3); tmpr = ((0.923879532511 * r2_7) + (0.382683432365 * i2_7)); tmpi = ((0.923879532511 * i2_7) - (0.382683432365 * r2_7)); r1_3 = (r2_5 + tmpr); i1_3 = (i2_5 + tmpi); r1_19 = (r2_5 - tmpr); i1_19 = (i2_5 - tmpi); tmpr = (0.707106781187 * (r2_11 + i2_11)); tmpi = (0.707106781187 * (i2_11 - r2_11)); r1_5 = (r2_9 + tmpr); i1_5 = (i2_9 + tmpi); r1_21 = (r2_9 - tmpr); i1_21 = (i2_9 - tmpi); tmpr = ((0.382683432365 * r2_15) + (0.923879532511 * i2_15)); tmpi = ((0.382683432365 * i2_15) - (0.923879532511 * r2_15)); r1_7 = (r2_13 + tmpr); i1_7 = (i2_13 + tmpi); r1_23 = (r2_13 - tmpr); i1_23 = (i2_13 - tmpi); r1_9 = (r2_17 + i2_19); i1_9 = (i2_17 - r2_19); r1_25 = (r2_17 - i2_19); i1_25 = (i2_17 + r2_19); tmpr = ((0.923879532511 * i2_23) - (0.382683432365 * r2_23)); tmpi = ((0.923879532511 * r2_23) + (0.382683432365 * i2_23)); r1_11 = (r2_21 + tmpr); i1_11 = (i2_21 - tmpi); r1_27 = (r2_21 - tmpr); i1_27 = (i2_21 + tmpi); tmpr = (0.707106781187 * (i2_27 - r2_27)); tmpi = (0.707106781187 * (r2_27 + i2_27)); r1_13 = (r2_25 + tmpr); i1_13 = (i2_25 - tmpi); r1_29 = (r2_25 - tmpr); i1_29 = (i2_25 + tmpi); tmpr = ((0.382683432365 * i2_31) - (0.923879532511 * r2_31)); tmpi = ((0.382683432365 * r2_31) + (0.923879532511 * i2_31)); r1_15 = (r2_29 + tmpr); i1_15 = (i2_29 - tmpi); r1_31 = (r2_29 - tmpr); i1_31 = (i2_29 + tmpi); } c_re(out[0]) = (r1_0 + r1_1); c_im(out[0]) = (i1_0 + i1_1); c_re(out[16]) = (r1_0 - r1_1); c_im(out[16]) = (i1_0 - i1_1); tmpr = ((0.980785280403 * r1_3) + (0.195090322016 * i1_3)); tmpi = ((0.980785280403 * i1_3) - (0.195090322016 * r1_3)); c_re(out[1]) = (r1_2 + tmpr); c_im(out[1]) = (i1_2 + tmpi); c_re(out[17]) = (r1_2 - tmpr); c_im(out[17]) = (i1_2 - tmpi); tmpr = ((0.923879532511 * r1_5) + (0.382683432365 * i1_5)); tmpi = ((0.923879532511 * i1_5) - (0.382683432365 * r1_5)); c_re(out[2]) = (r1_4 + tmpr); c_im(out[2]) = (i1_4 + tmpi);
c_re(out[18]) = (r1_4 - tmpr); c_im(out[18]) = (i1_4 - tmpi); tmpr = ((0.831469612303 * r1_7) + (0.55557023302 * i1_7)); tmpi = ((0.831469612303 * i1_7) - (0.55557023302 * r1_7)); c_re(out[3]) = (r1_6 + tmpr); c_im(out[3]) = (i1_6 + tmpi); c_re(out[19]) = (r1_6 - tmpr); c_im(out[19]) = (i1_6 - tmpi); tmpr = (0.707106781187 * (r1_9 + i1_9)); tmpi = (0.707106781187 * (i1_9 - r1_9)); c_re(out[4]) = (r1_8 + tmpr); c_im(out[4]) = (i1_8 + tmpi); c_re(out[20]) = (r1_8 - tmpr); c_im(out[20]) = (i1_8 - tmpi); tmpr = ((0.55557023302 * r1_11) + (0.831469612303 * i1_11)); tmpi = ((0.55557023302 * i1_11) - (0.831469612303 * r1_11)); c_re(out[5]) = (r1_10 + tmpr); c_im(out[5]) = (i1_10 + tmpi); c_re(out[21]) = (r1_10 - tmpr); c_im(out[21]) = (i1_10 - tmpi); tmpr = ((0.382683432365 * r1_13) + (0.923879532511 * i1_13)); tmpi = ((0.382683432365 * i1_13) - (0.923879532511 * r1_13)); c_re(out[6]) = (r1_12 + tmpr); c_im(out[6]) = (i1_12 + tmpi); c_re(out[22]) = (r1_12 - tmpr); c_im(out[22]) = (i1_12 - tmpi); tmpr = ((0.195090322016 * r1_15) + (0.980785280403 * i1_15)); tmpi = ((0.195090322016 * i1_15) - (0.980785280403 * r1_15)); c_re(out[7]) = (r1_14 + tmpr); c_im(out[7]) = (i1_14 + tmpi); c_re(out[23]) = (r1_14 - tmpr); c_im(out[23]) = (i1_14 - tmpi); c_re(out[8]) = (r1_16 + i1_17); c_im(out[8]) = (i1_16 - r1_17); c_re(out[24]) = (r1_16 - i1_17); c_im(out[24]) = (i1_16 + r1_17); tmpr = ((0.980785280403 * i1_19) - (0.195090322016 * r1_19)); tmpi = ((0.980785280403 * r1_19) + (0.195090322016 * i1_19)); c_re(out[9]) = (r1_18 + tmpr); c_im(out[9]) = (i1_18 - tmpi); c_re(out[25]) = (r1_18 - tmpr); c_im(out[25]) = (i1_18 + tmpi); tmpr = ((0.923879532511 * i1_21) - (0.382683432365 * r1_21)); tmpi = ((0.923879532511 * r1_21) + (0.382683432365 * i1_21)); c_re(out[10]) = (r1_20 + tmpr); c_im(out[10]) = (i1_20 - tmpi); c_re(out[26]) = (r1_20 - tmpr); c_im(out[26]) = (i1_20 + tmpi); tmpr = ((0.831469612303 * i1_23) - (0.55557023302 * r1_23)); tmpi = ((0.831469612303 * r1_23) + (0.55557023302 *
i1_23)); c_re(out[11]) = (r1_22 + tmpr); c_im(out[11]) = (i1_22 - tmpi); c_re(out[27]) = (r1_22 - tmpr); c_im(out[27]) = (i1_22 + tmpi); tmpr = (0.707106781187 * (i1_25 - r1_25)); tmpi = (0.707106781187 * (r1_25 + i1_25)); c_re(out[12]) = (r1_24 + tmpr); c_im(out[12]) = (i1_24 - tmpi); c_re(out[28]) = (r1_24 - tmpr); c_im(out[28]) = (i1_24 + tmpi); tmpr = ((0.55557023302 * i1_27) - (0.831469612303 * r1_27)); tmpi = ((0.55557023302 * r1_27) + (0.831469612303 * i1_27)); c_re(out[13]) = (r1_26 + tmpr); c_im(out[13]) = (i1_26 - tmpi); c_re(out[29]) = (r1_26 - tmpr); c_im(out[29]) = (i1_26 + tmpi); tmpr = ((0.382683432365 * i1_29) - (0.923879532511 * r1_29)); tmpi = ((0.382683432365 * r1_29) + (0.923879532511 * i1_29)); c_re(out[14]) = (r1_28 + tmpr); c_im(out[14]) = (i1_28 - tmpi); c_re(out[30]) = (r1_28 - tmpr); c_im(out[30]) = (i1_28 + tmpi); tmpr = ((0.195090322016 * i1_31) - (0.980785280403 * r1_31)); tmpi = ((0.195090322016 * r1_31) + (0.980785280403 * i1_31)); c_re(out[15]) = (r1_30 + tmpr); c_im(out[15]) = (i1_30 - tmpi); c_re(out[31]) = (r1_30 - tmpr); c_im(out[31]) = (i1_30 + tmpi); } }
/* fft_twiddle_32: applies per-element twiddle factors W[k * l1] to the
 * strided input jp[k * m] (complex multiply into r5_k/i5_k), then runs the
 * same unrolled 32-point butterfly network as fft_base_32, storing results
 * to kp[k * m].  Works on index range [a, b); below the cutoff
 * (b - a < 128) it loops directly, otherwise it recurses (task-parallel
 * split appears at the end of the function, on a later source line).
 * NOTE(review): nW appears unused in the visible leaf body — presumably kept
 * for a uniform fft_twiddle_* signature; confirm against callers. */
void fft_twiddle_32(int a, int b, COMPLEX * in, COMPLEX * out, COMPLEX * W, int nW, int nWdn, int m) { int l1, i; COMPLEX *jp, *kp; REAL tmpr, tmpi, wr, wi; if ((b - a) < 128) { for (i = a, l1 = nWdn * i, kp = out + i; i < b; i++, l1 += nWdn, kp++) { jp = in + i; { REAL r1_0, i1_0; REAL r1_1, i1_1; REAL r1_2, i1_2; REAL r1_3, i1_3; REAL r1_4, i1_4; REAL r1_5, i1_5; REAL r1_6, i1_6; REAL r1_7, i1_7; REAL r1_8, i1_8; REAL r1_9, i1_9; REAL r1_10, i1_10; REAL r1_11, i1_11; REAL r1_12, i1_12; REAL r1_13, i1_13; REAL r1_14, i1_14; REAL r1_15, i1_15; REAL r1_16, i1_16; REAL r1_17, i1_17; REAL r1_18, i1_18; REAL r1_19, i1_19; REAL r1_20, i1_20; REAL r1_21, i1_21; REAL r1_22, i1_22; REAL r1_23, i1_23; REAL r1_24, i1_24; REAL r1_25, i1_25; REAL r1_26, i1_26; REAL r1_27, i1_27; REAL r1_28, i1_28; REAL r1_29, i1_29; REAL r1_30, i1_30; REAL r1_31, i1_31; { REAL r2_0, i2_0; REAL r2_2, i2_2;
/* fft_twiddle_32 (continued): leaf body of the twiddled 32-point transform.
 * Each input element jp[k * m] is first rotated by its twiddle factor
 * W[k * l1] — the pattern r5_k = wr*tmpr - wi*tmpi; i5_k = wi*tmpr + wr*tmpi
 * is a complex multiply — and the results then flow through the identical
 * butterfly network as in fft_base_32 (same constants: 1/sqrt(2) and
 * cos/sin of multiples of pi/16).  Statement order is significant for
 * floating-point reproducibility; code below is kept byte-identical. */
REAL r2_4, i2_4; REAL r2_6, i2_6; REAL r2_8, i2_8; REAL r2_10, i2_10; REAL r2_12, i2_12; REAL r2_14, i2_14; REAL r2_16, i2_16; REAL r2_18, i2_18; REAL r2_20, i2_20; REAL r2_22, i2_22; REAL r2_24, i2_24; REAL r2_26, i2_26; REAL r2_28, i2_28; REAL r2_30, i2_30; { REAL r3_0, i3_0; REAL r3_4, i3_4; REAL r3_8, i3_8; REAL r3_12, i3_12; REAL r3_16, i3_16; REAL r3_20, i3_20; REAL r3_24, i3_24; REAL r3_28, i3_28; { REAL r4_0, i4_0; REAL r4_8, i4_8; REAL r4_16, i4_16; REAL r4_24, i4_24; { REAL r5_0, i5_0; REAL r5_16, i5_16; r5_0 = c_re(jp[0 * m]); i5_0 = c_im(jp[0 * m]); wr = c_re(W[16 * l1]); wi = c_im(W[16 * l1]); tmpr = c_re(jp[16 * m]); tmpi = c_im(jp[16 * m]); r5_16 = ((wr * tmpr) - (wi * tmpi)); i5_16 = ((wi * tmpr) + (wr * tmpi)); r4_0 = (r5_0 + r5_16); i4_0 = (i5_0 + i5_16); r4_16 = (r5_0 - r5_16); i4_16 = (i5_0 - i5_16); } { REAL r5_8, i5_8; REAL r5_24, i5_24; wr = c_re(W[8 * l1]); wi = c_im(W[8 * l1]); tmpr = c_re(jp[8 * m]); tmpi = c_im(jp[8 * m]); r5_8 = ((wr * tmpr) - (wi * tmpi)); i5_8 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[24 * l1]); wi = c_im(W[24 * l1]); tmpr = c_re(jp[24 * m]); tmpi = c_im(jp[24 * m]); r5_24 = ((wr * tmpr) - (wi * tmpi)); i5_24 = ((wi * tmpr) + (wr * tmpi)); r4_8 = (r5_8 + r5_24); i4_8 = (i5_8 + i5_24); r4_24 = (r5_8 - r5_24); i4_24 = (i5_8 - i5_24); } r3_0 = (r4_0 + r4_8); i3_0 = (i4_0 + i4_8); r3_16 = (r4_0 - r4_8); i3_16 = (i4_0 - i4_8); r3_8 = (r4_16 + i4_24); i3_8 = (i4_16 - r4_24); r3_24 = (r4_16 - i4_24); i3_24 = (i4_16 + r4_24); } { REAL r4_4, i4_4; REAL r4_12, i4_12; REAL r4_20, i4_20; REAL r4_28, i4_28; { REAL r5_4, i5_4; REAL r5_20, i5_20; wr = c_re(W[4 * l1]); wi = c_im(W[4 * l1]); tmpr = c_re(jp[4 * m]); tmpi = c_im(jp[4 * m]); r5_4 = ((wr * tmpr) - (wi * tmpi)); i5_4 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[20 * l1]); wi = c_im(W[20 * l1]); tmpr = c_re(jp[20 * m]); tmpi = c_im(jp[20 * m]); r5_20 = ((wr * tmpr) - (wi * tmpi)); i5_20 = ((wi * tmpr) + (wr * tmpi)); r4_4 = (r5_4 + r5_20); i4_4 = (i5_4 + i5_20); r4_20 = (r5_4
- r5_20); i4_20 = (i5_4 - i5_20); } { REAL r5_12, i5_12; REAL r5_28, i5_28; wr = c_re(W[12 * l1]); wi = c_im(W[12 * l1]); tmpr = c_re(jp[12 * m]); tmpi = c_im(jp[12 * m]); r5_12 = ((wr * tmpr) - (wi * tmpi)); i5_12 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[28 * l1]); wi = c_im(W[28 * l1]); tmpr = c_re(jp[28 * m]); tmpi = c_im(jp[28 * m]); r5_28 = ((wr * tmpr) - (wi * tmpi)); i5_28 = ((wi * tmpr) + (wr * tmpi)); r4_12 = (r5_12 + r5_28); i4_12 = (i5_12 + i5_28); r4_28 = (r5_12 - r5_28); i4_28 = (i5_12 - i5_28); } r3_4 = (r4_4 + r4_12); i3_4 = (i4_4 + i4_12); r3_20 = (r4_4 - r4_12); i3_20 = (i4_4 - i4_12); r3_12 = (r4_20 + i4_28); i3_12 = (i4_20 - r4_28); r3_28 = (r4_20 - i4_28); i3_28 = (i4_20 + r4_28); } r2_0 = (r3_0 + r3_4); i2_0 = (i3_0 + i3_4); r2_16 = (r3_0 - r3_4); i2_16 = (i3_0 - i3_4); tmpr = (0.707106781187 * (r3_12 + i3_12)); tmpi = (0.707106781187 * (i3_12 - r3_12)); r2_4 = (r3_8 + tmpr); i2_4 = (i3_8 + tmpi); r2_20 = (r3_8 - tmpr); i2_20 = (i3_8 - tmpi); r2_8 = (r3_16 + i3_20); i2_8 = (i3_16 - r3_20); r2_24 = (r3_16 - i3_20); i2_24 = (i3_16 + r3_20); tmpr = (0.707106781187 * (i3_28 - r3_28)); tmpi = (0.707106781187 * (r3_28 + i3_28)); r2_12 = (r3_24 + tmpr); i2_12 = (i3_24 - tmpi); r2_28 = (r3_24 - tmpr); i2_28 = (i3_24 + tmpi); } { REAL r3_2, i3_2; REAL r3_6, i3_6; REAL r3_10, i3_10; REAL r3_14, i3_14; REAL r3_18, i3_18; REAL r3_22, i3_22; REAL r3_26, i3_26; REAL r3_30, i3_30; { REAL r4_2, i4_2; REAL r4_10, i4_10; REAL r4_18, i4_18; REAL r4_26, i4_26; { REAL r5_2, i5_2; REAL r5_18, i5_18; wr = c_re(W[2 * l1]); wi = c_im(W[2 * l1]); tmpr = c_re(jp[2 * m]); tmpi = c_im(jp[2 * m]); r5_2 = ((wr * tmpr) - (wi * tmpi)); i5_2 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[18 * l1]); wi = c_im(W[18 * l1]); tmpr = c_re(jp[18 * m]); tmpi = c_im(jp[18 * m]); r5_18 = ((wr * tmpr) - (wi * tmpi)); i5_18 = ((wi * tmpr) + (wr * tmpi)); r4_2 = (r5_2 + r5_18); i4_2 = (i5_2 + i5_18); r4_18 = (r5_2 - r5_18); i4_18 = (i5_2 - i5_18); } { REAL r5_10, i5_10; REAL r5_26, i5_26; wr
= c_re(W[10 * l1]); wi = c_im(W[10 * l1]); tmpr = c_re(jp[10 * m]); tmpi = c_im(jp[10 * m]); r5_10 = ((wr * tmpr) - (wi * tmpi)); i5_10 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[26 * l1]); wi = c_im(W[26 * l1]); tmpr = c_re(jp[26 * m]); tmpi = c_im(jp[26 * m]); r5_26 = ((wr * tmpr) - (wi * tmpi)); i5_26 = ((wi * tmpr) + (wr * tmpi)); r4_10 = (r5_10 + r5_26); i4_10 = (i5_10 + i5_26); r4_26 = (r5_10 - r5_26); i4_26 = (i5_10 - i5_26); } r3_2 = (r4_2 + r4_10); i3_2 = (i4_2 + i4_10); r3_18 = (r4_2 - r4_10); i3_18 = (i4_2 - i4_10); r3_10 = (r4_18 + i4_26); i3_10 = (i4_18 - r4_26); r3_26 = (r4_18 - i4_26); i3_26 = (i4_18 + r4_26); } { REAL r4_6, i4_6; REAL r4_14, i4_14; REAL r4_22, i4_22; REAL r4_30, i4_30; { REAL r5_6, i5_6; REAL r5_22, i5_22; wr = c_re(W[6 * l1]); wi = c_im(W[6 * l1]); tmpr = c_re(jp[6 * m]); tmpi = c_im(jp[6 * m]); r5_6 = ((wr * tmpr) - (wi * tmpi)); i5_6 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[22 * l1]); wi = c_im(W[22 * l1]); tmpr = c_re(jp[22 * m]); tmpi = c_im(jp[22 * m]); r5_22 = ((wr * tmpr) - (wi * tmpi)); i5_22 = ((wi * tmpr) + (wr * tmpi)); r4_6 = (r5_6 + r5_22); i4_6 = (i5_6 + i5_22); r4_22 = (r5_6 - r5_22); i4_22 = (i5_6 - i5_22); } { REAL r5_14, i5_14; REAL r5_30, i5_30; wr = c_re(W[14 * l1]); wi = c_im(W[14 * l1]); tmpr = c_re(jp[14 * m]); tmpi = c_im(jp[14 * m]); r5_14 = ((wr * tmpr) - (wi * tmpi)); i5_14 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[30 * l1]); wi = c_im(W[30 * l1]); tmpr = c_re(jp[30 * m]); tmpi = c_im(jp[30 * m]); r5_30 = ((wr * tmpr) - (wi * tmpi)); i5_30 = ((wi * tmpr) + (wr * tmpi)); r4_14 = (r5_14 + r5_30); i4_14 = (i5_14 + i5_30); r4_30 = (r5_14 - r5_30); i4_30 = (i5_14 - i5_30); } r3_6 = (r4_6 + r4_14); i3_6 = (i4_6 + i4_14); r3_22 = (r4_6 - r4_14); i3_22 = (i4_6 - i4_14); r3_14 = (r4_22 + i4_30); i3_14 = (i4_22 - r4_30); r3_30 = (r4_22 - i4_30); i3_30 = (i4_22 + r4_30); } r2_2 = (r3_2 + r3_6); i2_2 = (i3_2 + i3_6); r2_18 = (r3_2 - r3_6); i2_18 = (i3_2 - i3_6); tmpr = (0.707106781187 * (r3_14 + i3_14)); tmpi =
(0.707106781187 * (i3_14 - r3_14)); r2_6 = (r3_10 + tmpr); i2_6 = (i3_10 + tmpi); r2_22 = (r3_10 - tmpr); i2_22 = (i3_10 - tmpi); r2_10 = (r3_18 + i3_22); i2_10 = (i3_18 - r3_22); r2_26 = (r3_18 - i3_22); i2_26 = (i3_18 + r3_22); tmpr = (0.707106781187 * (i3_30 - r3_30)); tmpi = (0.707106781187 * (r3_30 + i3_30)); r2_14 = (r3_26 + tmpr); i2_14 = (i3_26 - tmpi); r2_30 = (r3_26 - tmpr); i2_30 = (i3_26 + tmpi); } r1_0 = (r2_0 + r2_2); i1_0 = (i2_0 + i2_2); r1_16 = (r2_0 - r2_2); i1_16 = (i2_0 - i2_2); tmpr = ((0.923879532511 * r2_6) + (0.382683432365 * i2_6)); tmpi = ((0.923879532511 * i2_6) - (0.382683432365 * r2_6)); r1_2 = (r2_4 + tmpr); i1_2 = (i2_4 + tmpi); r1_18 = (r2_4 - tmpr); i1_18 = (i2_4 - tmpi); tmpr = (0.707106781187 * (r2_10 + i2_10)); tmpi = (0.707106781187 * (i2_10 - r2_10)); r1_4 = (r2_8 + tmpr); i1_4 = (i2_8 + tmpi); r1_20 = (r2_8 - tmpr); i1_20 = (i2_8 - tmpi); tmpr = ((0.382683432365 * r2_14) + (0.923879532511 * i2_14)); tmpi = ((0.382683432365 * i2_14) - (0.923879532511 * r2_14)); r1_6 = (r2_12 + tmpr); i1_6 = (i2_12 + tmpi); r1_22 = (r2_12 - tmpr); i1_22 = (i2_12 - tmpi); r1_8 = (r2_16 + i2_18); i1_8 = (i2_16 - r2_18); r1_24 = (r2_16 - i2_18); i1_24 = (i2_16 + r2_18); tmpr = ((0.923879532511 * i2_22) - (0.382683432365 * r2_22)); tmpi = ((0.923879532511 * r2_22) + (0.382683432365 * i2_22)); r1_10 = (r2_20 + tmpr); i1_10 = (i2_20 - tmpi); r1_26 = (r2_20 - tmpr); i1_26 = (i2_20 + tmpi); tmpr = (0.707106781187 * (i2_26 - r2_26)); tmpi = (0.707106781187 * (r2_26 + i2_26)); r1_12 = (r2_24 + tmpr); i1_12 = (i2_24 - tmpi); r1_28 = (r2_24 - tmpr); i1_28 = (i2_24 + tmpi); tmpr = ((0.382683432365 * i2_30) - (0.923879532511 * r2_30)); tmpi = ((0.382683432365 * r2_30) + (0.923879532511 * i2_30)); r1_14 = (r2_28 + tmpr); i1_14 = (i2_28 - tmpi); r1_30 = (r2_28 - tmpr); i1_30 = (i2_28 + tmpi); } { REAL r2_1, i2_1; REAL r2_3, i2_3; REAL r2_5, i2_5; REAL r2_7, i2_7; REAL r2_9, i2_9; REAL r2_11, i2_11; REAL r2_13, i2_13; REAL r2_15, i2_15; REAL r2_17, i2_17; REAL
r2_19, i2_19; REAL r2_21, i2_21; REAL r2_23, i2_23; REAL r2_25, i2_25; REAL r2_27, i2_27; REAL r2_29, i2_29; REAL r2_31, i2_31; { REAL r3_1, i3_1; REAL r3_5, i3_5; REAL r3_9, i3_9; REAL r3_13, i3_13; REAL r3_17, i3_17; REAL r3_21, i3_21; REAL r3_25, i3_25; REAL r3_29, i3_29; { REAL r4_1, i4_1; REAL r4_9, i4_9; REAL r4_17, i4_17; REAL r4_25, i4_25; { REAL r5_1, i5_1; REAL r5_17, i5_17; wr = c_re(W[1 * l1]); wi = c_im(W[1 * l1]); tmpr = c_re(jp[1 * m]); tmpi = c_im(jp[1 * m]); r5_1 = ((wr * tmpr) - (wi * tmpi)); i5_1 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[17 * l1]); wi = c_im(W[17 * l1]); tmpr = c_re(jp[17 * m]); tmpi = c_im(jp[17 * m]); r5_17 = ((wr * tmpr) - (wi * tmpi)); i5_17 = ((wi * tmpr) + (wr * tmpi)); r4_1 = (r5_1 + r5_17); i4_1 = (i5_1 + i5_17); r4_17 = (r5_1 - r5_17); i4_17 = (i5_1 - i5_17); } { REAL r5_9, i5_9; REAL r5_25, i5_25; wr = c_re(W[9 * l1]); wi = c_im(W[9 * l1]); tmpr = c_re(jp[9 * m]); tmpi = c_im(jp[9 * m]); r5_9 = ((wr * tmpr) - (wi * tmpi)); i5_9 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[25 * l1]); wi = c_im(W[25 * l1]); tmpr = c_re(jp[25 * m]); tmpi = c_im(jp[25 * m]); r5_25 = ((wr * tmpr) - (wi * tmpi)); i5_25 = ((wi * tmpr) + (wr * tmpi)); r4_9 = (r5_9 + r5_25); i4_9 = (i5_9 + i5_25); r4_25 = (r5_9 - r5_25); i4_25 = (i5_9 - i5_25); } r3_1 = (r4_1 + r4_9); i3_1 = (i4_1 + i4_9); r3_17 = (r4_1 - r4_9); i3_17 = (i4_1 - i4_9); r3_9 = (r4_17 + i4_25); i3_9 = (i4_17 - r4_25); r3_25 = (r4_17 - i4_25); i3_25 = (i4_17 + r4_25); } { REAL r4_5, i4_5; REAL r4_13, i4_13; REAL r4_21, i4_21; REAL r4_29, i4_29; { REAL r5_5, i5_5; REAL r5_21, i5_21; wr = c_re(W[5 * l1]); wi = c_im(W[5 * l1]); tmpr = c_re(jp[5 * m]); tmpi = c_im(jp[5 * m]); r5_5 = ((wr * tmpr) - (wi * tmpi)); i5_5 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[21 * l1]); wi = c_im(W[21 * l1]); tmpr = c_re(jp[21 * m]); tmpi = c_im(jp[21 * m]); r5_21 = ((wr * tmpr) - (wi * tmpi)); i5_21 = ((wi * tmpr) + (wr * tmpi)); r4_5 = (r5_5 + r5_21); i4_5 = (i5_5 + i5_21); r4_21 = (r5_5 - r5_21); i4_21
= (i5_5 - i5_21); } { REAL r5_13, i5_13; REAL r5_29, i5_29; wr = c_re(W[13 * l1]); wi = c_im(W[13 * l1]); tmpr = c_re(jp[13 * m]); tmpi = c_im(jp[13 * m]); r5_13 = ((wr * tmpr) - (wi * tmpi)); i5_13 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[29 * l1]); wi = c_im(W[29 * l1]); tmpr = c_re(jp[29 * m]); tmpi = c_im(jp[29 * m]); r5_29 = ((wr * tmpr) - (wi * tmpi)); i5_29 = ((wi * tmpr) + (wr * tmpi)); r4_13 = (r5_13 + r5_29); i4_13 = (i5_13 + i5_29); r4_29 = (r5_13 - r5_29); i4_29 = (i5_13 - i5_29); } r3_5 = (r4_5 + r4_13); i3_5 = (i4_5 + i4_13); r3_21 = (r4_5 - r4_13); i3_21 = (i4_5 - i4_13); r3_13 = (r4_21 + i4_29); i3_13 = (i4_21 - r4_29); r3_29 = (r4_21 - i4_29); i3_29 = (i4_21 + r4_29); } r2_1 = (r3_1 + r3_5); i2_1 = (i3_1 + i3_5); r2_17 = (r3_1 - r3_5); i2_17 = (i3_1 - i3_5); tmpr = (0.707106781187 * (r3_13 + i3_13)); tmpi = (0.707106781187 * (i3_13 - r3_13)); r2_5 = (r3_9 + tmpr); i2_5 = (i3_9 + tmpi); r2_21 = (r3_9 - tmpr); i2_21 = (i3_9 - tmpi); r2_9 = (r3_17 + i3_21); i2_9 = (i3_17 - r3_21); r2_25 = (r3_17 - i3_21); i2_25 = (i3_17 + r3_21); tmpr = (0.707106781187 * (i3_29 - r3_29)); tmpi = (0.707106781187 * (r3_29 + i3_29)); r2_13 = (r3_25 + tmpr); i2_13 = (i3_25 - tmpi); r2_29 = (r3_25 - tmpr); i2_29 = (i3_25 + tmpi); } { REAL r3_3, i3_3; REAL r3_7, i3_7; REAL r3_11, i3_11; REAL r3_15, i3_15; REAL r3_19, i3_19; REAL r3_23, i3_23; REAL r3_27, i3_27; REAL r3_31, i3_31; { REAL r4_3, i4_3; REAL r4_11, i4_11; REAL r4_19, i4_19; REAL r4_27, i4_27; { REAL r5_3, i5_3; REAL r5_19, i5_19; wr = c_re(W[3 * l1]); wi = c_im(W[3 * l1]); tmpr = c_re(jp[3 * m]); tmpi = c_im(jp[3 * m]); r5_3 = ((wr * tmpr) - (wi * tmpi)); i5_3 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[19 * l1]); wi = c_im(W[19 * l1]); tmpr = c_re(jp[19 * m]); tmpi = c_im(jp[19 * m]); r5_19 = ((wr * tmpr) - (wi * tmpi)); i5_19 = ((wi * tmpr) + (wr * tmpi)); r4_3 = (r5_3 + r5_19); i4_3 = (i5_3 + i5_19); r4_19 = (r5_3 - r5_19); i4_19 = (i5_3 - i5_19); } { REAL r5_11, i5_11; REAL r5_27, i5_27; wr = c_re(W[11 *
l1]); wi = c_im(W[11 * l1]); tmpr = c_re(jp[11 * m]); tmpi = c_im(jp[11 * m]); r5_11 = ((wr * tmpr) - (wi * tmpi)); i5_11 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[27 * l1]); wi = c_im(W[27 * l1]); tmpr = c_re(jp[27 * m]); tmpi = c_im(jp[27 * m]); r5_27 = ((wr * tmpr) - (wi * tmpi)); i5_27 = ((wi * tmpr) + (wr * tmpi)); r4_11 = (r5_11 + r5_27); i4_11 = (i5_11 + i5_27); r4_27 = (r5_11 - r5_27); i4_27 = (i5_11 - i5_27); } r3_3 = (r4_3 + r4_11); i3_3 = (i4_3 + i4_11); r3_19 = (r4_3 - r4_11); i3_19 = (i4_3 - i4_11); r3_11 = (r4_19 + i4_27); i3_11 = (i4_19 - r4_27); r3_27 = (r4_19 - i4_27); i3_27 = (i4_19 + r4_27); } { REAL r4_7, i4_7; REAL r4_15, i4_15; REAL r4_23, i4_23; REAL r4_31, i4_31; { REAL r5_7, i5_7; REAL r5_23, i5_23; wr = c_re(W[7 * l1]); wi = c_im(W[7 * l1]); tmpr = c_re(jp[7 * m]); tmpi = c_im(jp[7 * m]); r5_7 = ((wr * tmpr) - (wi * tmpi)); i5_7 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[23 * l1]); wi = c_im(W[23 * l1]); tmpr = c_re(jp[23 * m]); tmpi = c_im(jp[23 * m]); r5_23 = ((wr * tmpr) - (wi * tmpi)); i5_23 = ((wi * tmpr) + (wr * tmpi)); r4_7 = (r5_7 + r5_23); i4_7 = (i5_7 + i5_23); r4_23 = (r5_7 - r5_23); i4_23 = (i5_7 - i5_23); } { REAL r5_15, i5_15; REAL r5_31, i5_31; wr = c_re(W[15 * l1]); wi = c_im(W[15 * l1]); tmpr = c_re(jp[15 * m]); tmpi = c_im(jp[15 * m]); r5_15 = ((wr * tmpr) - (wi * tmpi)); i5_15 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[31 * l1]); wi = c_im(W[31 * l1]); tmpr = c_re(jp[31 * m]); tmpi = c_im(jp[31 * m]); r5_31 = ((wr * tmpr) - (wi * tmpi)); i5_31 = ((wi * tmpr) + (wr * tmpi)); r4_15 = (r5_15 + r5_31); i4_15 = (i5_15 + i5_31); r4_31 = (r5_15 - r5_31); i4_31 = (i5_15 - i5_31); } r3_7 = (r4_7 + r4_15); i3_7 = (i4_7 + i4_15); r3_23 = (r4_7 - r4_15); i3_23 = (i4_7 - i4_15); r3_15 = (r4_23 + i4_31); i3_15 = (i4_23 - r4_31); r3_31 = (r4_23 - i4_31); i3_31 = (i4_23 + r4_31); } r2_3 = (r3_3 + r3_7); i2_3 = (i3_3 + i3_7); r2_19 = (r3_3 - r3_7); i2_19 = (i3_3 - i3_7); tmpr = (0.707106781187 * (r3_15 + i3_15)); tmpi = (0.707106781187
* (i3_15 - r3_15)); r2_7 = (r3_11 + tmpr); i2_7 = (i3_11 + tmpi); r2_23 = (r3_11 - tmpr); i2_23 = (i3_11 - tmpi); r2_11 = (r3_19 + i3_23); i2_11 = (i3_19 - r3_23); r2_27 = (r3_19 - i3_23); i2_27 = (i3_19 + r3_23); tmpr = (0.707106781187 * (i3_31 - r3_31)); tmpi = (0.707106781187 * (r3_31 + i3_31)); r2_15 = (r3_27 + tmpr); i2_15 = (i3_27 - tmpi); r2_31 = (r3_27 - tmpr); i2_31 = (i3_27 + tmpi); } r1_1 = (r2_1 + r2_3); i1_1 = (i2_1 + i2_3); r1_17 = (r2_1 - r2_3); i1_17 = (i2_1 - i2_3); tmpr = ((0.923879532511 * r2_7) + (0.382683432365 * i2_7)); tmpi = ((0.923879532511 * i2_7) - (0.382683432365 * r2_7)); r1_3 = (r2_5 + tmpr); i1_3 = (i2_5 + tmpi); r1_19 = (r2_5 - tmpr); i1_19 = (i2_5 - tmpi); tmpr = (0.707106781187 * (r2_11 + i2_11)); tmpi = (0.707106781187 * (i2_11 - r2_11)); r1_5 = (r2_9 + tmpr); i1_5 = (i2_9 + tmpi); r1_21 = (r2_9 - tmpr); i1_21 = (i2_9 - tmpi); tmpr = ((0.382683432365 * r2_15) + (0.923879532511 * i2_15)); tmpi = ((0.382683432365 * i2_15) - (0.923879532511 * r2_15)); r1_7 = (r2_13 + tmpr); i1_7 = (i2_13 + tmpi); r1_23 = (r2_13 - tmpr); i1_23 = (i2_13 - tmpi); r1_9 = (r2_17 + i2_19); i1_9 = (i2_17 - r2_19); r1_25 = (r2_17 - i2_19); i1_25 = (i2_17 + r2_19); tmpr = ((0.923879532511 * i2_23) - (0.382683432365 * r2_23)); tmpi = ((0.923879532511 * r2_23) + (0.382683432365 * i2_23)); r1_11 = (r2_21 + tmpr); i1_11 = (i2_21 - tmpi); r1_27 = (r2_21 - tmpr); i1_27 = (i2_21 + tmpi); tmpr = (0.707106781187 * (i2_27 - r2_27)); tmpi = (0.707106781187 * (r2_27 + i2_27)); r1_13 = (r2_25 + tmpr); i1_13 = (i2_25 - tmpi); r1_29 = (r2_25 - tmpr); i1_29 = (i2_25 + tmpi); tmpr = ((0.382683432365 * i2_31) - (0.923879532511 * r2_31)); tmpi = ((0.382683432365 * r2_31) + (0.923879532511 * i2_31)); r1_15 = (r2_29 + tmpr); i1_15 = (i2_29 - tmpi); r1_31 = (r2_29 - tmpr); i1_31 = (i2_29 + tmpi); } c_re(kp[0 * m]) = (r1_0 + r1_1); c_im(kp[0 * m]) = (i1_0 + i1_1); c_re(kp[16 * m]) = (r1_0 - r1_1); c_im(kp[16 * m]) = (i1_0 - i1_1); tmpr = ((0.980785280403 * r1_3) + (0.195090322016 *
i1_3)); tmpi = ((0.980785280403 * i1_3) - (0.195090322016 * r1_3)); c_re(kp[1 * m]) = (r1_2 + tmpr); c_im(kp[1 * m]) = (i1_2 + tmpi); c_re(kp[17 * m]) = (r1_2 - tmpr); c_im(kp[17 * m]) = (i1_2 - tmpi); tmpr = ((0.923879532511 * r1_5) + (0.382683432365 * i1_5)); tmpi = ((0.923879532511 * i1_5) - (0.382683432365 * r1_5)); c_re(kp[2 * m]) = (r1_4 + tmpr); c_im(kp[2 * m]) = (i1_4 + tmpi); c_re(kp[18 * m]) = (r1_4 - tmpr); c_im(kp[18 * m]) = (i1_4 - tmpi); tmpr = ((0.831469612303 * r1_7) + (0.55557023302 * i1_7)); tmpi = ((0.831469612303 * i1_7) - (0.55557023302 * r1_7)); c_re(kp[3 * m]) = (r1_6 + tmpr); c_im(kp[3 * m]) = (i1_6 + tmpi); c_re(kp[19 * m]) = (r1_6 - tmpr); c_im(kp[19 * m]) = (i1_6 - tmpi); tmpr = (0.707106781187 * (r1_9 + i1_9)); tmpi = (0.707106781187 * (i1_9 - r1_9)); c_re(kp[4 * m]) = (r1_8 + tmpr); c_im(kp[4 * m]) = (i1_8 + tmpi); c_re(kp[20 * m]) = (r1_8 - tmpr); c_im(kp[20 * m]) = (i1_8 - tmpi); tmpr = ((0.55557023302 * r1_11) + (0.831469612303 * i1_11)); tmpi = ((0.55557023302 * i1_11) - (0.831469612303 * r1_11)); c_re(kp[5 * m]) = (r1_10 + tmpr); c_im(kp[5 * m]) = (i1_10 + tmpi); c_re(kp[21 * m]) = (r1_10 - tmpr); c_im(kp[21 * m]) = (i1_10 - tmpi); tmpr = ((0.382683432365 * r1_13) + (0.923879532511 * i1_13)); tmpi = ((0.382683432365 * i1_13) - (0.923879532511 * r1_13)); c_re(kp[6 * m]) = (r1_12 + tmpr); c_im(kp[6 * m]) = (i1_12 + tmpi); c_re(kp[22 * m]) = (r1_12 - tmpr); c_im(kp[22 * m]) = (i1_12 - tmpi); tmpr = ((0.195090322016 * r1_15) + (0.980785280403 * i1_15)); tmpi = ((0.195090322016 * i1_15) - (0.980785280403 * r1_15)); c_re(kp[7 * m]) = (r1_14 + tmpr); c_im(kp[7 * m]) = (i1_14 + tmpi); c_re(kp[23 * m]) = (r1_14 - tmpr); c_im(kp[23 * m]) = (i1_14 - tmpi); c_re(kp[8 * m]) = (r1_16 + i1_17); c_im(kp[8 * m]) = (i1_16 - r1_17); c_re(kp[24 * m]) = (r1_16 - i1_17); c_im(kp[24 * m]) = (i1_16 + r1_17); tmpr = ((0.980785280403 * i1_19) - (0.195090322016 * r1_19)); tmpi = ((0.980785280403 * r1_19) + (0.195090322016 * i1_19)); c_re(kp[9 * m]) = (r1_18
+ tmpr); c_im(kp[9 * m]) = (i1_18 - tmpi); c_re(kp[25 * m]) = (r1_18 - tmpr); c_im(kp[25 * m]) = (i1_18 + tmpi); tmpr = ((0.923879532511 * i1_21) - (0.382683432365 * r1_21)); tmpi = ((0.923879532511 * r1_21) + (0.382683432365 * i1_21)); c_re(kp[10 * m]) = (r1_20 + tmpr); c_im(kp[10 * m]) = (i1_20 - tmpi); c_re(kp[26 * m]) = (r1_20 - tmpr); c_im(kp[26 * m]) = (i1_20 + tmpi); tmpr = ((0.831469612303 * i1_23) - (0.55557023302 * r1_23)); tmpi = ((0.831469612303 * r1_23) + (0.55557023302 * i1_23)); c_re(kp[11 * m]) = (r1_22 + tmpr); c_im(kp[11 * m]) = (i1_22 - tmpi); c_re(kp[27 * m]) = (r1_22 - tmpr); c_im(kp[27 * m]) = (i1_22 + tmpi); tmpr = (0.707106781187 * (i1_25 - r1_25)); tmpi = (0.707106781187 * (r1_25 + i1_25)); c_re(kp[12 * m]) = (r1_24 + tmpr); c_im(kp[12 * m]) = (i1_24 - tmpi); c_re(kp[28 * m]) = (r1_24 - tmpr); c_im(kp[28 * m]) = (i1_24 + tmpi); tmpr = ((0.55557023302 * i1_27) - (0.831469612303 * r1_27)); tmpi = ((0.55557023302 * r1_27) + (0.831469612303 * i1_27)); c_re(kp[13 * m]) = (r1_26 + tmpr); c_im(kp[13 * m]) = (i1_26 - tmpi); c_re(kp[29 * m]) = (r1_26 - tmpr); c_im(kp[29 * m]) = (i1_26 + tmpi); tmpr = ((0.382683432365 * i1_29) - (0.923879532511 * r1_29)); tmpi = ((0.382683432365 * r1_29) + (0.923879532511 * i1_29)); c_re(kp[14 * m]) = (r1_28 + tmpr); c_im(kp[14 * m]) = (i1_28 - tmpi); c_re(kp[30 * m]) = (r1_28 - tmpr); c_im(kp[30 * m]) = (i1_28 + tmpi); tmpr = ((0.195090322016 * i1_31) - (0.980785280403 * r1_31)); tmpi = ((0.195090322016 * r1_31) + (0.980785280403 * i1_31)); c_re(kp[15 * m]) = (r1_30 + tmpr); c_im(kp[15 * m]) = (i1_30 - tmpi); c_re(kp[31 * m]) = (r1_30 - tmpr); c_im(kp[31 * m]) = (i1_30 + tmpi); } } } else {
/* Recursive case: halve the index range and process the halves as
 * independent OpenMP tasks; taskwait joins them before returning. */
int ab = (a + b) / 2;
#pragma omp task
fft_twiddle_32(a, ab, in, out, W, nW, nWdn, m);
#pragma omp task
fft_twiddle_32(ab, b, in, out, W, nW, nWdn, m);
#pragma omp taskwait
} }
/* fft_twiddle_32_seq: sequential variant of fft_twiddle_32 — same twiddle
 * multiply + unrolled 32-point butterfly leaf, plain recursion instead of
 * task spawning.  Definition continues beyond this chunk. */
void fft_twiddle_32_seq(int a, int b, COMPLEX * in, COMPLEX * out, COMPLEX * W, int nW, int nWdn, int m) { int l1, i; COMPLEX *jp, *kp; REAL tmpr, tmpi, wr, wi; if ((b - a) < 128) { for (i = a, l1 = nWdn * i, kp = out + i; i < b; i++, l1 += nWdn, kp++) { jp = in + i; { REAL r1_0, i1_0; REAL r1_1, i1_1; REAL r1_2, i1_2; REAL r1_3, i1_3; REAL r1_4, i1_4; REAL r1_5, i1_5; REAL r1_6, i1_6; REAL r1_7, i1_7; REAL r1_8, i1_8; REAL r1_9, i1_9; REAL r1_10, i1_10; REAL r1_11, i1_11; REAL r1_12, i1_12; REAL r1_13, i1_13; REAL r1_14, i1_14; REAL r1_15, i1_15; REAL r1_16, i1_16; REAL r1_17, i1_17; REAL r1_18, i1_18; REAL r1_19, i1_19; REAL r1_20, i1_20; REAL r1_21, i1_21; REAL r1_22, i1_22; REAL r1_23, i1_23; REAL r1_24, i1_24; REAL r1_25, i1_25; REAL r1_26, i1_26; REAL r1_27, i1_27; REAL r1_28, i1_28; REAL r1_29, i1_29; REAL r1_30, i1_30; REAL r1_31, i1_31; { REAL r2_0, i2_0; REAL r2_2, i2_2; REAL r2_4, i2_4; REAL r2_6, i2_6; REAL r2_8, i2_8; REAL r2_10, i2_10; REAL r2_12, i2_12; REAL r2_14, i2_14; REAL r2_16, i2_16; REAL r2_18, i2_18; REAL r2_20, i2_20; REAL r2_22, i2_22; REAL r2_24, i2_24; REAL r2_26, i2_26; REAL r2_28, i2_28; REAL r2_30, i2_30; { REAL r3_0, i3_0; REAL r3_4, i3_4; REAL r3_8, i3_8; REAL r3_12, i3_12; REAL r3_16, i3_16; REAL r3_20, i3_20; REAL r3_24, i3_24; REAL r3_28, i3_28; { REAL r4_0, i4_0; REAL r4_8, i4_8; REAL r4_16, i4_16; REAL r4_24, i4_24; { REAL r5_0, i5_0; REAL r5_16, i5_16; r5_0 = c_re(jp[0 * m]); i5_0 = c_im(jp[0 * m]); wr = c_re(W[16 * l1]); wi = c_im(W[16 * l1]); tmpr = c_re(jp[16 * m]); tmpi = c_im(jp[16 * m]); r5_16 = ((wr * tmpr) - (wi * tmpi)); i5_16 = ((wi * tmpr) + (wr * tmpi)); r4_0 = (r5_0 + r5_16); i4_0 = (i5_0 + i5_16); r4_16 = (r5_0 - r5_16); i4_16 = (i5_0 - i5_16); } { REAL r5_8, i5_8; REAL r5_24, i5_24; wr = c_re(W[8 * l1]); wi = c_im(W[8 * l1]); tmpr = c_re(jp[8 * m]); tmpi = c_im(jp[8 * m]); r5_8 = ((wr * tmpr) - (wi * tmpi)); i5_8 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[24 * l1]); wi = c_im(W[24 * l1]); tmpr = c_re(jp[24 * m]); tmpi = c_im(jp[24 * m]); r5_24 = ((wr * tmpr) - (wi * tmpi)); i5_24 = ((wi * tmpr) + (wr * tmpi)); r4_8 = (r5_8 + r5_24); i4_8 = (i5_8 + i5_24); r4_24
= (r5_8 - r5_24); i4_24 = (i5_8 - i5_24); } r3_0 = (r4_0 + r4_8); i3_0 = (i4_0 + i4_8); r3_16 = (r4_0 - r4_8); i3_16 = (i4_0 - i4_8); r3_8 = (r4_16 + i4_24); i3_8 = (i4_16 - r4_24); r3_24 = (r4_16 - i4_24); i3_24 = (i4_16 + r4_24); } { REAL r4_4, i4_4; REAL r4_12, i4_12; REAL r4_20, i4_20; REAL r4_28, i4_28; { REAL r5_4, i5_4; REAL r5_20, i5_20; wr = c_re(W[4 * l1]); wi = c_im(W[4 * l1]); tmpr = c_re(jp[4 * m]); tmpi = c_im(jp[4 * m]); r5_4 = ((wr * tmpr) - (wi * tmpi)); i5_4 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[20 * l1]); wi = c_im(W[20 * l1]); tmpr = c_re(jp[20 * m]); tmpi = c_im(jp[20 * m]); r5_20 = ((wr * tmpr) - (wi * tmpi)); i5_20 = ((wi * tmpr) + (wr * tmpi)); r4_4 = (r5_4 + r5_20); i4_4 = (i5_4 + i5_20); r4_20 = (r5_4 - r5_20); i4_20 = (i5_4 - i5_20); } { REAL r5_12, i5_12; REAL r5_28, i5_28; wr = c_re(W[12 * l1]); wi = c_im(W[12 * l1]); tmpr = c_re(jp[12 * m]); tmpi = c_im(jp[12 * m]); r5_12 = ((wr * tmpr) - (wi * tmpi)); i5_12 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[28 * l1]); wi = c_im(W[28 * l1]); tmpr = c_re(jp[28 * m]); tmpi = c_im(jp[28 * m]); r5_28 = ((wr * tmpr) - (wi * tmpi)); i5_28 = ((wi * tmpr) + (wr * tmpi)); r4_12 = (r5_12 + r5_28); i4_12 = (i5_12 + i5_28); r4_28 = (r5_12 - r5_28); i4_28 = (i5_12 - i5_28); } r3_4 = (r4_4 + r4_12); i3_4 = (i4_4 + i4_12); r3_20 = (r4_4 - r4_12); i3_20 = (i4_4 - i4_12); r3_12 = (r4_20 + i4_28); i3_12 = (i4_20 - r4_28); r3_28 = (r4_20 - i4_28); i3_28 = (i4_20 + r4_28); } r2_0 = (r3_0 + r3_4); i2_0 = (i3_0 + i3_4); r2_16 = (r3_0 - r3_4); i2_16 = (i3_0 - i3_4); tmpr = (0.707106781187 * (r3_12 + i3_12)); tmpi = (0.707106781187 * (i3_12 - r3_12)); r2_4 = (r3_8 + tmpr); i2_4 = (i3_8 + tmpi); r2_20 = (r3_8 - tmpr); i2_20 = (i3_8 - tmpi); r2_8 = (r3_16 + i3_20); i2_8 = (i3_16 - r3_20); r2_24 = (r3_16 - i3_20); i2_24 = (i3_16 + r3_20); tmpr = (0.707106781187 * (i3_28 - r3_28)); tmpi = (0.707106781187 * (r3_28 + i3_28)); r2_12 = (r3_24 + tmpr); i2_12 = (i3_24 - tmpi); r2_28 = (r3_24 - tmpr); i2_28 = (i3_24 + 
tmpi); } { REAL r3_2, i3_2; REAL r3_6, i3_6; REAL r3_10, i3_10; REAL r3_14, i3_14; REAL r3_18, i3_18; REAL r3_22, i3_22; REAL r3_26, i3_26; REAL r3_30, i3_30; { REAL r4_2, i4_2; REAL r4_10, i4_10; REAL r4_18, i4_18; REAL r4_26, i4_26; { REAL r5_2, i5_2; REAL r5_18, i5_18; wr = c_re(W[2 * l1]); wi = c_im(W[2 * l1]); tmpr = c_re(jp[2 * m]); tmpi = c_im(jp[2 * m]); r5_2 = ((wr * tmpr) - (wi * tmpi)); i5_2 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[18 * l1]); wi = c_im(W[18 * l1]); tmpr = c_re(jp[18 * m]); tmpi = c_im(jp[18 * m]); r5_18 = ((wr * tmpr) - (wi * tmpi)); i5_18 = ((wi * tmpr) + (wr * tmpi)); r4_2 = (r5_2 + r5_18); i4_2 = (i5_2 + i5_18); r4_18 = (r5_2 - r5_18); i4_18 = (i5_2 - i5_18); } { REAL r5_10, i5_10; REAL r5_26, i5_26; wr = c_re(W[10 * l1]); wi = c_im(W[10 * l1]); tmpr = c_re(jp[10 * m]); tmpi = c_im(jp[10 * m]); r5_10 = ((wr * tmpr) - (wi * tmpi)); i5_10 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[26 * l1]); wi = c_im(W[26 * l1]); tmpr = c_re(jp[26 * m]); tmpi = c_im(jp[26 * m]); r5_26 = ((wr * tmpr) - (wi * tmpi)); i5_26 = ((wi * tmpr) + (wr * tmpi)); r4_10 = (r5_10 + r5_26); i4_10 = (i5_10 + i5_26); r4_26 = (r5_10 - r5_26); i4_26 = (i5_10 - i5_26); } r3_2 = (r4_2 + r4_10); i3_2 = (i4_2 + i4_10); r3_18 = (r4_2 - r4_10); i3_18 = (i4_2 - i4_10); r3_10 = (r4_18 + i4_26); i3_10 = (i4_18 - r4_26); r3_26 = (r4_18 - i4_26); i3_26 = (i4_18 + r4_26); } { REAL r4_6, i4_6; REAL r4_14, i4_14; REAL r4_22, i4_22; REAL r4_30, i4_30; { REAL r5_6, i5_6; REAL r5_22, i5_22; wr = c_re(W[6 * l1]); wi = c_im(W[6 * l1]); tmpr = c_re(jp[6 * m]); tmpi = c_im(jp[6 * m]); r5_6 = ((wr * tmpr) - (wi * tmpi)); i5_6 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[22 * l1]); wi = c_im(W[22 * l1]); tmpr = c_re(jp[22 * m]); tmpi = c_im(jp[22 * m]); r5_22 = ((wr * tmpr) - (wi * tmpi)); i5_22 = ((wi * tmpr) + (wr * tmpi)); r4_6 = (r5_6 + r5_22); i4_6 = (i5_6 + i5_22); r4_22 = (r5_6 - r5_22); i4_22 = (i5_6 - i5_22); } { REAL r5_14, i5_14; REAL r5_30, i5_30; wr = c_re(W[14 * l1]); wi = 
c_im(W[14 * l1]); tmpr = c_re(jp[14 * m]); tmpi = c_im(jp[14 * m]); r5_14 = ((wr * tmpr) - (wi * tmpi)); i5_14 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[30 * l1]); wi = c_im(W[30 * l1]); tmpr = c_re(jp[30 * m]); tmpi = c_im(jp[30 * m]); r5_30 = ((wr * tmpr) - (wi * tmpi)); i5_30 = ((wi * tmpr) + (wr * tmpi)); r4_14 = (r5_14 + r5_30); i4_14 = (i5_14 + i5_30); r4_30 = (r5_14 - r5_30); i4_30 = (i5_14 - i5_30); } r3_6 = (r4_6 + r4_14); i3_6 = (i4_6 + i4_14); r3_22 = (r4_6 - r4_14); i3_22 = (i4_6 - i4_14); r3_14 = (r4_22 + i4_30); i3_14 = (i4_22 - r4_30); r3_30 = (r4_22 - i4_30); i3_30 = (i4_22 + r4_30); } r2_2 = (r3_2 + r3_6); i2_2 = (i3_2 + i3_6); r2_18 = (r3_2 - r3_6); i2_18 = (i3_2 - i3_6); tmpr = (0.707106781187 * (r3_14 + i3_14)); tmpi = (0.707106781187 * (i3_14 - r3_14)); r2_6 = (r3_10 + tmpr); i2_6 = (i3_10 + tmpi); r2_22 = (r3_10 - tmpr); i2_22 = (i3_10 - tmpi); r2_10 = (r3_18 + i3_22); i2_10 = (i3_18 - r3_22); r2_26 = (r3_18 - i3_22); i2_26 = (i3_18 + r3_22); tmpr = (0.707106781187 * (i3_30 - r3_30)); tmpi = (0.707106781187 * (r3_30 + i3_30)); r2_14 = (r3_26 + tmpr); i2_14 = (i3_26 - tmpi); r2_30 = (r3_26 - tmpr); i2_30 = (i3_26 + tmpi); } r1_0 = (r2_0 + r2_2); i1_0 = (i2_0 + i2_2); r1_16 = (r2_0 - r2_2); i1_16 = (i2_0 - i2_2); tmpr = ((0.923879532511 * r2_6) + (0.382683432365 * i2_6)); tmpi = ((0.923879532511 * i2_6) - (0.382683432365 * r2_6)); r1_2 = (r2_4 + tmpr); i1_2 = (i2_4 + tmpi); r1_18 = (r2_4 - tmpr); i1_18 = (i2_4 - tmpi); tmpr = (0.707106781187 * (r2_10 + i2_10)); tmpi = (0.707106781187 * (i2_10 - r2_10)); r1_4 = (r2_8 + tmpr); i1_4 = (i2_8 + tmpi); r1_20 = (r2_8 - tmpr); i1_20 = (i2_8 - tmpi); tmpr = ((0.382683432365 * r2_14) + (0.923879532511 * i2_14)); tmpi = ((0.382683432365 * i2_14) - (0.923879532511 * r2_14)); r1_6 = (r2_12 + tmpr); i1_6 = (i2_12 + tmpi); r1_22 = (r2_12 - tmpr); i1_22 = (i2_12 - tmpi); r1_8 = (r2_16 + i2_18); i1_8 = (i2_16 - r2_18); r1_24 = (r2_16 - i2_18); i1_24 = (i2_16 + r2_18); tmpr = ((0.923879532511 * i2_22) - 
(0.382683432365 * r2_22)); tmpi = ((0.923879532511 * r2_22) + (0.382683432365 * i2_22)); r1_10 = (r2_20 + tmpr); i1_10 = (i2_20 - tmpi); r1_26 = (r2_20 - tmpr); i1_26 = (i2_20 + tmpi); tmpr = (0.707106781187 * (i2_26 - r2_26)); tmpi = (0.707106781187 * (r2_26 + i2_26)); r1_12 = (r2_24 + tmpr); i1_12 = (i2_24 - tmpi); r1_28 = (r2_24 - tmpr); i1_28 = (i2_24 + tmpi); tmpr = ((0.382683432365 * i2_30) - (0.923879532511 * r2_30)); tmpi = ((0.382683432365 * r2_30) + (0.923879532511 * i2_30)); r1_14 = (r2_28 + tmpr); i1_14 = (i2_28 - tmpi); r1_30 = (r2_28 - tmpr); i1_30 = (i2_28 + tmpi); } { REAL r2_1, i2_1; REAL r2_3, i2_3; REAL r2_5, i2_5; REAL r2_7, i2_7; REAL r2_9, i2_9; REAL r2_11, i2_11; REAL r2_13, i2_13; REAL r2_15, i2_15; REAL r2_17, i2_17; REAL r2_19, i2_19; REAL r2_21, i2_21; REAL r2_23, i2_23; REAL r2_25, i2_25; REAL r2_27, i2_27; REAL r2_29, i2_29; REAL r2_31, i2_31; { REAL r3_1, i3_1; REAL r3_5, i3_5; REAL r3_9, i3_9; REAL r3_13, i3_13; REAL r3_17, i3_17; REAL r3_21, i3_21; REAL r3_25, i3_25; REAL r3_29, i3_29; { REAL r4_1, i4_1; REAL r4_9, i4_9; REAL r4_17, i4_17; REAL r4_25, i4_25; { REAL r5_1, i5_1; REAL r5_17, i5_17; wr = c_re(W[1 * l1]); wi = c_im(W[1 * l1]); tmpr = c_re(jp[1 * m]); tmpi = c_im(jp[1 * m]); r5_1 = ((wr * tmpr) - (wi * tmpi)); i5_1 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[17 * l1]); wi = c_im(W[17 * l1]); tmpr = c_re(jp[17 * m]); tmpi = c_im(jp[17 * m]); r5_17 = ((wr * tmpr) - (wi * tmpi)); i5_17 = ((wi * tmpr) + (wr * tmpi)); r4_1 = (r5_1 + r5_17); i4_1 = (i5_1 + i5_17); r4_17 = (r5_1 - r5_17); i4_17 = (i5_1 - i5_17); } { REAL r5_9, i5_9; REAL r5_25, i5_25; wr = c_re(W[9 * l1]); wi = c_im(W[9 * l1]); tmpr = c_re(jp[9 * m]); tmpi = c_im(jp[9 * m]); r5_9 = ((wr * tmpr) - (wi * tmpi)); i5_9 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[25 * l1]); wi = c_im(W[25 * l1]); tmpr = c_re(jp[25 * m]); tmpi = c_im(jp[25 * m]); r5_25 = ((wr * tmpr) - (wi * tmpi)); i5_25 = ((wi * tmpr) + (wr * tmpi)); r4_9 = (r5_9 + r5_25); i4_9 = (i5_9 + i5_25); r4_25 
= (r5_9 - r5_25); i4_25 = (i5_9 - i5_25); } r3_1 = (r4_1 + r4_9); i3_1 = (i4_1 + i4_9); r3_17 = (r4_1 - r4_9); i3_17 = (i4_1 - i4_9); r3_9 = (r4_17 + i4_25); i3_9 = (i4_17 - r4_25); r3_25 = (r4_17 - i4_25); i3_25 = (i4_17 + r4_25); } { REAL r4_5, i4_5; REAL r4_13, i4_13; REAL r4_21, i4_21; REAL r4_29, i4_29; { REAL r5_5, i5_5; REAL r5_21, i5_21; wr = c_re(W[5 * l1]); wi = c_im(W[5 * l1]); tmpr = c_re(jp[5 * m]); tmpi = c_im(jp[5 * m]); r5_5 = ((wr * tmpr) - (wi * tmpi)); i5_5 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[21 * l1]); wi = c_im(W[21 * l1]); tmpr = c_re(jp[21 * m]); tmpi = c_im(jp[21 * m]); r5_21 = ((wr * tmpr) - (wi * tmpi)); i5_21 = ((wi * tmpr) + (wr * tmpi)); r4_5 = (r5_5 + r5_21); i4_5 = (i5_5 + i5_21); r4_21 = (r5_5 - r5_21); i4_21 = (i5_5 - i5_21); } { REAL r5_13, i5_13; REAL r5_29, i5_29; wr = c_re(W[13 * l1]); wi = c_im(W[13 * l1]); tmpr = c_re(jp[13 * m]); tmpi = c_im(jp[13 * m]); r5_13 = ((wr * tmpr) - (wi * tmpi)); i5_13 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[29 * l1]); wi = c_im(W[29 * l1]); tmpr = c_re(jp[29 * m]); tmpi = c_im(jp[29 * m]); r5_29 = ((wr * tmpr) - (wi * tmpi)); i5_29 = ((wi * tmpr) + (wr * tmpi)); r4_13 = (r5_13 + r5_29); i4_13 = (i5_13 + i5_29); r4_29 = (r5_13 - r5_29); i4_29 = (i5_13 - i5_29); } r3_5 = (r4_5 + r4_13); i3_5 = (i4_5 + i4_13); r3_21 = (r4_5 - r4_13); i3_21 = (i4_5 - i4_13); r3_13 = (r4_21 + i4_29); i3_13 = (i4_21 - r4_29); r3_29 = (r4_21 - i4_29); i3_29 = (i4_21 + r4_29); } r2_1 = (r3_1 + r3_5); i2_1 = (i3_1 + i3_5); r2_17 = (r3_1 - r3_5); i2_17 = (i3_1 - i3_5); tmpr = (0.707106781187 * (r3_13 + i3_13)); tmpi = (0.707106781187 * (i3_13 - r3_13)); r2_5 = (r3_9 + tmpr); i2_5 = (i3_9 + tmpi); r2_21 = (r3_9 - tmpr); i2_21 = (i3_9 - tmpi); r2_9 = (r3_17 + i3_21); i2_9 = (i3_17 - r3_21); r2_25 = (r3_17 - i3_21); i2_25 = (i3_17 + r3_21); tmpr = (0.707106781187 * (i3_29 - r3_29)); tmpi = (0.707106781187 * (r3_29 + i3_29)); r2_13 = (r3_25 + tmpr); i2_13 = (i3_25 - tmpi); r2_29 = (r3_25 - tmpr); i2_29 = (i3_25 + 
tmpi); } { REAL r3_3, i3_3; REAL r3_7, i3_7; REAL r3_11, i3_11; REAL r3_15, i3_15; REAL r3_19, i3_19; REAL r3_23, i3_23; REAL r3_27, i3_27; REAL r3_31, i3_31; { REAL r4_3, i4_3; REAL r4_11, i4_11; REAL r4_19, i4_19; REAL r4_27, i4_27; { REAL r5_3, i5_3; REAL r5_19, i5_19; wr = c_re(W[3 * l1]); wi = c_im(W[3 * l1]); tmpr = c_re(jp[3 * m]); tmpi = c_im(jp[3 * m]); r5_3 = ((wr * tmpr) - (wi * tmpi)); i5_3 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[19 * l1]); wi = c_im(W[19 * l1]); tmpr = c_re(jp[19 * m]); tmpi = c_im(jp[19 * m]); r5_19 = ((wr * tmpr) - (wi * tmpi)); i5_19 = ((wi * tmpr) + (wr * tmpi)); r4_3 = (r5_3 + r5_19); i4_3 = (i5_3 + i5_19); r4_19 = (r5_3 - r5_19); i4_19 = (i5_3 - i5_19); } { REAL r5_11, i5_11; REAL r5_27, i5_27; wr = c_re(W[11 * l1]); wi = c_im(W[11 * l1]); tmpr = c_re(jp[11 * m]); tmpi = c_im(jp[11 * m]); r5_11 = ((wr * tmpr) - (wi * tmpi)); i5_11 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[27 * l1]); wi = c_im(W[27 * l1]); tmpr = c_re(jp[27 * m]); tmpi = c_im(jp[27 * m]); r5_27 = ((wr * tmpr) - (wi * tmpi)); i5_27 = ((wi * tmpr) + (wr * tmpi)); r4_11 = (r5_11 + r5_27); i4_11 = (i5_11 + i5_27); r4_27 = (r5_11 - r5_27); i4_27 = (i5_11 - i5_27); } r3_3 = (r4_3 + r4_11); i3_3 = (i4_3 + i4_11); r3_19 = (r4_3 - r4_11); i3_19 = (i4_3 - i4_11); r3_11 = (r4_19 + i4_27); i3_11 = (i4_19 - r4_27); r3_27 = (r4_19 - i4_27); i3_27 = (i4_19 + r4_27); } { REAL r4_7, i4_7; REAL r4_15, i4_15; REAL r4_23, i4_23; REAL r4_31, i4_31; { REAL r5_7, i5_7; REAL r5_23, i5_23; wr = c_re(W[7 * l1]); wi = c_im(W[7 * l1]); tmpr = c_re(jp[7 * m]); tmpi = c_im(jp[7 * m]); r5_7 = ((wr * tmpr) - (wi * tmpi)); i5_7 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[23 * l1]); wi = c_im(W[23 * l1]); tmpr = c_re(jp[23 * m]); tmpi = c_im(jp[23 * m]); r5_23 = ((wr * tmpr) - (wi * tmpi)); i5_23 = ((wi * tmpr) + (wr * tmpi)); r4_7 = (r5_7 + r5_23); i4_7 = (i5_7 + i5_23); r4_23 = (r5_7 - r5_23); i4_23 = (i5_7 - i5_23); } { REAL r5_15, i5_15; REAL r5_31, i5_31; wr = c_re(W[15 * l1]); wi = 
c_im(W[15 * l1]); tmpr = c_re(jp[15 * m]); tmpi = c_im(jp[15 * m]); r5_15 = ((wr * tmpr) - (wi * tmpi)); i5_15 = ((wi * tmpr) + (wr * tmpi)); wr = c_re(W[31 * l1]); wi = c_im(W[31 * l1]); tmpr = c_re(jp[31 * m]); tmpi = c_im(jp[31 * m]); r5_31 = ((wr * tmpr) - (wi * tmpi)); i5_31 = ((wi * tmpr) + (wr * tmpi)); r4_15 = (r5_15 + r5_31); i4_15 = (i5_15 + i5_31); r4_31 = (r5_15 - r5_31); i4_31 = (i5_15 - i5_31); } r3_7 = (r4_7 + r4_15); i3_7 = (i4_7 + i4_15); r3_23 = (r4_7 - r4_15); i3_23 = (i4_7 - i4_15); r3_15 = (r4_23 + i4_31); i3_15 = (i4_23 - r4_31); r3_31 = (r4_23 - i4_31); i3_31 = (i4_23 + r4_31); } r2_3 = (r3_3 + r3_7); i2_3 = (i3_3 + i3_7); r2_19 = (r3_3 - r3_7); i2_19 = (i3_3 - i3_7); tmpr = (0.707106781187 * (r3_15 + i3_15)); tmpi = (0.707106781187 * (i3_15 - r3_15)); r2_7 = (r3_11 + tmpr); i2_7 = (i3_11 + tmpi); r2_23 = (r3_11 - tmpr); i2_23 = (i3_11 - tmpi); r2_11 = (r3_19 + i3_23); i2_11 = (i3_19 - r3_23); r2_27 = (r3_19 - i3_23); i2_27 = (i3_19 + r3_23); tmpr = (0.707106781187 * (i3_31 - r3_31)); tmpi = (0.707106781187 * (r3_31 + i3_31)); r2_15 = (r3_27 + tmpr); i2_15 = (i3_27 - tmpi); r2_31 = (r3_27 - tmpr); i2_31 = (i3_27 + tmpi); } r1_1 = (r2_1 + r2_3); i1_1 = (i2_1 + i2_3); r1_17 = (r2_1 - r2_3); i1_17 = (i2_1 - i2_3); tmpr = ((0.923879532511 * r2_7) + (0.382683432365 * i2_7)); tmpi = ((0.923879532511 * i2_7) - (0.382683432365 * r2_7)); r1_3 = (r2_5 + tmpr); i1_3 = (i2_5 + tmpi); r1_19 = (r2_5 - tmpr); i1_19 = (i2_5 - tmpi); tmpr = (0.707106781187 * (r2_11 + i2_11)); tmpi = (0.707106781187 * (i2_11 - r2_11)); r1_5 = (r2_9 + tmpr); i1_5 = (i2_9 + tmpi); r1_21 = (r2_9 - tmpr); i1_21 = (i2_9 - tmpi); tmpr = ((0.382683432365 * r2_15) + (0.923879532511 * i2_15)); tmpi = ((0.382683432365 * i2_15) - (0.923879532511 * r2_15)); r1_7 = (r2_13 + tmpr); i1_7 = (i2_13 + tmpi); r1_23 = (r2_13 - tmpr); i1_23 = (i2_13 - tmpi); r1_9 = (r2_17 + i2_19); i1_9 = (i2_17 - r2_19); r1_25 = (r2_17 - i2_19); i1_25 = (i2_17 + r2_19); tmpr = ((0.923879532511 * i2_23) - 
(0.382683432365 * r2_23)); tmpi = ((0.923879532511 * r2_23) + (0.382683432365 * i2_23)); r1_11 = (r2_21 + tmpr); i1_11 = (i2_21 - tmpi); r1_27 = (r2_21 - tmpr); i1_27 = (i2_21 + tmpi); tmpr = (0.707106781187 * (i2_27 - r2_27)); tmpi = (0.707106781187 * (r2_27 + i2_27)); r1_13 = (r2_25 + tmpr); i1_13 = (i2_25 - tmpi); r1_29 = (r2_25 - tmpr); i1_29 = (i2_25 + tmpi); tmpr = ((0.382683432365 * i2_31) - (0.923879532511 * r2_31)); tmpi = ((0.382683432365 * r2_31) + (0.923879532511 * i2_31)); r1_15 = (r2_29 + tmpr); i1_15 = (i2_29 - tmpi); r1_31 = (r2_29 - tmpr); i1_31 = (i2_29 + tmpi); } c_re(kp[0 * m]) = (r1_0 + r1_1); c_im(kp[0 * m]) = (i1_0 + i1_1); c_re(kp[16 * m]) = (r1_0 - r1_1); c_im(kp[16 * m]) = (i1_0 - i1_1); tmpr = ((0.980785280403 * r1_3) + (0.195090322016 * i1_3)); tmpi = ((0.980785280403 * i1_3) - (0.195090322016 * r1_3)); c_re(kp[1 * m]) = (r1_2 + tmpr); c_im(kp[1 * m]) = (i1_2 + tmpi); c_re(kp[17 * m]) = (r1_2 - tmpr); c_im(kp[17 * m]) = (i1_2 - tmpi); tmpr = ((0.923879532511 * r1_5) + (0.382683432365 * i1_5)); tmpi = ((0.923879532511 * i1_5) - (0.382683432365 * r1_5)); c_re(kp[2 * m]) = (r1_4 + tmpr); c_im(kp[2 * m]) = (i1_4 + tmpi); c_re(kp[18 * m]) = (r1_4 - tmpr); c_im(kp[18 * m]) = (i1_4 - tmpi); tmpr = ((0.831469612303 * r1_7) + (0.55557023302 * i1_7)); tmpi = ((0.831469612303 * i1_7) - (0.55557023302 * r1_7)); c_re(kp[3 * m]) = (r1_6 + tmpr); c_im(kp[3 * m]) = (i1_6 + tmpi); c_re(kp[19 * m]) = (r1_6 - tmpr); c_im(kp[19 * m]) = (i1_6 - tmpi); tmpr = (0.707106781187 * (r1_9 + i1_9)); tmpi = (0.707106781187 * (i1_9 - r1_9)); c_re(kp[4 * m]) = (r1_8 + tmpr); c_im(kp[4 * m]) = (i1_8 + tmpi); c_re(kp[20 * m]) = (r1_8 - tmpr); c_im(kp[20 * m]) = (i1_8 - tmpi); tmpr = ((0.55557023302 * r1_11) + (0.831469612303 * i1_11)); tmpi = ((0.55557023302 * i1_11) - (0.831469612303 * r1_11)); c_re(kp[5 * m]) = (r1_10 + tmpr); c_im(kp[5 * m]) = (i1_10 + tmpi); c_re(kp[21 * m]) = (r1_10 - tmpr); c_im(kp[21 * m]) = (i1_10 - tmpi); tmpr = ((0.382683432365 * r1_13) + 
(0.923879532511 * i1_13)); tmpi = ((0.382683432365 * i1_13) - (0.923879532511 * r1_13)); c_re(kp[6 * m]) = (r1_12 + tmpr); c_im(kp[6 * m]) = (i1_12 + tmpi); c_re(kp[22 * m]) = (r1_12 - tmpr); c_im(kp[22 * m]) = (i1_12 - tmpi); tmpr = ((0.195090322016 * r1_15) + (0.980785280403 * i1_15)); tmpi = ((0.195090322016 * i1_15) - (0.980785280403 * r1_15)); c_re(kp[7 * m]) = (r1_14 + tmpr); c_im(kp[7 * m]) = (i1_14 + tmpi); c_re(kp[23 * m]) = (r1_14 - tmpr); c_im(kp[23 * m]) = (i1_14 - tmpi); c_re(kp[8 * m]) = (r1_16 + i1_17); c_im(kp[8 * m]) = (i1_16 - r1_17); c_re(kp[24 * m]) = (r1_16 - i1_17); c_im(kp[24 * m]) = (i1_16 + r1_17); tmpr = ((0.980785280403 * i1_19) - (0.195090322016 * r1_19)); tmpi = ((0.980785280403 * r1_19) + (0.195090322016 * i1_19)); c_re(kp[9 * m]) = (r1_18 + tmpr); c_im(kp[9 * m]) = (i1_18 - tmpi); c_re(kp[25 * m]) = (r1_18 - tmpr); c_im(kp[25 * m]) = (i1_18 + tmpi); tmpr = ((0.923879532511 * i1_21) - (0.382683432365 * r1_21)); tmpi = ((0.923879532511 * r1_21) + (0.382683432365 * i1_21)); c_re(kp[10 * m]) = (r1_20 + tmpr); c_im(kp[10 * m]) = (i1_20 - tmpi); c_re(kp[26 * m]) = (r1_20 - tmpr); c_im(kp[26 * m]) = (i1_20 + tmpi); tmpr = ((0.831469612303 * i1_23) - (0.55557023302 * r1_23)); tmpi = ((0.831469612303 * r1_23) + (0.55557023302 * i1_23)); c_re(kp[11 * m]) = (r1_22 + tmpr); c_im(kp[11 * m]) = (i1_22 - tmpi); c_re(kp[27 * m]) = (r1_22 - tmpr); c_im(kp[27 * m]) = (i1_22 + tmpi); tmpr = (0.707106781187 * (i1_25 - r1_25)); tmpi = (0.707106781187 * (r1_25 + i1_25)); c_re(kp[12 * m]) = (r1_24 + tmpr); c_im(kp[12 * m]) = (i1_24 - tmpi); c_re(kp[28 * m]) = (r1_24 - tmpr); c_im(kp[28 * m]) = (i1_24 + tmpi); tmpr = ((0.55557023302 * i1_27) - (0.831469612303 * r1_27)); tmpi = ((0.55557023302 * r1_27) + (0.831469612303 * i1_27)); c_re(kp[13 * m]) = (r1_26 + tmpr); c_im(kp[13 * m]) = (i1_26 - tmpi); c_re(kp[29 * m]) = (r1_26 - tmpr); c_im(kp[29 * m]) = (i1_26 + tmpi); tmpr = ((0.382683432365 * i1_29) - (0.923879532511 * r1_29)); tmpi = ((0.382683432365 * 
r1_29) + (0.923879532511 * i1_29));
                c_re(kp[14 * m]) = (r1_28 + tmpr);
                c_im(kp[14 * m]) = (i1_28 - tmpi);
                c_re(kp[30 * m]) = (r1_28 - tmpr);
                c_im(kp[30 * m]) = (i1_28 + tmpi);
                tmpr = ((0.195090322016 * i1_31) - (0.980785280403 * r1_31));
                tmpi = ((0.195090322016 * r1_31) + (0.980785280403 * i1_31));
                c_re(kp[15 * m]) = (r1_30 + tmpr);
                c_im(kp[15 * m]) = (i1_30 - tmpi);
                c_re(kp[31 * m]) = (r1_30 - tmpr);
                c_im(kp[31 * m]) = (i1_30 + tmpi);
            }
        }
    } else {
        /* Range too large for the unrolled leaf: recurse on the two halves
         * sequentially (this is the _seq variant - no OpenMP tasks). */
        int ab = (a + b) / 2;
        fft_twiddle_32_seq(a, ab, in, out, W, nW, nWdn, m);
        fft_twiddle_32_seq(ab, b, in, out, W, nW, nWdn, m);
    }
}

/*
 * Unshuffle step of the radix-32 decimation for indices [a, b):
 * for each i, scatters the 32 consecutive inputs in[a*32 ...] into 32
 * slots of out spaced m apart (out + i, out + i + m, ...), writing them
 * two at a time (jp[0]/jp[m]) and advancing jp by 2*m between pairs.
 * Ranges of 128 or more are split in half and processed as two OpenMP
 * tasks joined by taskwait; smaller ranges run the unrolled copy loop.
 * (Machine-generated, unrolled for 16 input pairs = 32 complex values.)
 */
void fft_unshuffle_32(int a, int b, COMPLEX * in, COMPLEX * out, int m)
{
    int i;
    const COMPLEX *ip;
    COMPLEX *jp;
    if ((b - a) < 128) {
        ip = in + a * 32;
        for (i = a; i < b; ++i) {
            jp = out + i;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2;
        }
    } else {
        /* Divide and conquer: the two halves touch disjoint regions of
         * in/out, so they can run as independent tasks. */
        int ab = (a + b) / 2;
#pragma omp task
        fft_unshuffle_32(a, ab, in, out, m);
#pragma omp task
        fft_unshuffle_32(ab, b, in, out, m);
#pragma omp taskwait
    }
}

/* Sequential twin of fft_unshuffle_32: identical copy pattern, plain
 * recursion instead of OpenMP tasks. */
void fft_unshuffle_32_seq(int a, int b, COMPLEX * in, COMPLEX * out, int m)
{
    int i;
    const COMPLEX *ip;
    COMPLEX *jp;
    if ((b - a) < 128) {
        ip = in + a * 32;
        for (i = a; i < b; ++i) {
            jp = out + i;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2; jp += 2 * m;
            jp[0] = ip[0]; jp[m] = ip[1]; ip += 2;
        }
    } else {
        int ab = (a + b) / 2;
        fft_unshuffle_32_seq(a, ab, in, out, m);
        fft_unshuffle_32_seq(ab, b, in, out, m);
    }
}
/* end of machine-generated code */

/*
 * Recursive complex FFT on the n complex components of the array in:
 * basic Cooley-Tukey algorithm, with some improvements for
 * n power of two. The result is placed in the array out. n is arbitrary.
 * The algorithm runs in time O(n*(r1 + ... + rk)) where r1, ..., rk
 * are prime numbers, and r1 * r2 * ... * rk = n.
* * n: size of the input * in: pointer to input * out: pointer to output * factors: list of factors of n, precomputed * W: twiddle factors * nW: size of W, that is, size of the original transform * */ void fft_aux(int n, COMPLEX * in, COMPLEX * out, int *factors, COMPLEX * W, int nW) { int r, m; int k; /* special cases */ if (n == 32) { fft_base_32(in, out); return; } if (n == 16) { fft_base_16(in, out); return; } if (n == 8) { fft_base_8(in, out); return; } if (n == 4) { fft_base_4(in, out); return; } if (n == 2) { fft_base_2(in, out); return; } /* * the cases n == 3, n == 5, and maybe 7 should be implemented as well */ r = *factors; m = n / r; if (r < n) { /* * split the DFT of length n into r DFTs of length n/r, and * recurse */ if (r == 32) { #pragma omp task fft_unshuffle_32(0, m, in, out, m); } else if (r == 16) { #pragma omp task fft_unshuffle_16(0, m, in, out, m); } else if (r == 8) { #pragma omp task fft_unshuffle_8(0, m, in, out, m); } else if (r == 4) { #pragma omp task fft_unshuffle_4(0, m, in, out, m); } else if (r == 2) { #pragma omp task fft_unshuffle_2(0, m, in, out, m); } else unshuffle(0, m, in, out, r, m); #pragma omp taskwait for (k = 0; k < n; k += m) { #pragma omp task fft_aux(m, out + k, in + k, factors + 1, W, nW); } #pragma omp taskwait } /* * now multiply by the twiddle factors, and perform m FFTs * of length r */ if (r == 2) { #pragma omp task fft_twiddle_2(0, m, in, out, W, nW, nW / n, m); } else if (r == 4) { #pragma omp task fft_twiddle_4(0, m, in, out, W, nW, nW / n, m); } else if (r == 8) { #pragma omp task fft_twiddle_8(0, m, in, out, W, nW, nW / n, m); } else if (r == 16) { #pragma omp task fft_twiddle_16(0, m, in, out, W, nW, nW / n, m); } else if (r == 32) { #pragma omp task fft_twiddle_32(0, m, in, out, W, nW, nW / n, m); } else { #pragma omp task fft_twiddle_gen(0, m, in, out, W, nW, nW / n, r, m); } #pragma omp taskwait return; } void fft_aux_seq(int n, COMPLEX * in, COMPLEX * out, int *factors, COMPLEX * W, int nW) { int r, 
m; int k; /* special cases */ if (n == 32) { fft_base_32(in, out); return; } if (n == 16) { fft_base_16(in, out); return; } if (n == 8) { fft_base_8(in, out); return; } if (n == 4) { fft_base_4(in, out); return; } if (n == 2) { fft_base_2(in, out); return; } /* * the cases n == 3, n == 5, and maybe 7 should be implemented as well */ r = *factors; m = n / r; if (r < n) { /* * split the DFT of length n into r DFTs of length n/r, and * recurse */ if (r == 32) fft_unshuffle_32_seq(0, m, in, out, m); else if (r == 16) fft_unshuffle_16_seq(0, m, in, out, m); else if (r == 8) fft_unshuffle_8_seq(0, m, in, out, m); else if (r == 4) fft_unshuffle_4_seq(0, m, in, out, m); else if (r == 2) fft_unshuffle_2_seq(0, m, in, out, m); else unshuffle_seq(0, m, in, out, r, m); for (k = 0; k < n; k += m) { fft_aux_seq(m, out + k, in + k, factors + 1, W, nW); } } /* * now multiply by the twiddle factors, and perform m FFTs * of length r */ if (r == 2) fft_twiddle_2_seq(0, m, in, out, W, nW, nW / n, m); else if (r == 4) fft_twiddle_4_seq(0, m, in, out, W, nW, nW / n, m); else if (r == 8) fft_twiddle_8_seq(0, m, in, out, W, nW, nW / n, m); else if (r == 16) fft_twiddle_16_seq(0, m, in, out, W, nW, nW / n, m); else if (r == 32) fft_twiddle_32_seq(0, m, in, out, W, nW, nW / n, m); else fft_twiddle_gen_seq(0, m, in, out, W, nW, nW / n, r, m); return; } /* * user interface for fft_aux */ void fft(int n, COMPLEX * in, COMPLEX * out) { int factors[40]; /* allows FFTs up to at least 3^40 */ int *p = factors; int l = n; int r; COMPLEX *W; bots_message("Computing coefficients "); W = (COMPLEX *) malloc((n + 1) * sizeof(COMPLEX)); #pragma omp parallel #pragma omp single #pragma omp task compute_w_coefficients(n, 0, n / 2, W); bots_message(" completed!\n"); /* * find factors of n, first 8, then 4 and then primes in ascending * order */ do { r = factor(l); *p++ = r; l /= r; } while (l > 1); bots_message("Computing FFT "); #pragma omp parallel #pragma omp single #pragma omp task fft_aux(n, in, out, 
factors, W, n); bots_message(" completed!\n"); free(W); return; } void fft_seq(int n, COMPLEX * in, COMPLEX * out) { int factors[40]; /* allows FFTs up to at least 3^40 */ int *p = factors; int l = n; int r; COMPLEX *W; W = (COMPLEX *) malloc((n + 1) * sizeof(COMPLEX)); compute_w_coefficients_seq(n, 0, n / 2, W); /* * find factors of n, first 8, then 4 and then primes in ascending * order */ do { r = factor(l); *p++ = r; l /= r; } while (l > 1); fft_aux_seq(n, in, out, factors, W, n); free(W); return; } int test_correctness(int n, COMPLEX *out1, COMPLEX *out2) { int i; double a,d,error = 0.0; for (i = 0; i < n; ++i) { a = sqrt((c_re(out1[i]) - c_re(out2[i])) * (c_re(out1[i]) - c_re(out2[i])) + (c_im(out1[i]) - c_im(out2[i])) * (c_im(out1[i]) - c_im(out2[i]))); d = sqrt(c_re(out2[i]) * c_re(out2[i]) + c_im(out2[i]) * c_im(out2[i])); if (d < -1.0e-10 || d > 1.0e-10) a /= d; if (a > error) error = a; } bots_message("relative error=%e\n", error); if (error > 1e-3) return BOTS_RESULT_UNSUCCESSFUL; else return BOTS_RESULT_SUCCESSFUL; }
rmse.c
/*************************************************************************/ /** File: rmse.c **/ /** Description: calculate root mean squared error of particular **/ /** clustering. **/ /** Author: Sang-Ha Lee **/ /** University of Virginia. **/ /** **/ /** Note: euclid_dist_2() and find_nearest_point() adopted from **/ /** Minebench code. **/ /** **/ /*************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <float.h> #include <math.h> #include "kmeans.h" extern double wtime(void); /*----< euclid_dist_2() >----------------------------------------------------*/ /* multi-dimensional spatial Euclid distance square */ __inline float euclid_dist_2(float *pt1, float *pt2, int numdims) { int i; float ans=0.0; for (i=0; i<numdims; i++) ans += (pt1[i]-pt2[i]) * (pt1[i]-pt2[i]); return(ans); } /*----< find_nearest_point() >-----------------------------------------------*/ __inline int find_nearest_point(float *pt, /* [nfeatures] */ int nfeatures, float **pts, /* [npts][nfeatures] */ int npts) { int index, i; float max_dist=FLT_MAX; /* find the cluster center id with min distance to pt */ for (i=0; i<npts; i++) { float dist; dist = euclid_dist_2(pt, pts[i], nfeatures); /* no need square root */ if (dist < max_dist) { max_dist = dist; index = i; } } return(index); } /*----< rms_err(): calculates RMSE of clustering >-------------------------------------*/ float rms_err (float **feature, /* [npoints][nfeatures] */ int nfeatures, int npoints, float **cluster_centres, /* [nclusters][nfeatures] */ int nclusters) { int i; int nearest_cluster_index; /* cluster center id with min distance to pt */ float sum_euclid = 0.0; /* sum of Euclidean distance squares */ float ret; /* return value */ /* calculate and sum the sqaure of euclidean distance*/ #pragma omp parallel for \ shared(feature,cluster_centres) \ firstprivate(npoints,nfeatures,nclusters) \ private(i, nearest_cluster_index) \ schedule (static) for (i=0; 
i<npoints; i++) { nearest_cluster_index = find_nearest_point(feature[i], nfeatures, cluster_centres, nclusters); sum_euclid += euclid_dist_2(feature[i], cluster_centres[nearest_cluster_index], nfeatures); } /* divide by n, then take sqrt */ ret = sqrt(sum_euclid / npoints); return(ret); }
Common.h
// @Copyright 2007 Kristjan Haule // #ifndef _COMMON_ #define _COMMON_ #include "zeroin.h" #include "average.h" #include <map> #include <vector> #ifdef _STRSTREAM #include <strstream> #endif using namespace std; typedef vector<int>::size_type vint; int Binomial(int n, int m) { int Mf = 1; for (int i=2; i<=m; i++) Mf *= i; int r = 1; for (int i=n; i>=n-m+1; i--) r*=i; return r/Mf; } //Common constants and variables class common{ public: static double U; static double T; // static double J; static int baths; static int Na, Nc; static function1D<int> Ns; static function2D<double> Ms; static function1D<int> Mtot; static function1D<int> deg; static function1D<double> sJc; static vector<vector<map<int,double> > > sncab; // index for hole diagrams static vector<vector<map<int,double> > > sncaf; // index for particle diagrams static vector<map<int,double> > suscb; // index for susceptibility static function2D<int> ncab; // index for hole diagrams static function2D<int> ncaf; // index for particle diagrams static function2D<double> prefactb; // prefactor for hole digrams static function2D<double> prefactf; // prefactor for particle diagrams static function2D<double> prefactG; // prefactor to calculate local Green's function static function1D<double> Ed; static function1D<double> Sinfty; static function1D<double> nalpha; static function1D<double> miss_nd; static function2D<double> moment; static double beta; static double delta; static double Q; static double Q0; static double nd; static double nd0; static double lambda0; static string outdir; static int totDeg; static function1D<string> Eds; static int N_ac; static double dom_ac; static int acore, pcore; static bool SubtractLorentz; static double LorentzMaxRatio; static double SearchLorentz; static int FirstLorentz; static int LastLorentz; static double dlmin; static bool renorm_core, renorm; static bool cmp_susc; static double Fimp, Epot, TrLogGimp; static void SetParameters(Par<double>& Ed_, double U_, /*double J_, 
*/double T_, double Q0_, const string& outdir_, int N_ac_, double dom_ac_, int acore_, int pcore_, bool SubtractLorentz_, double SearchLorentz_, double LorentzMaxRatio_, int FirstLorentz_, int LastLorentz_, bool renorm_core_, bool renorm_) { dlmin = 2.0; LorentzMaxRatio = LorentzMaxRatio_; SearchLorentz = SearchLorentz_; SubtractLorentz=SubtractLorentz_; FirstLorentz=FirstLorentz_; // First pseudoparticle which could be augmented with lorentz LastLorentz=LastLorentz_; // Last pseudoparticle which could be augmented with lorentz Ed.resize(baths); int i=0; while (Ed_.next() && i<baths) { Ed[i] = Ed_; i++; } for (int j=i; j<baths; j++) Ed[j]=Ed[i-1]; T = T_; U = U_; // J = J_; beta=1/T_; Q0 = Q0_; outdir = outdir_; Eds.resize(baths); for (int i=0; i<baths; i++){ stringstream t; t<<"E"<<i; Eds[i] = t.str(); } nalpha.resize(baths); miss_nd.resize(baths); for (int i=0; i<baths; i++) miss_nd[i]=0; N_ac = N_ac_; dom_ac = dom_ac_; acore = acore_; pcore = pcore_; renorm_core=renorm_core_; renorm=renorm_; moment.resize(baths,2); Fimp=Epot=TrLogGimp=0.0; } static void ParsInputFile(const string& filename); static void PrintParsedData(ostream& stream); static ostream& printHead(ostream& stream); }; class sLorentz{ public: double x0, gamma, P; bool exist; sLorentz() : x0(0), gamma(1), P(0), exist(false){}; void Set(double zero, double eps, double a, double p, double q, double r) { exist = true; //double A = (sqr(1-p)+sqr(q))/a-2*eps*q*r/sqr(a)+sqr(eps*r/a)/a; double A = (sqr(1-p)+sqr(q))/a-2*eps*q*(r/a)/a+sqr(eps*r/a)/a; double B = eps*q/a-sqr(eps)/a*(r/a)/2; double C = sqr(eps)/a; double b2 = C/A-sqr(B/A); x0 = -B/A; gamma = (b2>0)? 
sqrt(b2) : sqrt(abs(C/A)); if (gamma==0) { exist=false; P=0; return; } //cout<<"a="<<a<<" A="<<A<<" B="<<B<<" C="<<C<<" b2="<<b2<<" gamma="<<gamma<<endl; P = 1/(A*gamma); x0 += zero; } void SetFalse(){exist=false; P=0;} private: double IntgA(double om0, double om1, double A0, double A1, double omega, double x0) const { if (!exist) return 0; if (fabs(om1-om0)*100<gamma) return P*gamma*0.5*(A0+A1)*(om1-om0)/(sqr(0.5*(om0+om1)+omega-x0)+sqr(gamma)); double c0 = om0 + omega - x0; double c1 = om1 + omega - x0; double dA = (A1-A0)/(om1-om0); if (abs(c0)>100*gamma && abs(c1)>100*gamma && c0*c1>0) return P*gamma*( (A0-dA*c0)*(1/c0-1/c1)+dA*log(c1/c0)+0.5*dA*(sqr(gamma/c1)-sqr(gamma/c0)) ); ///// HERE WAS A BUG!! Corrected Dec/6/2013. if (abs(c0)>100*gamma && abs(c1)>100*gamma && c1-c0>199.9*gamma) return P*( (A0-dA*c0)*(M_PI+gamma*(1/c0-1/c1))+dA*gamma*log(abs(c1/c0))+0.5*dA*gamma*(sqr(gamma/c1)-sqr(gamma/c0)) ); ///// HERE WAS A BUG!! Corrected Dec/6/2013. //if (abs(c0)>1 && abs(c1)>1) return P*gamma*(c1-c0)*0.5*(A1+A0)/(c1*c0); ///// HERE WAS A BUG!! Corrected Dec/6/2013. 
double a0 = c0/gamma; double a1 = c1/gamma; double R; if (fabs(gamma)<1e-30){ R= P*gamma*((A0-dA*c0)*(1/c0-1/c1)+dA*log(fabs(c1/c0))); }else{ R = P*((A0-dA*c0)*(atan(a1)-atan(a0))+0.5*gamma*dA*log((1+sqr(a1))/(1+sqr(a0)))); } if (isnan(R) || isinf(R)){ cerr<<"R is nan or inf "<<R<<" "<<om0<<" "<<om1<<" "<<A0<<" "<<A1<<" "<<omega<<" "<<x0<<" "<<c0<<" "<<c1<<endl; cerr<<"to "<<(1+sqr(a1))<<" "<<(1+sqr(a0))<<" a0="<<a0<<" a1="<<a1<<" gamma="<<gamma<<" c0="<<c0<<" c1="<<c1<<" "<<atan(a1)-atan(a0)<<" "<<(A0-dA*c0)<<" "<<log((1+sqr(a1))/(1+sqr(a0)))<<endl; } return R; } public: double IntgAp(double om0, double om1, double A0, double A1, double omega)const{ return IntgA(om0, om1, A0, A1, omega, x0);} double IntgAm(double om0, double om1, double A0, double A1, double omega)const{ return IntgA(om0, om1, A0, A1, -omega, -x0);} double IntgApLL(const sLorentz& l, double omega) const { return P*l.P*M_PI*(gamma+l.gamma)/(sqr(gamma+l.gamma)+sqr(x0-l.x0-omega)); } double V(double x){ return P*gamma/(sqr(x-x0)+sqr(gamma));} friend ostream& operator<<(ostream& stream, const sLorentz& s); }; ostream& operator<<(ostream& stream, const sLorentz& s) { if (s.exist) stream<<setw(15)<<s.x0<<" "<<setw(15)<<s.gamma<<" "<<setw(15)<<s.P<<" "; return stream; } // Auxiliary self-energies and spectral functions class Auxiliary{ const int Na, Nc, baths; mesh1D om; function1D<double> fe; function1D<double> fedh; function1D<double> logo; function2D<double> Sigt; function2D<double> Sigtn; function2D<dcomplex> Sigc; function2D<dcomplex> Sigcore; function2D<double> Gt; function2D<double> Gp; function2D<double> Gm; vector<function2D<double> > aAc; function1D<double> Acx; function1D<double> Acy; function2D<double> Acp, Acm; AvFun<double> aF; function1D<double> Energy; function1D<double> Probability; mesh1D oml; function2D<dcomplex> Deltam_ac, Deltap_ac; function1D<double> mom_Deltam_ac, mom_Deltap_ac; function1D<dcomplex> Sigtmp; int mpos, m0, m1; function2D<double> GtA1, GtA2; vector<sLorentz> lorentzm, 
lorentzp; public: Auxiliary (int Na_, int Nc_, int baths_) : Na(Na_), Nc(Nc_), baths(baths_), aAc(2*baths), mom_Deltam_ac(baths), mom_Deltap_ac(baths), lorentzm(Na), lorentzp(Na),Probability(Na){}; bool ReadSelfEnergy(const string& filename, const Par<double>& Ed, const Par<double>& T, const Par<double>& U, const mesh1D& ph_omd, const function2D<double>& ph_Ac); void KramarsKronig(); double DeterminSpectralFunctions(double StartLambda, double EndLambda, double dLamdba, int followPeak); void PrintOutMeanQ(double StartLambda, double EndLambda); void PrintNorm(ostream& stream); void Print(int l, string dir); void Printn(int l); void SetSignToZero(){Sigtn=0.0;Sigcore=0.0;} void SetUpAverageAc(const mesh1D& omd, const mesh1D& momd, const function2D<double>& Ack, const function1D<double>& fed); void CalcSigmab(const mesh1D& omd); void CalcSigmaf(const mesh1D& omd); double Difference(); double DeterminSelfEnergies(double alpha, int CmpDiff); const mesh1D& omega() const {return om;} double ferm(int i) const {return fe[i];} const function2D<double>& _Gp() const {return Gp;} const function2D<double>& _Gm() const {return Gm;} void PrintSign(); double Q(double lambda); double operator()(double lambda); double minEnergy; void PrintCore(const string& filename); const function1D<double>& Energ() const{return Energy;} const vector<sLorentz>& Lorentzm()const{return lorentzm;} const vector<sLorentz>& Lorentzp()const{return lorentzp;} void CreateSigma000(const mesh1D& omd, const function2D<double>& Ac); private: void Print_aAc(int l); void Print_Qux(int l); void Print_Sign(int l, int st, int en); void PrintOutMeanQ(int M, double StartLambda, double EndLambda); }; // Physical electron spectral function and suscpetibility // Physical observables class Physical{ public: const int Na, Nc, baths; mesh1D omd; function2D<dcomplex> G00; function2D<double> A00; function1D<double> C00; function1D<dcomplex> Chi; function2D<double> A00c; function2D<dcomplex> Sig; private: mesh1D momd; 
function1D<double> fed; function1D<double> logod; function1D<double> th; function2D<double> Ac; function2D<dcomplex> Delta0; vector<AvFun<double> > aF; function2D<double> Gtx; function2D<double> Cmp; function1D<double> tG; function1D<bool> Pexists; public: Physical(int Na_, int Nc_, int baths_); bool ReadBathFunction(const string& filename, bool spectra); void CalculateA00(const mesh1D& omega, const function2D<double>& Gp, const function2D<double>& Gm, const function1D<double>& Energy, const vector<sLorentz>& lorentzm, const vector<sLorentz>& lorentzp); void KramarsKronig(); void DeterminG00(double alpha,ostream& loging); double Difference(); void Print(int l, string dir); void Print0(const string& filename); const mesh1D& omega() const {return omd;} const mesh1D& momega() const {return momd;} const function1D<double>& fe() const {return fed;} const function2D<double>& Ac0() const {return Ac;} void PrintA00(ostream& out); void CalcSelfEnergy(); void MissingDoping(double start); private: void CalculateProducts(double u, double fu, const mesh1D& om, const function2D<double>& Gm); bool ReadBeginning(const string& filename, istream& input, int& n, int& m, bool& begincomment, double& center); }; void AverageFunction(const mesh1D& omx, double u, const mesh1D& eps, AvFun<double>& aF, functionb<double>& aAc) { apar ap; cintpar pi; tint position = omx.InitInterpLeft(); InterpLeft(eps[0]-u, omx, position, pi); aF.InterpolateFirst(pi); InterpLeft(eps[1]-u, omx, position, pi); ap.SetUpCsFirst(u, eps); aAc[0] = aF.InterpolateNext(pi, ap) * eps.Dh(0); for (int j=1; j<eps.size()-1; j++){ InterpLeft(eps[j+1]-u, omx, position, pi); ap.SetUpCs(u, j, eps, omx.Dh(pi.i)); aAc[j] = aF.InterpolateNext(pi, ap) * eps.Dh(j); } ap.SetUpCsLast(u, eps); aAc[eps.size()-1] = aF.InterpolateLast(ap) * eps.Dh(eps.size()-1); } inline double product(const double* A, const double* G, int size) { double sum = 0; for (int i=0; i<size; i++) sum += A[i]*G[i]; return sum; } void 
Auxiliary::SetUpAverageAc(const mesh1D& omd, const mesh1D& momd, const function2D<double>& Ack, const function1D<double>& fed) { int m = om.find_(0.0)+1; Acx.resize(omd.size()); for (int b=0; b<baths; b++){ aAc[b].resize(om.size(),om.size()); for (int i=0; i<omd.size(); i++) Acx[i] = Ack[b][i]*(1-fed[i]); aF.SetUp(Acx,omd); for (int i=0; i<m; i++) AverageFunction(omd,om[i],om,aF,aAc[b][i]); for (int i=0; i<omd.size(); i++) Acx[i] = Ack[b][i]*fed[i]; aF.SetUp(Acx,omd); for (int i=m; i<om.size(); i++) AverageFunction(omd,om[i],om,aF,aAc[b][i]); aAc[baths+b].resize(om.size(),om.size()); for (int i=0; i<momd.size(); i++) Acx[momd.size()-i-1] = Ack[b][i]*fed[i]; aF.SetUp(Acx,momd); for (int i=0; i<m; i++) AverageFunction(momd,om[i],om,aF,aAc[baths+b][i]); for (int i=0; i<momd.size(); i++) Acx[momd.size()-i-1] = Ack[b][i]*(1-fed[i]); aF.SetUp(Acx,momd); for (int i=m; i<om.size(); i++) AverageFunction(momd,om[i],om,aF,aAc[baths+b][i]); } // For core part need Delta in more extended range Acy.resize(omd.size()); oml.resize(omd.size()+2*common::N_ac); for (int i=0; i<common::N_ac; i++) oml[i] = omd[0]-(common::N_ac-i)*common::dom_ac; for (int i=0; i<omd.size(); i++) oml[i+common::N_ac] = omd[i]; for (int i=0; i<common::N_ac; i++) oml[omd.size()+common::N_ac+i] = omd.last()+(i+1)*common::dom_ac; oml.SetUp(omd.dcenter()); Deltam_ac.resize(baths,oml.size()); Deltap_ac.resize(baths,oml.size()); Acp.resize(baths,omd.size()); Acm.resize(baths,omd.size()); for (int b=0; b<baths; b++){ for (int i=0; i<omd.size(); i++){ Acm(b,i) = Ack[b][i]*fed[i]; Acp(b,i) = Ack[b][i]*(1-fed[i]); } int ofst=0; #pragma omp parallel for for (int i=0; i<common::N_ac; i++){ double Deltar = ::KramarsKronig(Acm[b], omd, oml[i], 0, 0.0); Deltam_ac[b][i] = dcomplex(-M_PI*Deltar,0.0); Deltar = ::KramarsKronig(Acp[b], omd, oml[i], 0, 0.0); Deltap_ac[b][i] = dcomplex(-M_PI*Deltar,0.0); } ofst=common::N_ac; #pragma omp parallel for for (int i=0; i<omd.size(); i++){ double Deltar = ::KramarsKronig(Acm[b], omd, 
omd[i], i, Acm[b][i]); Deltam_ac[b][ofst+i] = dcomplex(-M_PI*Deltar,-M_PI*Acm[b][i]); Deltar = ::KramarsKronig(Acp[b], omd, omd[i], i, Acp[b][i]); Deltap_ac[b][ofst+i] = dcomplex(-M_PI*Deltar,-M_PI*Acp[b][i]); } ofst=common::N_ac+omd.size(); #pragma omp parallel for for (int i=0; i<common::N_ac; i++){ double Deltar = ::KramarsKronig(Acm[b], omd, oml[omd.size()+common::N_ac+i], omd.size()-1, 0.0); Deltam_ac[b][ofst+i] = dcomplex(-M_PI*Deltar, 0.0); Deltar = ::KramarsKronig(Acp[b], omd, oml[omd.size()+common::N_ac+i], omd.size()-1, 0.0); Deltap_ac[b][ofst+i] = dcomplex(-M_PI*Deltar, 0.0); } double summ=0; for (int i=0; i<omd.size(); i++) summ += Acm[b][i]*omd.Dh(i); double sump=0; for (int i=0; i<omd.size(); i++) sump += Acp[b][i]*omd.Dh(i); mom_Deltam_ac[b] = summ; mom_Deltap_ac[b] = sump; } } void Auxiliary::CalcSigmab(const mesh1D& omd) { for (int b=0; b<baths; b++){ GtA1.Product(Gm,aAc[b],0,mpos); // Gm[f,eps]*Acfm[x,eps] GtA2.Product(Gp,aAc[b],mpos,aAc[b].size_N()); // Gp[f,eps]*Acfp[x,eps] if (common::SubtractLorentz){ #pragma omp parallel for for (int j=0; j<Na; j++){ if (lorentzm[j].exist){ tint pos0=omd.size()-2, pos1=omd.size()-2; double dlmin_x0 = -common::dlmin + lorentzm[j].x0; double dlmin_x1 = common::dlmin + lorentzm[j].x0; for (int i=0; i<mpos; i++){ int k0 = omd._find(dlmin_x0 - om[i], 0, pos0); int k1 = omd._find(dlmin_x1 - om[i], 0, pos1); double sum=0; for (int k=k0; k<k1; k++) sum += lorentzm[j].IntgAp(omd[k], omd[k+1], Acp(b,k), Acp(b,k+1), om[i]); GtA1(j,i) += sum; } } if (lorentzp[j].exist){ tint pos0=omd.size()-2, pos1=omd.size()-2; double dlmin_x0 = -common::dlmin + lorentzp[j].x0; double dlmin_x1 = common::dlmin + lorentzp[j].x0; for (int i=mpos; i<om.size(); i++){ int k0 = omd._find(dlmin_x0 - om[i], 0, pos0); int k1 = omd._find(dlmin_x1 - om[i], 0, pos1); double sum = 0; for (int k=k0; k<k1; k++) sum += lorentzp[j].IntgAp(omd[k], omd[k+1], Acm(b,k), Acm(b,k+1), om[i]); GtA2(j,i-mpos) += sum; } } } } #pragma omp parallel for for (int j=0; 
j<Na; j++){ for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){ int ind = l->first; if (ind>=0 && ind<Na){ double prf = l->second/static_cast<double>(common::deg[j]); for (int i=0; i<mpos; i++) Sigtn(j,i) += prf * GtA1(ind,i)/fe[i]; for (int i=mpos; i<om.size(); i++) Sigtn(j,i) += prf * GtA2(ind,i-mpos)/(1-fe[i]); } } } } if (!common::acore) return; for (int b=0; b<baths; b++){ for (int j=0; j<Na; j++){ for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){ int ind = l->first; if (ind>=Na && ind<Na+Nc){ double prf = l->second/static_cast<double>(common::deg[j]); tint position = oml.InitInterpRight(); for (int i=0; i<om.size(); i++){ double x = Energy[ind]-common::lambda0-om[i]; dcomplex Delta=0; if (x>oml.last()) Delta = mom_Deltam_ac[b]/x; else Delta = Deltam_ac[b](oml.InterpRight(x, position)); Sigcore[j][i] += prf*Delta; } } } } } } void Auxiliary::CalcSigmaf(const mesh1D& omd) { for (int b=0; b<baths; b++){ GtA1.Product(Gm,aAc[baths+b],0,mpos); GtA2.Product(Gp,aAc[baths+b],mpos,aAc[baths+b].size_N()); if (common::SubtractLorentz){ #pragma omp parallel for for (int j=0; j<Na; j++){ if (lorentzm[j].exist){ tint pos0=0, pos1=0; double dlmin_x0 = -common::dlmin - lorentzm[j].x0; double dlmin_x1 = common::dlmin - lorentzm[j].x0; for (int i=0; i<mpos; i++){ int k0 = omd.find_(dlmin_x0 + om[i], pos0); int k1 = omd.find_(dlmin_x1 + om[i], pos1); double sum = 0; //for (int k=0; k<omd.size()-1; k++) for (int k=k0; k<k1; k++) sum += lorentzm[j].IntgAm(omd[k], omd[k+1], Acm(b,k), Acm(b,k+1), om[i]); GtA1(j,i) += sum; } } if (lorentzp[j].exist){ tint pos0=0, pos1=0; double dlmin_x0 = -common::dlmin - lorentzp[j].x0; double dlmin_x1 = common::dlmin - lorentzp[j].x0; for (int i=mpos; i<om.size(); i++){ int k0 = omd.find_(dlmin_x0 + om[i], pos0); int k1 = omd.find_(dlmin_x1 + om[i], pos1); double sum = 0; // for (int k=0; k<omd.size()-1; k++) for (int k=k0; k<k1; k++) sum += 
lorentzp[j].IntgAm(omd[k], omd[k+1], Acp(b,k), Acp(b,k+1), om[i]); GtA2(j,i-mpos) += sum; } } } } #pragma omp parallel for for (int j=0; j<Na; j++){ for (map<int,double>::const_iterator l=common::sncaf[j][b].begin(); l!=common::sncaf[j][b].end(); l++){ int ind = l->first; if (ind>=0 && ind<Na){ double prf = l->second/static_cast<double>(common::deg[j]); for (int i=0; i<mpos; i++) Sigtn(j,i) += prf * GtA1(ind,i)/fe[i]; for (int i=mpos; i<om.size(); i++) Sigtn(j,i) += prf * GtA2(ind,i-mpos)/(1-fe[i]); } } } } if (!common::acore) return; for (int b=0; b<baths; b++){ for (int j=0; j<Na; j++){ for (map<int,double>::const_iterator l=common::sncaf[j][b].begin(); l!=common::sncaf[j][b].end(); l++){ int ind = l->first; if (ind>=Na && ind<Na+Nc){ double prf = l->second/static_cast<double>(common::deg[j]); tint position = oml.InitInterpLeft(); for (int i=0; i<om.size(); i++){ double x = om[i]-Energy[ind]+common::lambda0; dcomplex Delta=0; if (x<om[0]) Delta = mom_Deltap_ac[b]/x; else Delta = Deltap_ac[b](oml.InterpLeft(x, position)); Sigcore[j][i] += prf*Delta; } } } } } } void Auxiliary::CreateSigma000(const mesh1D& omd, const function2D<double>& Ac) {// If inteligence guess for the pseudo-particles self-energy is not found, // it creates a guess using atomic type of approximation. 
Sigt=0; for (int b=0; b<baths; b++){ for (int j=0; j<Na; j++){ for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){ int ind = l->first; if (ind>=0 && ind<Na){ double Em = Energy[ind]-minEnergy; double prf = l->second/static_cast<double>(common::deg[j]); tint pos = omd.InitInterpRight(); for (int i=0; i<om.size(); i++){ double ff; if (om[i]>0) ff = ferm_f((Em-om[i])/common::T)/(1-fe[i]); else{ double eom = exp(om[i]/common::T); ff = (eom+1.)/(eom+exp(Em/common::T)); } Sigt(j,i) += -M_PI*prf*ff*Ac[b](omd.InterpRight(Em-om[i],pos)); } } } for (map<int,double>::const_iterator l=common::sncaf[j][b].begin(); l!=common::sncaf[j][b].end(); l++){ int ind = l->first; if (ind>=0 && ind<Na){ double Em = Energy[ind]-minEnergy; double prf = l->second/static_cast<double>(common::deg[j]); tint pos = omd.InitInterpLeft(); for (int i=0; i<om.size(); i++){ double ff; if (om[i]>0) ff = ferm_f((Em-om[i])/common::T)/(1-fe[i]); else{ double eom = exp(om[i]/common::T); ff = (eom+1.)/(eom+exp(Em/common::T)); } Sigt(j,i) += -M_PI*prf*ff*Ac[b](omd.InterpLeft(om[i]-Em,pos)); } } } } } KramarsKronig(); } inline ostream& common::printHead(ostream& stream) { stream<<"# "; stream<<" nb="<<baths<<" "; //stream<<" T="<<T<<" ntot="<<nd<<" U="<<U<<" lambda0="<<lambda0<<" "; stream<<" T="<<T<<" ntot="<<nd<<" U="<<U<<" dFimpG="<<Fimp-TrLogGimp<<" Fimp="<<Fimp<<" Epot="<<Epot<<" TrLogGimp="<<TrLogGimp<<" lambda0="<<lambda0<<" "; stream<<" Ns=["; for (int i=0; i<baths-1; i++) stream<<Ns[i]<<","; stream<<Ns[baths-1]<<"] "; stream<<" Eimp=["; for (int i=0; i<baths-1; i++) stream<<Ed[i]<<","; stream<<Ed[baths-1]<<"] "; stream<<" nf=["; for (int i=0; i<baths-1; i++) stream<<nalpha[i]<<","; stream<<nalpha[baths-1]<<"] "; stream<<" md=["; for (int i=0; i<baths-1; i++) stream<<miss_nd[i]<<","; stream<<miss_nd[baths-1]<<"] "; stream<<" moment=["; for (int i=0; i<baths-1; i++) stream<<"["<<moment[i][0]<<","<<moment[i][1]<<"],"; 
stream<<"["<<moment[baths-1][0]<<","<<moment[baths-1][1]<<"]] "; if (Sinfty.size()>0){ double aS=0; for (int i=0; i<baths; i++) aS += Sinfty[i]; aS/=baths; stream<<" aSinfty="<<aS<<" "; stream<<" Sinfty=("; for (int i=0; i<baths-1; i++)stream<<Sinfty[i]<<","; stream<<Sinfty[baths-1]<<") "; } return stream; } void RememberParams (int argc, char *argv[]){ ofstream param ((common::outdir+"/history.nca").c_str(), ios::app); if (!param) cerr<<" Didn't suceeded to open params file!"<<(common::outdir+"/history.nca")<<endl; for (int i=0; i<argc; i++) param << argv[i] << " "; param << endl; } template <class T> bool ReadValue(T& a, const std::string& variable, const std::string& str){ std::string::size_type pos = str.find(variable); if (pos < std::string::npos){ std::string::size_type poseq = str.find("=",pos); if (poseq<std::string::npos){ std::istringstream streambuff(std::string(str,poseq+1)); streambuff >> a; } return true; } return false; } bool Auxiliary::ReadSelfEnergy(const string& filename, const Par<double>& Ed, const Par<double>& T, const Par<double>& U, const mesh1D& ph_omd, const function2D<double>& ph_Ac){ ifstream inputf(filename.c_str()); istream input(inputf.rdbuf()); input.seekg(0,ios::beg); if (!input) { cerr << "Can't open input file: " << filename << endl; return false; } // Is the input file started with comment? 
bool begincomment = false; int n = 0; string str; const double SpecNumber = -100000; double T_ = SpecNumber, U_ = SpecNumber; function1D<double> Ed_(baths); Ed_ = SpecNumber; double center = 0; getline(input,str); if (str.find('#')<string::npos){ begincomment = true; for (int i=0; i<baths; i++) ReadValue(Ed_[i], common::Eds[i], str); ReadValue(T_, "T", str); ReadValue(U_, "U", str); if (!ReadValue(center, "peakposition", str)) center=0; } else n++; if (!Ed.IsSet() && Ed_[0]!=SpecNumber) for (int i=0; i<baths; i++) common::Ed[i] = Ed_[i]; if (!T.IsSet() && T_!=SpecNumber) common::T = T_; if (!U.IsSet() && U_!=SpecNumber) common::U = U_; common::beta = 1./common::T; Energy.resize(Na+Nc); minEnergy=0; // Calculates auxiliary Energies for (int i=0; i<Na+Nc; i++){ Energy[i] = 0; for (int j=0; j<baths; j++) Energy[i] += common::Ed[j]*common::Ms[i][j]; // Energy[i] += 0.5*common::Mtot[i]*(common::Mtot[i]-1)*(common::U-0.5*common::J); // Energy[i] += common::J*common::sJc[i]; Energy[i] += 0.5*common::Mtot[i]*(common::Mtot[i]-1)*common::U; Energy[i] += common::sJc[i]; if (Energy[i]<minEnergy) minEnergy = Energy[i]; } clog<<"************* Parameters ****************"<<endl; clog<<" U = "<<common::U<<endl; for (int i=0; i<baths; i++) clog<<" Ed"<<i<<" = "<<common::Ed[i]<<endl; clog<<" T = "<<common::T<<endl; for (int i=0; i<baths; i++) clog<<" N"<<i<<" = "<<common::Ns[i]<<endl; for (int i=0; i<Na+Nc; i++){ if (i<Na) clog<<" valence state"<<setw(2)<<left<<i<<right<<" = "; else clog<<" core state"<<i<<" = "; for (int j=0; j<baths; j++) clog<<setw(2)<<common::Ms[i][j]; clog<<" with Energy"<<setw(2)<<left<<i<<right<<" = "<<Energy[i]<<endl; } clog<<"*****************************************"<<endl; // Computes the number of columns in file if (!input) { cerr << "ERROR: Wrong file format for Sigm" << endl; return false; } getline(input,str); n++; #ifdef _STRSTREAM strstream oneline; oneline << str <<ends; #else istringstream oneline(str); #endif int m=0; double t; while 
(oneline){oneline>>t; m++;} m--; while (input){ getline(input,str); n++;} n--; clog << filename << ": Number of entries: "<< n <<endl; clog << filename << ": Number of columns: "<< m <<endl; clog << filename << ": Peak-position "<< center <<endl; bool CreateDefault = false; if (m<2*Na+1){ //cerr<<"ERROR: Not enough columns is input Sigma file. Exiting!"<<endl; clog<<"WARRNING: Not enough columns is input self-energy for pseudoparticles.... Creating default!"<<endl; CreateDefault = true; } inputf.seekg(0,ios::beg); // clog<<"Premaknil na "<< inputf.tellg()<<endl; if (begincomment) inputf.ignore(10000,'\n'); if (!inputf){ cerr<<"Reopening didn't suceeded!"<<endl; return false;} om.resize(n); Sigt.resize(Na,n); Sigc.resize(Na,n); int l=0; double omega; while (inputf>>omega && l<n){ om[l] = omega; if (!CreateDefault){ for (int i=0; i<Na; i++){ double Sr, St; inputf>>Sr; inputf>>St; Sigc(i,l) = dcomplex(Sr,-St); Sigt(i,l) = -St; } } getline(inputf, str); l++; } inputf.close(); if (l<n) cerr<<"Something wrong by reading file "<<filename<<endl; om.SetUp(center); mpos = om.find_(0.0)+1; m0 = om.find_(-common::SearchLorentz); m1 = om.find_(common::SearchLorentz)+1; GtA1.resize(Na,mpos); GtA2.resize(Na,om.size()-mpos); Sigcore.resize(Na,om.size()); Sigtn.resize(Na,om.size()); Gt.resize(Na,om.size()); Gp.resize(Na,om.size()); Gm.resize(Na,om.size()); fe.CalcFermOnMesh(common::beta, om); logo.CalcLogOnMesh(om); fedh.resize(om.size()); for (int i=0; i<om.size(); i++) fedh[i] = fe[i]*om.Dh(i); if (CreateDefault){ CreateSigma000(ph_omd, ph_Ac); }else{ for (int j=0; j<Na; j++){ for (int i=0; i<om.size(); i++) Sigc(j,i) = dcomplex(Sigc(j,i).real(), Sigc(j,i).imag()*(1-fe[i])); } } return true; } void Auxiliary::KramarsKronig() { for (int l=0; l<Na; l++){ for (int i=0; i<om.size(); i++) Sigc(l,i).imag() = Sigt(l,i)*(1-fe[i]); Sigc[l].KramarsKronig(om, logo); } } double Lambda(double E, const functionb<dcomplex>& Sigc, const functionb<double>& Sigx, const mesh1D& om) { // looking for 
lambda such that \widetilde{G} has maximum at zero frequency. // Sufficient condition is that the derivative of 1/\widetilde{G} is zero at zero frequency. // One gets a quadratic equation for lambda and thus two roots. Then one chooses the root that maximizes \widetilde{G}. // If no root exists, than we take lambda that minimizes linear coeficient in the expansion of 1/\widetilde{G}. // The latter equation is linear and one always gets unique solution. intpar p = om.Interp(0.0); int i=p.i; dcomplex cs = -E-Sigc(p); dcomplex ds = (Sigc[i+1]-Sigc[i])*om.Delta(i); double cr = cs.real(); double ci = cs.imag(); double dcr = 1-ds.real(); double dci = -ds.imag(); double dSigx = (Sigx[i+1]-Sigx[i])*om.Delta(i); double x = Sigx[i]/dSigx; double determinant2 = x*(x*dcr*dcr+2*ci*dci)-ci*ci; // Minimum can not be at zero. Try to find lambda that minimizes the linear coefficient in the expansion of 1/G // If 1/G = a + b omega + c omega^2 +... and the below determinant is smaller than zero, coefficient b can not be // set to zero. Than return lambda that gives the smallest b. 
if (determinant2<=0) return dcr*x-cr; double d2 = -sqrt(determinant2); double d1 = -cr + dcr*x; double v1 = 1/(sqr(ci)+sqr(cr+d1+d2)); double v2 = 1/(sqr(ci)+sqr(cr+d1-d2)); cout<<"Lambda="<<d1+d2<<" "<<d1-d2<<" "<<v1<<" "<<v2<<endl; if (fabs(v1)>fabs(v2)) return d1+d2; else return d1-d2; } double Auxiliary::Q(double lambda) { double sumQ=0; for (int j=0; j<Na; j++){ double mune = -Energy[j]+lambda; sLorentz lorentz; if (common::SubtractLorentz && j>=common::FirstLorentz && j<=common::LastLorentz){ double v0 = om[m0]+mune-Sigc(j,m0).real(), v=v0; int ii=0; for (ii=m0+1; ii<m1; ii++) { v = om[ii]+mune-Sigc(j,ii).real(); if (sign(v)*sign(v0)<0) break; } double denom = om[ii]-om[ii-1]-Sigc(j,ii).real()+Sigc(j,ii-1).real(); if (denom==0) cout<<"denom="<<denom<<endl; if (sign(v)*sign(v0)<0 && denom!=0){ double zero = om[ii-1]-(om[ii]-om[ii-1])*(om[ii-1]+mune-Sigc(j,ii-1).real())/(om[ii]-om[ii-1]-Sigc(j,ii).real()+Sigc(j,ii-1).real()); intpar ip(ii-1,(zero-om[ii-1])/(om[ii]-om[ii-1])); double dom = om[ii]-om[ii-1]; dcomplex Sc = Sigc[j](ip); double ratio = abs(Sc.imag()/dom); if (ratio<common::LorentzMaxRatio){ double Sm = Sigt[j](ip)*fe(ip); dcomplex dSc = (Sigc[j][ii]-Sigc[j][ii-1])/dom; //(om[ii]-om[ii-1]); double dSm = (Sigt[j][ii]*fe[ii]-Sigt[j][ii-1]*fe[ii-1])/dom; //(om[ii]-om[ii-1]); double Sc_im = Sc.imag(); if (fabs(Sc_im)<1e-20) Sc_im=-1e-20; if (fabs(Sm)<1e-20) Sm=-1e-20; if (fabs(Sc_im)>=1e-20 && fabs(Sm)>=1e-20){ lorentz.Set(zero, Sc_im, Sm, dSc.real(), dSc.imag(), dSm); //cout<<"QFound zero "<<setw(2)<<left<<j<<right<<setw(10)<<zero<<" "<<lorentz<<endl;//setw(15)<<Sc<<" "<<setw(15)<<-St<<" "; } } } } double sum=0, v; for (int i=0; i<om.size(); i++){ v = fedh[i]*Sigt(j,i)/(sqr(om[i]+mune-Sigc(j,i).real())+sqr(Sigc(j,i).imag())); if (lorentz.exist) v -= om.Dh(i)*lorentz.V(om[i]); sum -= v; } sum -= lorentz.P*M_PI; sumQ += sum*common::deg[j]; } return (sumQ/M_PI); } inline double Auxiliary::operator()(double lambda) { double Q_ = Q(lambda); return 
Q_-common::Q0; } void Auxiliary::PrintOutMeanQ(double StartLambda, double EndLambda) { double a0 = StartLambda; int M = 100; double da0 = (EndLambda-StartLambda)/M; cout.precision(16); for (int i=0; i<M; i++){ cout << a0 << setw(25) << operator()(a0) << endl; a0 += da0; } } double Auxiliary::DeterminSpectralFunctions(double StartLambda, double EndLambda, double dLambda, int followPeak) { double lambda0; if (followPeak>=0 && followPeak<Na) lambda0 = Lambda(Energy[followPeak], Sigc[followPeak], Sigt[followPeak], om); else if (followPeak==-2){ lambda0 = minEnergy; }else{ double a0 = StartLambda, b0 = 0; int sign=0, nn=0; while (!sign && nn++<100){ double pQ = operator()(a0); while (!sign && a0<=b0) { double sQ = operator()(a0+dLambda); sign = pQ*sQ<0; pQ = sQ; if (!sign) a0 += dLambda; } if (!sign) dLambda /= 2.0; } if (nn>=100) { cerr << "Can't find root for <Q>" << endl; PrintOutMeanQ(StartLambda, EndLambda); exit(1); } // loking for zero (lambda0) lambda0 = zeroin(a0, a0+dLambda, *this, 1e-15*common::Q0); } common::lambda0 = lambda0; clog << setprecision(16) << "; lambda = "<<lambda0<<" "<<lambda0-minEnergy<<endl; double sumQ = 0, sumnd=0; function1D<double> dQ(Na); for (int j=0; j<Na; j++){ double mune = -Energy[j]+lambda0; if (common::SubtractLorentz && j>=common::FirstLorentz && j<=common::LastLorentz){ double v = om[m0]+mune-Sigc(j,m0).real(), v0=v; int ii=0; for (ii=m0+1; ii<m1; ii++) { v = om[ii]+mune-Sigc(j,ii).real(); if (sign(v)*sign(v0)<0) break; } bool found = false; double denom = om[ii]-om[ii-1]-Sigc(j,ii).real()+Sigc(j,ii-1).real(); if (sign(v)*sign(v0)<0 && denom!=0){ double zero = om[ii-1]-(om[ii]-om[ii-1])*(om[ii-1]+mune-Sigc(j,ii-1).real())/(om[ii]-om[ii-1]-Sigc(j,ii).real()+Sigc(j,ii-1).real()); intpar ip(ii-1,(zero-om[ii-1])/(om[ii]-om[ii-1])); double dom = om[ii]-om[ii-1]; dcomplex Sc = Sigc[j](ip); double ratio = abs(Sc.imag()/dom); //clog<<"ps"<<j<<" ratio="<<ratio<<endl; if (ratio<common::LorentzMaxRatio){ double Sm = Sigt[j](ip)*fe(ip); 
dcomplex dSc = (Sigc[j][ii]-Sigc[j][ii-1])/dom; double dSm = (Sigt[j][ii]*fe[ii]-Sigt[j][ii-1]*fe[ii-1])/dom; double Sc_im = Sc.imag(); if (fabs(Sc_im)<1e-20) Sc_im=-1e-20; if (fabs(Sm)<1e-20) Sm=-1e-20; if (fabs(Sc_im)>=1e-20 && fabs(Sm)>=1e-20){ found = true; lorentzm[j].Set(zero, Sc_im, Sm, dSc.real(), dSc.imag(), dSm); lorentzp[j].Set(zero, Sc_im, Sc_im, dSc.real(), dSc.imag(), dSc.imag()); //cout<<"Sc.im="<<Sc.imag()<<" Sm="<<Sm<<" dSc.r="<<dSc.real()<<" dSc.i="<<dSc.imag()<<" dSm="<<dSm<<endl; //cout<<"zero="<<zero<<" ratio="<<ratio<<" Sm="<<Sm<<" dSm="<<dSm<<" Sc_im="<<Sc_im<<endl; cout<<"Found lorentz at "<<setw(4)<<left<<j<<right<<setw(10)<<zero<<" lm="<<lorentzm[j]<<" lp="<<lorentzp[j]<<" r-"<<setw(15)<<ratio<<endl; } } } if (!found){ lorentzp[j].SetFalse(); lorentzm[j].SetFalse(); } } } // // We want to make sure that only one integer occupacition is treated with lorentz // // because we did not yet implement Lorentz*Lorentz // int MaxMtot=0; // for (int i=0; i<Na; i++) if (MaxMtot<common::Mtot[i]) MaxMtot = common::Mtot[i]; // function1D<int> lorex(MaxMtot+1);lorex=0; // for (int j=0; j<Na; j++) if (lorentzm[j].exist ||lorentzp[j].exist) lorex[common::Mtot[j]]++; // int imaxLorentz=0; // for (int i=0; i<=MaxMtot; i++) if (lorex[i]>lorex[imaxLorentz]) imaxLorentz=i; // for (int i=0; i<Na; i++){ // if (lorentzm[i].exist && common::Mtot[i]!=imaxLorentz) { cout<<"Lorentzm for "<<i<<" not accepted!"<<endl; lorentzm[i].SetFalse();} // if (lorentzp[i].exist && common::Mtot[i]!=imaxLorentz) { cout<<"Lorentzp for "<<i<<" not accepted!"<<endl; lorentzp[i].SetFalse();} // } for (int j=0; j<Na; j++){ double mune = -Energy[j]+lambda0; dQ[j]=0; for (int i=0; i<om.size(); i++){ Gt(j,i) = Sigt(j,i)/(sqr(om[i]+mune-Sigc(j,i).real())+sqr(Sigc(j,i).imag())); Gm(j,i) = fe[i]*Gt(j,i); Gp(j,i) = (1-fe[i])*Gt(j,i); if (lorentzm[j].exist) Gm(j,i) -= lorentzm[j].V(om[i]); if (lorentzp[j].exist) Gp(j,i) -= lorentzp[j].V(om[i]); dQ[j] -= Gm(j,i)*om.Dh(i); } dQ[j] -= 
lorentzm[j].P*M_PI; dQ[j] *= common::deg[j]/M_PI; sumQ += dQ[j]; sumnd += dQ[j]*common::Mtot[j]; } clog<<" Q = "<<sumQ<<endl; for (int j=0; j<Na; j++){ Probability[j] = dQ[j]/sumQ; clog<<setprecision(16)<<" n"<<j<<"="<<dQ[j]/sumQ<<endl; } for (int b=0; b<baths; b++){ common::nalpha[b]=0; for (int j=0; j<Na; j++) common::nalpha[b] += dQ[j]*common::Ms[j][b]; common::nalpha[b]/=sumQ; } common::Q = sumQ; common::Fimp = common::lambda0-common::T * ::log(common::Q); double Epot=0; for (int j=0; j<Na; j++) Epot += Probability[j]*Energy[j]; double dEpot=0; for (int b=0; b<baths; b++) dEpot += common::Ed[b]*common::nalpha[b]; common::Epot = Epot-dEpot; clog<<" Fimp="<<common::Fimp<<" Epot="<<common::Epot<<" Epot+OneP="<<Epot<<endl; // if (fabs(sumQ-common::Q0)>1e-10) cerr<<"Something wrong with Q "<<sumQ<<"!"<<endl; clog<<" Q is here equal to "<<sumQ<<endl; return sumnd/sumQ; } void Auxiliary::Print(int l, string dir="") { string filename; if (l<0) filename = common::outdir+"/Sigma"+dir; else filename = NameOfFile(common::outdir+"/Sigma", l); ofstream out1(filename.c_str()); out1.precision(16); common::printHead(out1)<<" peakposition="<<om.dcenter()<<endl; for (int i=0; i<om.size(); i++){ out1<<setw(25)<<om[i]; for (int j=0; j<Na; j++) out1<<setw(25)<<Sigc(j,i).real()<<" "<<setw(25)<<-Sigt(j,i); out1<<endl; } if (l<0) filename = common::outdir+"/Spec"+dir; else filename = NameOfFile(common::outdir+dir+"/Spec", l); ofstream out2(filename.c_str()); out2.precision(16); common::printHead(out2)<<" peakposition="<<om.dcenter()<<endl; for (int i=0; i<om.size(); i++){ out2<<setw(25)<<om[i]; for (int j=0; j<Na; j++) out2<<setw(25)<<-Gt(j,i); for (int j=0; j<Na; j++) out2<<setw(25)<<-Gp(j,i); for (int j=0; j<Na; j++) out2<<setw(25)<<-Gm(j,i); out2<<endl; } } void Auxiliary::Printn(int l) { string filename; filename = NameOfFile(common::outdir+"/nSigma", l); ofstream out1(filename.c_str()); out1.precision(16); common::printHead(out1)<<" peakposition="<<om.dcenter()<<endl; for (int 
i=0; i<om.size(); i++){ out1<<setw(25)<<om[i]; for (int j=0; j<Na; j++) out1<<setw(25)<<-Sigtn(j,i); out1<<endl; } }

// Constructor: records the problem sizes and determines, for every valence
// pseudoparticle j, whether any physical-spectra diagram refers to it.
// Pexists[j] becomes true iff some NCA diagram (common::sncab) or -- when the
// susceptibility is computed -- some susceptibility diagram (common::suscb)
// couples state j to a valence state (index in [0,Na)).
Physical::Physical(int Na_, int Nc_, int baths_) : Na(Na_), Nc(Nc_), baths(baths_), aF(Na)
{
  Pexists.resize(Na);
  for (int j=0; j<Na; j++){
    Pexists[j]=false;
    // scan the NCA diagrams of every bath for a valence-valence coupling
    for (int b=0; b<baths; b++){
      for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){
        if (l->first >=0 && l->first < Na){
          Pexists[j]=true;
          break;
        }
      }
    }
    // susceptibility diagrams can also make state j relevant
    if (!Pexists[j] && common::cmp_susc){
      for (map<int,double>::const_iterator l=common::suscb[j].begin(); l!=common::suscb[j].end(); l++)
        if (l->first >=0 && l->first < Na){
          Pexists[j]=true;
          break;
        }
    }
  }
}

// Reads the header of a spectral-function file: counts the data lines (n) and
// the columns (m), detects a leading '#' comment line and, if present, pulls
// the "peakposition" value out of it (center).  The stream is rewound past
// the comment line so the caller can start reading numbers.  Returns false
// on any format/IO problem.
bool Physical::ReadBeginning(const string& filename, istream& input, int& n, int& m, bool& begincomment, double& center)
{
  if (!input) {
    cerr << "Can't open input file: " << filename << endl;
    return false;
  }
  // Is the input file started with comment?
  begincomment = false;
  n = 0;
  string str;
  getline(input,str);
  if (str.find('#')<string::npos){
    begincomment = true;
    if (!ReadValue(center, "peakposition", str)) center=0;
  } else n++;
  // Computes the number of columns in file
  if (!input) {
    cerr << "ERROR: Wrong file format for Sigm" << endl;
    return false;
  }
  getline(input,str); n++;
  stringstream oneline;
  oneline << str << ends;
  m=0;
  double t;
  while (oneline){oneline>>t; m++;}
  m--;  // the last extraction failed, so the count is one too high
  while (input){ getline(input,str); n++;}
  n--;  // likewise for the line count
  clog << filename << ": Number of entries: "<< n <<endl;
  clog << filename << ": Number of columns: "<< m <<endl;
  clog << filename << ": Peak-position "<< center <<endl;
  // rewind and skip the comment line so data reading can start cleanly
  input.seekg(0, ios::beg);
  input.clear();
  if (begincomment) getline(input, str);
  return true;
}

// Reads the bath spectral functions (or, for spectra=false, the full retarded
// hybridization columns) from `filename`; continues on the following lines.
bool Physical::ReadBathFunction(const string& filename, bool spectra=true)
// spectra=true: only spectral function will be read not the retarded quantity
{
  ifstream inputf(filename.c_str());
  istream input(inputf.rdbuf());
  input.seekg(0,ios::beg);
  if (!input) {
    cerr << "Can't open input file: " << filename << endl;
    return false;
  }
// Is the input file started with comment? bool begincomment = false; int n = 0; string str; double center=0; getline(input,str); if (str.find('#')<string::npos){ begincomment = true; if (!ReadValue(center, "peakposition", str)) center=0; } else n++; // Computes the number of columns in file if (!input) { cerr << "ERROR: Wrong file format for " << filename << endl; return false; } getline(input,str); n++; #ifdef _STRSTREAM strstream oneline; oneline << str <<ends; #else istringstream oneline(str); #endif int m=0; double t; while (oneline){oneline>>t; m++;} m--; while (input){ getline(input,str); n++;} n--; clog << filename << ": Number of entries: "<< n <<endl; clog << filename << ": Number of columns: "<< m <<endl; clog << filename << ": Peak-position "<< center <<endl; int number_cols = baths+1; if (!spectra) number_cols = 2*baths+1; if (m<number_cols){ cerr<<"ERROR: Not enough columns in bath input file! Exiting..."<<endl; return false; } inputf.seekg(0, ios::beg); clog<<"Premaknil na "<< inputf.tellg()<<endl; if (begincomment) inputf.ignore(1000,'\n'); if (!inputf){ cerr<<"Reopening didn't suceeded!"<<endl; return false;} omd.resize(n); momd.resize(n); G00.resize(baths,n); A00.resize(baths,n); A00c.resize(baths,n); Sig.resize(baths,n); Ac.resize(baths,n); Delta0.resize(baths,n); if (common::cmp_susc){ C00.resize(n); Chi.resize(n); } int l=0; double omega; while (inputf>>omega && l<n){ omd[l] = omega; if (spectra) for (int j=0; j<baths; j++) inputf>>Ac(j,l); else{ for (int j=0; j<baths; j++) { double dr, di; inputf>>dr; inputf>>di; Ac(j,l) = -di/M_PI; Delta0(j,l) = dcomplex(dr,di); } } getline(inputf, str); momd[n-l-1] = -omd[l]; l++; } inputf.close(); if (l<n) cerr<<"Something wrong by reading file "<<filename<<endl; omd.SetUp(center); momd.SetUp(-center); fed.CalcFermOnMesh(common::beta, omd); th.CalcTanhOnMesh(common::beta, omd); logod.CalcLogOnMesh(omd); if (spectra){ for (int b=0; b<baths; b++){ for (int i=0; i<omd.size(); i++){ double Deltar = 
::KramarsKronig(Ac[b], omd, omd[i], i, Ac[b][i]); Delta0(b,i) = dcomplex(-M_PI*Deltar,-M_PI*Ac[b][i]); } } } return true; } void Physical::CalculateProducts(double u, double fu, const mesh1D& om, const function2D<double>& Gm) { apar ap; cintpar pi; tint position = om.InitInterpLeft(); InterpLeft(om[0]-u, om, position, pi); #pragma omp parallel for for (int i=0; i<Na; i++) if (Pexists[i]) aF[i].InterpolateFirst(pi); InterpLeft(om[1]-u, om, position, pi); ap.SetUpCsFirst(u, om); #pragma omp parallel for for (int i=0; i<Na; i++) if (Pexists[i]) Gtx(i,0) = aF[i].InterpolateNext(pi, ap) * om.Dh(0); for (int j=1; j<om.size()-1; j++){ InterpLeft(om[j+1]-u, om, position, pi); ap.SetUpCs(u, j, om, om.Dh(pi.i+1)); //#pragma omp parallel for for (int i=0; i<Na; i++) if (Pexists[i]) Gtx(i,j) = aF[i].InterpolateNext(pi, ap) * om.Dh(j); } ap.SetUpCsLast(u, om); #pragma omp parallel for for (int i=0; i<Na; i++) if (Pexists[i]) Gtx(i,om.size()-1) = aF[i].InterpolateLast(ap) * om.Dh(om.size()-1); Cmp.resize(Na,Na); #pragma omp parallel for for (int i=0; i<Na; i++){ for (int b=0; b<baths; b++){ for (map<int,double>::const_iterator l=common::sncab[i][b].begin(); l!=common::sncab[i][b].end(); l++){ int ind = l->first; if (ind>=0 && ind<Na) Cmp(i,ind) = product(Gtx[i].MemPt(),Gm[ind].MemPt(),om.size())/fu; } } if (common::cmp_susc){ for (map<int,double>::const_iterator l=common::suscb[i].begin(); l!=common::suscb[i].end(); l++){ int ind = l->first; if (ind>=0 && ind<Na) Cmp(i,ind) = product(Gtx[i].MemPt(),Gm[ind].MemPt(),om.size())/fu; } } } } void Physical::CalculateA00(const mesh1D& omega, const function2D<double>& Gp, const function2D<double>& Gm, const function1D<double>& Energy, const vector<sLorentz>& lorentzm, const vector<sLorentz>& lorentzp) { int m = omd.find_(0.0)+1; Gtx.resize(Na, omega.size()); #pragma omp parallel for for (int i=0; i<Na; i++) if (Pexists[i]) aF[i].SetUp(Gp[i],omega); for (int i=0; i<m; i++){ CalculateProducts(omd[i], fed[i], omega, Gm); #pragma omp 
parallel for for (int b=0; b<baths; b++){ double sum=0; for (int j=0; j<Na; j++) for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){ int ind = l->first; double prf = l->second/common::Ns[b]; if (ind>=0 && ind<Na) sum += prf*Cmp(j,ind); } A00(b,i) = sum/(M_PI*M_PI*common::Q); } if (common::cmp_susc){ double sum=0; for (int j=0; j<Na; j++) for (map<int,double>::const_iterator l=common::suscb[j].begin(); l!=common::suscb[j].end(); l++){ int ind = l->first; double prf = l->second; if (ind>=0 && ind<Na) sum += prf*Cmp(j,ind); } C00[i] = sum*th[i]/(M_PI*common::Q); } } #pragma omp parallel for for (int i=0; i<Na; i++) if (Pexists[i]) aF[i].SetUp(Gm[i],omega); for (int i=m; i<omd.size(); i++){ CalculateProducts(omd[i], (1-fed[i]), omega, Gp); #pragma omp parallel for for (int b=0; b<baths; b++){ double sum=0; for (int j=0; j<Na; j++) for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){ int ind = l->first; double prf = l->second/common::Ns[b]; if (ind>=0 && ind<Na) sum += prf*Cmp(j,ind); } A00(b,i) = sum/(M_PI*M_PI*common::Q); } if (common::cmp_susc){ double sum=0; for (int j=0; j<Na; j++) for (map<int,double>::const_iterator l=common::suscb[j].begin(); l!=common::suscb[j].end(); l++){ int ind = l->first; double prf = l->second; if (ind>=0 && ind<Na) sum += prf*Cmp(j,ind); } C00[i] = sum*th[i]/(M_PI*common::Q); } } if (common::SubtractLorentz){ for (int b=0; b<baths; b++){ //cout<<"Starting parallel part"<<endl; double* A00_private = new double[omd.size()]; for (int s=0; s<omd.size(); s++) A00_private[s]=0.0; for (int i=0; i<Na; i++){ for (map<int,double>::const_iterator l=common::sncab[i][b].begin(); l!=common::sncab[i][b].end(); l++){ int ind = l->first; if (ind>=0 && ind<Na){ double prf = (l->second/common::Ns[b])/(M_PI*M_PI)/common::Q; if (lorentzm[ind].exist){ #pragma omp parallel for for (int j=0; j<m; j++){ double sum=0; for (int k=0; k<omega.size()-1; k++) sum += 
lorentzm[ind].IntgAp(omega[k], omega[k+1], Gp(i,k), Gp(i,k+1), omd[j]); //A00(b,j) += sum*prf/fed[j]; A00_private[j] += sum*prf/fed[j]; } } if (lorentzp[ind].exist){ #pragma omp parallel for for (int j=m; j<omd.size(); j++){ double sum=0; for (int k=0; k<omega.size()-1; k++) sum += lorentzp[ind].IntgAp(omega[k], omega[k+1], Gm(i,k), Gm(i,k+1), omd[j]); //A00(b,j) += sum*prf/(1-fed[j]); A00_private[j] += sum*prf/(1-fed[j]); } } if (lorentzp[i].exist){ #pragma omp parallel for for (int j=0; j<m; j++){ double sum=0; for (int k=0; k<omega.size()-1; k++) sum += lorentzp[i].IntgAp(omega[k], omega[k+1], Gm(ind,k), Gm(ind,k+1), -omd[j]); //A00(b,j) += sum*prf/fed[j]; A00_private[j] += sum*prf/fed[j]; } } if (lorentzm[i].exist){ #pragma omp parallel for for (int j=m; j<omd.size(); j++){ double sum=0; for (int k=0; k<omega.size()-1; k++) sum += lorentzm[i].IntgAp(omega[k], omega[k+1], Gp(ind,k), Gp(ind,k+1), -omd[j]); //A00(b,j) += sum*prf/(1-fed[j]); A00_private[j] += sum*prf/(1-fed[j]); } } if (lorentzm[ind].exist && lorentzp[i].exist) #pragma omp parallel for for (int j=0; j<m; j++){ //A00(b,j) += lorentzm[ind].IntgApLL(lorentzp[i], omd[j]) * prf/fed[j]; A00_private[j] += lorentzm[ind].IntgApLL(lorentzp[i], omd[j]) * prf/fed[j]; } if (lorentzp[ind].exist && lorentzm[i].exist) #pragma omp parallel for for (int j=m; j<omd.size(); j++){ //A00(b,j) += lorentzp[ind].IntgApLL(lorentzm[i], omd[j]) * prf/(1-fed[j]); A00_private[j] += lorentzp[ind].IntgApLL(lorentzm[i], omd[j]) * prf/(1-fed[j]); } } } } for (int s=0; s<omd.size(); s++) A00(b,s) += A00_private[s]; delete[] A00_private; //cout<<"Just ended parallel part"<<endl; } if (common::cmp_susc){ for (int i=0; i<Na; i++){ for (map<int,double>::const_iterator l=common::suscb[i].begin(); l!=common::suscb[i].end(); l++){ int ind = l->first; if (ind>=0 && ind<Na){ double prf = (l->second)/(M_PI*common::Q); if (lorentzm[ind].exist){ for (int j=0; j<m; j++){ double sum=0; for (int k=0; k<omega.size()-1; k++) sum += 
lorentzm[ind].IntgAp(omega[k], omega[k+1], Gp(i,k), Gp(i,k+1), omd[j]); C00[j] += sum*prf*th[j]/fed[j]; } } if (lorentzp[ind].exist){ for (int j=m; j<omd.size(); j++){ double sum=0; for (int k=0; k<omega.size()-1; k++) sum += lorentzp[ind].IntgAp(omega[k], omega[k+1], Gm(i,k), Gm(i,k+1), omd[j]); C00[j] += sum*prf*th[j]/(1-fed[j]); } } if (lorentzp[i].exist){ for (int j=0; j<m; j++){ double sum=0; for (int k=0; k<omega.size()-1; k++) sum += lorentzp[i].IntgAp(omega[k], omega[k+1], Gm(ind,k), Gm(ind,k+1), -omd[j]); C00[j] += sum*prf*th[j]/fed[j]; } } if (lorentzm[i].exist){ for (int j=m; j<omd.size(); j++){ double sum=0; for (int k=0; k<omega.size()-1; k++) sum += lorentzm[i].IntgAp(omega[k], omega[k+1], Gp(ind,k), Gp(ind,k+1), -omd[j]); C00[j] += sum*prf*th[j]/(1-fed[j]); } } if (lorentzm[ind].exist && lorentzp[i].exist) for (int j=0; j<m; j++) C00[j] += lorentzm[ind].IntgApLL(lorentzp[i], omd[j]) * prf * th[j]/fed[j]; if (lorentzp[ind].exist && lorentzm[i].exist) for (int j=m; j<omd.size(); j++) C00[j] += lorentzp[ind].IntgApLL(lorentzm[i], omd[j]) * prf * th[j]/(1-fed[j]); } } } } } if (common::pcore){ // core stuff for (int b=0; b<baths; b++){ for (int i=0; i<omd.size(); i++){ double sum1=0; for (int j=0; j<Na; j++){ for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){ if (l->first >= Na){ int ind = l->first; double x = Energy[ind]-common::lambda0-omd[i]; double prf = l->second/common::Ns[b]; sum1 -= prf*Gm[j](omega.Interp(x))/common::Q/M_PI; } } } double sum2=0; for (int j=Na; j<Na+Nc; j++){ for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){ if (l->first >= 0 && l->first<Na){ int ind = l->first; double x = Energy[j]-common::lambda0+omd[i]; double prf = l->second/common::Ns[b]; sum2 -= prf*Gm[ind](omega.Interp(x))/common::Q/M_PI; } } } A00c(b,i) = sum1+sum2; } } // Checking doping! 
for (int b=0; b<baths; b++){ double suma = 0, sumc = 0; for (int i=0; i<omd.size(); i++) { suma += A00(b,i)*fed[i]*omd.Dh(i); sumc += A00c(b,i)*fed[i]*omd.Dh(i); } suma *= common::Ns[b]; sumc *= common::Ns[b]; double miss_nd = common::nalpha[b]-(suma+sumc); double core_fact = 1.; if (sumc!=0 && common::renorm_core){ core_fact = (common::nalpha[b]-suma)/sumc; if (core_fact<0) core_fact=0; if (core_fact>10) core_fact = 10; cout<<b<<" : "<<miss_nd<<" renormaliziang core part by "<<core_fact<<endl; } for (int i=0; i<omd.size(); i++) A00(b,i) += A00c(b,i)*core_fact; if (common::renorm){ double suml=0, sumr=0; for (int i=0; i<omd.size(); i++){ suml += A00(b,i)*fed[i]*omd.Dh(i); sumr += A00(b,i)*(1-fed[i])*omd.Dh(i); } int izero = omd.find_(0.0); double ml1=0, mr1=0; for (int i=0; i<izero; i++) { ml1 += omd[i]*A00(b,i)*fed[i]*omd.Dh(i); mr1 += omd[i]*A00(b,i)*(1-fed[i])*omd.Dh(i); } double ml2=0, mr2=0; for (int i=izero+1; i<omd.size(); i++) { ml2 += omd[i]*A00(b,i)*fed[i]*omd.Dh(i); mr2 += omd[i]*A00(b,i)*(1-fed[i])*omd.Dh(i); } double n0 = common::nalpha[b]/common::Ns[b]; double C = (-ml2 + ml2*n0 + mr2*n0 - mr2*suml + ml2*sumr)/(ml1*mr2-ml2*mr1); double D = (ml1 - ml1*n0 - mr1*n0 + mr1*suml - ml1*sumr)/(ml1*mr2-ml2*mr1); if (1+C*omd[0]<0) C = -1/omd[0]; if (1+D*omd.last()<0) D = -1/omd.last(); for (int i=0; i<izero; i++) A00(b,i) *= (1+C*omd[i]); for (int i=izero+1; i<omd.size(); i++) A00(b,i) *= (1+D*omd[i]); cout<<"Renormalizing A["<<b<<"] by "<<C<<", "<<D<<"at negative and positive frequency"<<endl; } } } // ofstream out("Aloc.imp"); out.precision(16); // for (int i=0; i<omd.size(); i++){ // out<<setw(25)<<omd[i]<<" "; // for (int b=0; b<baths; b++) out<<setw(25)<<A00(b,i)<<" "; // out<<endl; // } } inline void Physical::KramarsKronig() { for (int b=0; b<baths; b++) G00[b].KramarsKronig(omd,logod); } void Physical::CalcSelfEnergy() { for (int b=0; b<baths; b++){ for (int i=0; i<omd.size(); i++){ //double Deltar = ::KramarsKronig(Ac[b], omd, omd[i], i, Ac[b][i]); 
//dcomplex Delta(-M_PI*Deltar,-M_PI*Ac[b][i]); Sig[b][i] = omd[i]-common::Ed[b]-Delta0[b][i]-1/G00[b][i]; if (Sig[b][i].imag()>0) Sig[b][i].imag()=0.0; } } if (common::cmp_susc){ for (int i=0; i<omd.size(); i++) Chi[i] = dcomplex(::KramarsKronig(C00, omd, omd[i], i, C00[i]),C00[i]); } } void Physical::Print(int n, string dir="") { string filename; if (n<0) filename = common::outdir+"/A00"+dir; else filename = common::outdir+NameOfFile("/A00",n,3); ofstream out(filename.c_str()); out.precision(16); common::printHead(out)<<" peakposition=" << omd.dcenter()<<endl; for (int i=0; i<omd.size(); i++){ out <<setw(25)<<omd[i]; for (int b=0; b<baths; b++) out<<setw(25)<<A00[b][i]<<setw(25)<<G00[b][i]<<setw(25)<<-Sig[b][i]; out<<endl; } if (n<0) filename = common::outdir+"/Susc"+dir; else filename = common::outdir+NameOfFile("/Susc",n,3); ofstream outs(filename.c_str()); outs.precision(16); common::printHead(outs)<<" peakposition=" << omd.dcenter()<<endl; for (int i=0; i<omd.size(); i++) outs <<setw(25)<<omd[i]<<setw(25)<<Chi[i]<<endl; } void Physical::Print0(const string& filename) { ofstream out(filename.c_str()); out.precision(16); common::printHead(out)<<" peakposition=" << omd.dcenter()<<endl; for (int i=0; i<omd.size(); i++){ out <<setw(25)<<omd[i]; for (int b=0; b<baths; b++) out<<setw(25)<<A00[b][i]; for (int b=0; b<baths; b++) out<<setw(25)<<G00[b][i]; for (int b=0; b<baths; b++) out<<setw(25)<<-Sig[b][i]; out<<endl; } } double Auxiliary::DeterminSelfEnergies(double alpha,int CmpDiff){ double beta=1-alpha; Sigtmp.resize(om.size()); if (CmpDiff<0) CmpDiff = Na; double diff=0, norm=0; for (int j=0; j<Na; j++){ for (int i=0; i<om.size(); i++) if (Sigtn(j,i)>0) Sigtn(j,i)=0; for (int i=0; i<om.size(); i++) Sigtmp[i].imag() = Sigtn(j,i)*(1-fe[i]); Sigtmp.KramarsKronig(om, logo); for (int i=0; i<om.size(); i++){ dcomplex Sigcn = Sigtmp[i] + Sigcore(j,i); Sigtn(j,i) += Sigcore(j,i).imag(); if (j<CmpDiff){ diff += fabs(Sigtn(j,i)-Sigt(j,i)); norm += fabs(Sigt(j,i)); } 
Sigt(j,i) = beta*Sigt(j,i)+alpha*Sigtn(j,i); Sigc(j,i) = beta*Sigc(j,i)+alpha*Sigcn; } } return diff/norm; } void Physical::DeterminG00(double alpha,ostream& loging) { double beta=1-alpha; double alphapi=-alpha*M_PI; for (int b=0; b<baths; b++){ for (int j=0; j<omd.size(); j++) G00[b][j].imag()=beta*G00[b][j].imag()+alphapi*A00[b][j]; G00[b].KramarsKronig(omd,logod); } common::TrLogGimp=0.0; for (int b=0; b<baths; b++){ double Ndf=0.0; double dsum=0; for (int j=0; j<omd.size(); j++){ dsum += -log(-G00[b][j]).imag()*fed[j]*omd.Dh(j)/M_PI; Ndf += -G00[b][j].imag()*fed[j]*omd.Dh(j)/M_PI; } common::TrLogGimp += dsum*common::Ns[b]; Ndf *= common::Ns[b]; loging<<"Expected density:"<<common::nalpha[b]<<" numerical density:"<<Ndf<<endl; } loging<<"TrLogGimp="<<common::TrLogGimp<<endl; } void Auxiliary::PrintNorm(ostream& stream) { stream<<" Norm of Spectral functions: "<<endl<<" "; stream.setf(ios::fixed); for (int i=0; i<Na; i++){ double sum=0; for (int j=0; j<om.size(); j++) sum += Gp(i,j)*om.Dh(j); sum += lorentzp[i].P*M_PI; sum/=-M_PI; double norm0=1; stream<<setprecision(4)<<" "; if (fabs(sum-norm0)<1e-2) stream<<COLOR(GREEN,setw(2)<<i<<":"<<setw(8)<<sum)<<" "; else if (fabs(sum-norm0)<1e-1) stream<<COLOR(YELLOW,setw(2)<<i<<":"<<setw(8)<<sum)<<" "; else stream<<COLOR(PURPLE,setw(2)<<i<<":"<<setw(8)<<sum)<<" "; if ((i+1)%6==0) stream<<endl<<" "; } stream<<endl; for (int b=0; b<baths; b++){ stream<<setprecision(4)<<" "<<COLOR(BLUE,setw(2)<<b<<":"<<setw(8)<<common::nalpha[b])<<" "; } stream<<endl; stream.unsetf(ios::fixed); } void Physical::PrintA00(ostream& out) { out.precision(16); common::printHead(out)<<" peakposition=" << omd.dcenter()<<endl; for (int i=0; i<omd.size(); i++){ out<<setw(25)<<omd[i]; for (int b=0; b<baths; b++) out<<setw(25)<<A00[i]; out<<endl; } } double Auxiliary::Difference(){ double diff=0, norm=0; for (int j=0; j<Na; j++){ for (int i=0; i<om.size(); i++){ diff += fabs(Sigtn(j,i)-Sigt(j,i)); norm += 0.5*fabs(Sigtn(j,i)+Sigtn(j,i)); } } return 
diff/norm; } /******************* Used only for debugging **********************/ void Auxiliary::PrintSign() { for (int i=0; i<Na; i++){ ofstream out(NameOfFile("Sign",i,2).c_str()); out.precision(16); for (int j=0; j<om.size(); j++) out<<setw(25)<<om[j]<<setw(25)<<-Sigtn[i][j]<<endl; } } void Auxiliary::Print_aAc(int l) { for (int i=0; i<aAc[0].size_N(); i++){ ofstream out(NameOfFile_("aAc",l,i,1,3).c_str()); out.precision(16); for (int j=0; j<aAc[0].size_Nd(); j++){ out<<setw(25)<<om[j]<<setw(25)<<aAc[0][i][j]/om.Dh(j)<<endl; } } } /******************* New things ******************************/ void common::ParsInputFile(const string& filename) { ifstream input(filename.c_str()); string line; getline(input,line); input>>baths; Ns.resize(baths); for (int i=0; i<baths; i++) input>>Ns[i]; input>>Na; input>>Nc; getline(input,line); getline(input,line); if (!input){ cerr<<filename<<" file not recognized. Error in first 3 lines!"<<endl; exit(1);} deg.resize(Na+Nc); Ms.resize(Na+Nc,baths); Mtot.resize(Na+Nc); sJc.resize(Na+Nc); ncab.resize(Na+Nc, baths); ncaf.resize(Na+Nc, baths); prefactb.resize(Na+Nc, baths); prefactf.resize(Na+Nc, baths); prefactG.resize(Na+Nc, baths); sncab.resize(Na+Nc); sncaf.resize(Na+Nc); for (int i=0; i<Na+Nc; i++) sncab[i].resize(baths); for (int i=0; i<Na+Nc; i++) sncaf[i].resize(baths); vector<int> Nncab(baths), Nncaf(baths); for (int i=0; i<Na+Nc; i++){ getline(input, line); if (!input){ cerr<<filename<<" file not recognized. 
Error in line number "<<i+3<<endl; exit(1);} stringstream thisline(line); int lc; thisline>>lc; for (int j=0; j<baths; j++) thisline>>Ms[i][j]; thisline>>Mtot[i]>>deg[i]>>sJc[i]; for (int j=0; j<baths; j++) thisline>>Nncab[j]; for (int j=0; j<baths; j++) thisline>>Nncaf[j]; string cross; double fct; int ind; for (int j=0; j<baths; j++){ for (int k=0; k<Nncab[j]; k++){ thisline>>fct>>cross>>ind; sncab[i][j][ind]=fct; } } for (int j=0; j<baths; j++){ for (int k=0; k<Nncaf[j]; k++){ thisline>>fct>>cross>>ind; sncaf[i][j][ind]=fct; } } if (!input){ cerr<<filename<<" file not recognized. Error in line number "<<i+3<<endl; exit(1);} } getline(input, line);// comment cmp_susc = false; if (input){ suscb.resize(Na); for (int i=0; i<Na; i++){ getline(input, line); if (!input) goto exit_loop; stringstream thisline(line); int lc; thisline>>lc; int ndiagram; thisline>>ndiagram; string cross; double fct; int ind; for (int j=0; j<ndiagram; j++){ thisline>>fct>>cross>>ind; suscb[i][ind]=fct; } } cmp_susc = true; } exit_loop: PrintParsedData(cout); totDeg = 0; for (int i=0; i<Na; i++) totDeg += deg[i]; } void common::PrintParsedData(ostream& stream) { stream<<baths<<" "; for (int i=0; i<baths; i++) stream<<Ns[i]<<" "; stream<<Na<<" "<<Nc<<endl; for (int i=0; i<Na+Nc; i++){ stream<<setw(3)<<i<<" "; if (i<Na) stream<<"v "; else stream<<"c "; for (int j=0; j<baths; j++) stream<<setw(10)<<Ms[i][j]; stream<<setw(4)<<Mtot[i]<<setw(5)<<deg[i]<<setw(6)<<sJc[i]; for (int b=0; b<baths; b++) stream<<setw(2)<<sncab[i][b].size()<<" "; for (int b=0; b<baths; b++) stream<<setw(2)<<sncaf[i][b].size()<<" "; for (int b=0; b<baths; b++) for (map<int,double>::const_iterator l=sncab[i][b].begin(); l!=sncab[i][b].end(); l++) stream<<setw(6)<<l->second<<" x "<<setw(4)<<left<<l->first<<right; for (int b=0; b<baths; b++) for (map<int,double>::const_iterator l=sncaf[i][b].begin(); l!=sncaf[i][b].end(); l++) stream<<setw(6)<<l->second<<" x "<<setw(4)<<left<<l->first<<right; stream<<endl; } if (!cmp_susc) 
return; stream<<"Susceptibility digrams:"<<endl; for (int i=0; i<Na; i++){ stream<<setw(3)<<i<<" "; for (map<int,double>::const_iterator l=suscb[i].begin(); l!=suscb[i].end(); l++) stream<<setw(6)<<l->second<<" x "<<setw(4)<<left<<l->first<<right; stream<<endl; } } void print(std::ostream& stream, const mesh1D& om, const function2D<dcomplex>& f, int width=20) { if (om.size()!=f.size_Nd()) std::cerr<<"Can't print objectc of different size!"<<std::endl; for (int i=0; i<om.size(); i++){ stream <<std::setw(width)<<om[i]; for (int j=0; j<f.size_N(); j++) stream<<std::setw(width)<<f(j,i); stream<<std::endl; } } void Physical::MissingDoping(double start) { cout<<"Missing doping : "; for (int b=0; b<baths; b++){ double sum = 0; for (int i=0; i<omd.size(); i++) { if (omd[i]>start) sum += G00[b][i].imag()*fed[i]*omd.Dh(i); } sum *= -common::Ns[b]/M_PI; common::miss_nd[b] = common::nalpha[b]-sum; cout<<b<<" : "<<common::miss_nd[b]<<" "; } cout<<endl; common::Sinfty.resize(baths); for (int b=0; b<baths; b++){ double sum0 = 0, sum1 = 0; for (int i=0; i<omd.size(); i++) { sum0 += A00(b,i)*omd.Dh(i); sum1 += A00(b,i)*omd[i]*omd.Dh(i); } common::moment[b][0] = sum0; common::moment[b][1] = sum1; common::Sinfty[b] = sum1/sum0-common::Ed[b]; } } void Auxiliary::PrintCore(const string& filename) { ofstream out(filename.c_str()); for (int i=0; i<om.size(); i++){ out<<setw(20)<<om[i]<<" "; for (int j=0; j<Na; j++){ out<<setw(20)<<Sigcore[j][i]<<" "; } out<<endl; } } #endif
single.c
/*
 * Copyright (c) 2014 ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */
#include <bomp_internal.h>
/*
 * These functions implement the OpenMP SINGLE construct.
 *
 * The compiler lowers
 *
 *   #pragma omp single
 *   { body; }
 *
 * into
 *
 *   if (GOMP_single_start ())
 *     body;
 *   GOMP_barrier ();
 *
 * and the copyprivate variant
 *
 *   #pragma omp single copyprivate(x)
 *   { body; }
 *
 * into
 *
 *   datap = GOMP_single_copy_start ();
 *   if (datap == NULL) {
 *     body;
 *     data.x = x;
 *     GOMP_single_copy_end (&data);
 *   } else {
 *     x = datap->x;
 *   }
 *   GOMP_barrier ();
 */

/*
 * Returns true for exactly one thread so that only it executes the SINGLE
 * body: thread 0 of the team, or any caller for which no per-thread state
 * has been set up (tls == NULL).
 */
bool GOMP_single_start(void)
{
    struct bomp_thread_local_data *tls = g_bomp_state->backend.get_tls();

    return (tls == NULL) || (tls->work->thread_id == 0);
}

/* copyprivate variant: not yet implemented in this backend */
void *GOMP_single_copy_start (void)
{
    assert(!"NYI");
    return NULL;
}

/* copyprivate variant: not yet implemented in this backend */
void GOMP_single_copy_end (void *data)
{
    assert(!"NYI");
}
attribute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/identify.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/magick.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/segment.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. % % The format of the GetImageBoundingBox method is: % % RectangleInfo GetImageBoundingBox(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o bounds: Method GetImageBoundingBox returns the bounding box of an % image canvas. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ typedef struct _EdgeInfo { double left, right, top, bottom; } EdgeInfo; static double GetEdgeBackgroundCensus(const Image *image, const CacheView *image_view,const GravityType gravity,const size_t width, const size_t height,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double census; Image *edge_image; PixelInfo background, pixel; RectangleInfo edge_geometry; const Quantum *p; ssize_t y; /* Determine the percent of image background for this edge. 
*/
  switch (gravity)
  {
    case NorthWestGravity:
    case NorthGravity:
    default:
    {
      /* sample the top-left corner pixel as the candidate background */
      p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
      break;
    }
    case NorthEastGravity:
    case EastGravity:
    {
      /* top-right corner */
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
        exception);
      break;
    }
    case SouthEastGravity:
    case SouthGravity:
    {
      /* bottom-right corner */
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
        (ssize_t) image->rows-1,1,1,exception);
      break;
    }
    case SouthWestGravity:
    case WestGravity:
    {
      /* bottom-left corner */
      p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
        exception);
      break;
    }
  }
  /*
    The corner pixel selected by the gravity is the default background color;
    the "background" and "trim:background-color" artifacts override it.
  */
  GetPixelInfoPixel(image,p,&background);
  artifact=GetImageArtifact(image,"background");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&background,exception);
  artifact=GetImageArtifact(image,"trim:background-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&background,exception);
  /*
    Crop the requested edge strip (gravity-adjusted) out of the image.
  */
  edge_geometry.width=width;
  edge_geometry.height=height;
  edge_geometry.x=x_offset;
  edge_geometry.y=y_offset;
  GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
  edge_image=CropImage(image,&edge_geometry,exception);
  if (edge_image == (Image *) NULL)
    return(0.0);
  census=0.0;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    ssize_t
      x;

    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      /* count the pixels that do NOT fuzzily match the background color */
      GetPixelInfoPixel(edge_image,p,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse)
        census++;
      p+=GetPixelChannels(edge_image);
    }
  }
  /* normalize to the fraction of non-background pixels in the strip */
  census/=((double) edge_image->columns*edge_image->rows);
  edge_view=DestroyCacheView(edge_view);
  edge_image=DestroyImage(edge_image);
  return(census);
}

/*
  GetMinEdgeBackgroundCensus() returns the smallest of the four per-edge
  census values, i.e. the census of the edge that most resembles the
  background (continues on the following line).
*/
static inline double GetMinEdgeBackgroundCensus(const EdgeInfo *edge)
{
  double
    census;

  census=MagickMin(MagickMin(MagickMin(edge->left,edge->right),edge->top),
edge->bottom); return(census); } static RectangleInfo GetEdgeBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double background_census, percent_background; EdgeInfo edge, vertex; Image *edge_image; RectangleInfo bounds; /* Get the image bounding box. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); SetGeometry(image,&bounds); edge_image=CloneImage(image,0,0,MagickTrue,exception); if (edge_image == (Image *) NULL) return(bounds); (void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page); (void) memset(&vertex,0,sizeof(vertex)); edge_view=AcquireVirtualCacheView(edge_image,exception); edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,WestGravity, 1,0,0,0,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,EastGravity, 1,0,0,0,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,NorthGravity, 0,1,0,0,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,SouthGravity, 0,1,0,0,exception); percent_background=1.0; artifact=GetImageArtifact(edge_image,"trim:percent-background"); if (artifact != (const char *) NULL) percent_background=StringToDouble(artifact,(char **) NULL)/100.0; percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon), 1.0); background_census=GetMinEdgeBackgroundCensus(&edge); for ( ; background_census < percent_background; background_census=GetMinEdgeBackgroundCensus(&edge)) { if ((bounds.width == 0) || (bounds.height == 0)) break; if (fabs(edge.left-background_census) < MagickEpsilon) { /* Trim left edge. 
*/ vertex.left++; bounds.width--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.right-background_census) < MagickEpsilon) { /* Trim right edge. */ vertex.right++; bounds.width--; edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.top-background_census) < MagickEpsilon) { /* Trim top edge. */ vertex.top++; bounds.height--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); continue; } if (fabs(edge.bottom-background_census) < MagickEpsilon) { /* Trim bottom edge. 
*/ vertex.bottom++; bounds.height--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } } edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); bounds.x=(ssize_t) vertex.left; bounds.y=(ssize_t) vertex.top; if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return(bounds); } MagickExport RectangleInfo GetImageBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *image_view; const char *artifact; MagickBooleanType status; PixelInfo target[4], zero; RectangleInfo bounds; const Quantum *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); artifact=GetImageArtifact(image,"trim:percent-background"); if (artifact != (const char *) NULL) return(GetEdgeBoundingBox(image,exception)); artifact=GetImageArtifact(image, "trim:edges"); if (artifact == (const char *) NULL) { bounds.width=image->columns == 1 ? 1 : 0; bounds.height=image->rows == 1 ? 
1 : 0; bounds.x=(ssize_t) image->columns; bounds.y=(ssize_t) image->rows; } else { char *edges, *q, *r; bounds.width=(size_t) image->columns; bounds.height=(size_t) image->rows; bounds.x=0; bounds.y=0; edges=AcquireString(artifact); r=edges; while ((q=StringToken(",",&r)) != (char *) NULL) { if (LocaleCompare(q,"north") == 0) bounds.y=(ssize_t) image->rows; if (LocaleCompare(q,"east") == 0) bounds.width=0; if (LocaleCompare(q,"south") == 0) bounds.height=0; if (LocaleCompare(q,"west") == 0) bounds.x=(ssize_t) image->columns; } edges=DestroyString(edges); } GetPixelInfo(image,&target[0]); image_view=AcquireVirtualCacheView(image,exception); p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); return(bounds); } GetPixelInfoPixel(image,p,&target[0]); GetPixelInfo(image,&target[1]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[1]); GetPixelInfo(image,&target[2]); p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[2]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,(ssize_t) image->rows-1,1,1,exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[3]); status=MagickTrue; GetPixelInfo(image,&zero); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; RectangleInfo bounding_box; const Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif bounding_box=bounds; q=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (q == (const Quantum *) NULL) { 
status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if ((x < bounding_box.x) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.x=x; if ((x > (ssize_t) bounding_box.width) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse)) bounding_box.width=(size_t) x; if ((y < bounding_box.y) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.y=y; if ((y > (ssize_t) bounding_box.height) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse)) bounding_box.height=(size_t) y; if ((x < (ssize_t) bounding_box.width) && (y > (ssize_t) bounding_box.height) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[3]) == MagickFalse)) { bounding_box.width=(size_t) x; bounding_box.height=(size_t) y; } q+=GetPixelChannels(image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif { if (bounding_box.x < bounds.x) bounds.x=bounding_box.x; if (bounding_box.y < bounds.y) bounds.y=bounding_box.y; if (bounding_box.width > bounds.width) bounds.width=bounding_box.width; if (bounding_box.height > bounds.height) bounds.height=bounding_box.height; } } image_view=DestroyCacheView(image_view); if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); else { bounds.width-=(bounds.x-1); bounds.height-=(bounds.y-1); } return(bounds); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C o n v e x H u l l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageConvexHull() returns the convex hull points of an image canvas. 
%
%  The format of the GetImageConvexHull method is:
%
%      PointInfo *GetImageConvexHull(const Image *image,
%        size_t number_vertices,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_vertices: the number of vertices in the convex hull.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  LexicographicalOrder() returns the 2-D cross product of vectors (a->b) and
  (a->c): positive for a counter-clockwise turn, negative for clockwise,
  zero when the three points are collinear.  Used as the orientation test of
  the monotone-chain hull construction below.
*/
static double LexicographicalOrder(PointInfo *a,PointInfo *b,PointInfo *c)
{
  /*
    Cross product orientation test (not a lexicographic comparison).
  */
  return((b->x-a->x)*(c->y-a->y)-(b->y-a->y)*(c->x-a->x));
}

/*
  GetEdgeBackgroundColor() samples all four edge strips and returns the
  candidate background color whose strip contains the most matching pixels.
  The candidate may be forced via the "convex-hull:background-color" or
  "background" artifacts.
*/
static PixelInfo GetEdgeBackgroundColor(const Image *image,
  const CacheView *image_view,ExceptionInfo *exception)
{
  const char
    *artifact;

  double
    census[4],
    edge_census;

  PixelInfo
    background[4],
    edge_background;

  ssize_t
    i;

  /*
    Most dominant color of edges/corners is the background color of the image.
  */
  memset(&edge_background,0,sizeof(edge_background));
  artifact=GetImageArtifact(image,"convex-hull:background-color");
  if (artifact == (const char *) NULL)
    artifact=GetImageArtifact(image,"background");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
  for (i=0; i < 4; i++)
  {
    CacheView
      *edge_view;

    GravityType
      gravity;

    Image
      *edge_image;

    PixelInfo
      pixel;

    RectangleInfo
      edge_geometry;

    const Quantum
      *p;

    ssize_t
      y;

    census[i]=0.0;
    (void) memset(&edge_geometry,0,sizeof(edge_geometry));
    /*
      i selects the edge: 0=west, 1=east, 2=north, 3=south; the seed color
      is sampled from a corner pixel of that edge.
    */
    switch (i)
    {
      case 0:
      default:
      {
        p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
          exception);
        gravity=WestGravity;
        edge_geometry.width=1;
        edge_geometry.height=0;
        break;
      }
      case 1:
      {
        p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
          exception);
        gravity=EastGravity;
        edge_geometry.width=1;
        edge_geometry.height=0;
        break;
      }
      case 2:
      {
        p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
        gravity=NorthGravity;
        edge_geometry.width=0;
        edge_geometry.height=1;
        break;
      }
      case 3:
      {
        p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
          (ssize_t) image->rows-1,1,1,exception);
        gravity=SouthGravity;
        edge_geometry.width=0;
        edge_geometry.height=1;
        break;
      }
    }
    GetPixelInfoPixel(image,p,background+i);
    if (artifact != (const char *) NULL)
      (void) QueryColorCompliance(artifact,AllCompliance,background+i,
        exception);
    GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
    edge_image=CropImage(image,&edge_geometry,exception);
    if (edge_image == (Image *) NULL)
      continue;
    edge_view=AcquireVirtualCacheView(edge_image,exception);
    for (y=0; y < (ssize_t) edge_image->rows; y++)
    {
      ssize_t
        x;

      p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,
        exception);
      if (p == (const Quantum *) NULL)
        break;
      for (x=0; x < (ssize_t) edge_image->columns; x++)
      {
        GetPixelInfoPixel(edge_image,p,&pixel);
        if (IsFuzzyEquivalencePixelInfo(&pixel,background+i) == MagickFalse)
          census[i]++;
        p+=GetPixelChannels(edge_image);
      }
    }
    edge_view=DestroyCacheView(edge_view);
    edge_image=DestroyImage(edge_image);
  }
  /*
    Keep the candidate with the highest census.
  */
  edge_census=(-1.0);
  for (i=0; i < 4; i++)
    if (census[i] > edge_census)
      {
        edge_background=background[i];
        edge_census=census[i];
      }
  return(edge_background);
}

/*
  TraceConvexHull() builds the convex hull of the given vertices with
  Andrew's monotone-chain algorithm.  The chain buffer is caller supplied
  (through *monotone_chain) and receives pointers into the vertices array;
  *chain_length is set to the number of chain entries produced.
*/
void TraceConvexHull(PointInfo *vertices,size_t number_vertices,
  PointInfo ***monotone_chain,size_t *chain_length)
{
  PointInfo
    **chain;

  ssize_t
    i;

  size_t
    demark,
    n;

  /*
    Construct the upper and lower hulls: rightmost to leftmost
    counterclockwise.
  */
  chain=(*monotone_chain);
  n=0;
  for (i=0; i < (ssize_t) number_vertices; i++)
  {
    while ((n >= 2) &&
           (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0))
      n--;
    chain[n++]=(&vertices[i]);
  }
  /*
    demark separates the first hull from the second; the second sweep never
    pops below it.
  */
  demark=n+1;
  for (i=(ssize_t) number_vertices-2; i >= 0; i--)
  {
    while ((n >= demark) &&
           (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0))
      n--;
    chain[n++]=(&vertices[i]);
  }
  *chain_length=n;
}

MagickExport PointInfo *GetImageConvexHull(const Image *image,
  size_t *number_vertices,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MemoryInfo
    *monotone_info,
    *vertices_info;

  PixelInfo
    background;

  PointInfo
    *convex_hull,
    **monotone_chain,
    *vertices;

  size_t
    n;

  ssize_t
    y;

  /*
    Identify convex hull vertices of image foreground object(s).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *number_vertices=0;
  vertices_info=AcquireVirtualMemory(image->columns,image->rows*
    sizeof(*vertices));
  monotone_info=AcquireVirtualMemory(2*image->columns,2*
    image->rows*sizeof(*monotone_chain));
  if ((vertices_info == (MemoryInfo *) NULL) ||
      (monotone_info == (MemoryInfo *) NULL))
    {
      if (monotone_info != (MemoryInfo *) NULL)
        monotone_info=(MemoryInfo *) RelinquishVirtualMemory(monotone_info);
      if (vertices_info != (MemoryInfo *) NULL)
        vertices_info=RelinquishVirtualMemory(vertices_info);
      return((PointInfo *) NULL);
    }
  vertices=(PointInfo *) GetVirtualMemoryBlob(vertices_info);
  monotone_chain=(PointInfo **) GetVirtualMemoryBlob(monotone_info);
  image_view=AcquireVirtualCacheView(image,exception);
  background=GetEdgeBackgroundColor(image,image_view,exception);
  status=MagickTrue;
  n=0;
  /*
    Collect every non-background pixel coordinate as a candidate vertex
    (row-major order, which also yields the sort the chain sweep expects).
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      GetPixelInfoPixel(image,p,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse)
        {
          vertices[n].x=(double) x;
          vertices[n].y=(double) y;
          n++;
        }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Return the convex hull of the image foreground object(s).
  */
  TraceConvexHull(vertices,n,&monotone_chain,number_vertices);
  convex_hull=(PointInfo *) AcquireQuantumMemory(*number_vertices,
    sizeof(*convex_hull));
  if (convex_hull != (PointInfo *) NULL)
    for (n=0; n < *number_vertices; n++)
      convex_hull[n]=(*monotone_chain[n]);
  monotone_info=RelinquishVirtualMemory(monotone_info);
  vertices_info=RelinquishVirtualMemory(vertices_info);
  return(convex_hull);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e D e p t h                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDepth() returns the depth of a particular image channel.
%
%  The format of the GetImageDepth method is:
%
%      size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    *current_depth,
    depth,
    number_threads;

  ssize_t
    y;

  /*
    Compute image depth.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    One depth accumulator per potential thread; the maximum is reduced at
    the end.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  current_depth=(size_t *) AcquireQuantumMemory(number_threads,
    sizeof(*current_depth));
  if (current_depth == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  status=MagickTrue;
  for (i=0; i < (ssize_t) number_threads; i++)
    current_depth[i]=1;
  if ((image->storage_class == PseudoClass) &&
      (image->alpha_trait == UndefinedPixelTrait))
    {
      /*
        Colormapped image without alpha: the colormap alone determines the
        depth, so scan it instead of the pixels.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        const int
          id = GetOpenMPThreadId();

        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          MagickBooleanType
            atDepth;

          QuantumAny
            range;

          atDepth=MagickTrue;
          range=GetQuantumRange(current_depth[id]);
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) &&
              (GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) &&
              (GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse))
            break;
          current_depth[id]++;
        }
      }
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
  image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  DisableMSCWarning(4127)
  if ((1UL*QuantumRange) <= MaxMap)
  RestoreMSCWarning
    {
      size_t
        *depth_map;

      /*
        Scale pixels to desired (optimized with depth map).
      */
      depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (size_t *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      /*
        Precompute, for every possible quantum value, the minimum depth that
        represents it exactly.
      */
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        for (depth=1; depth < (size_t) MAGICKCORE_QUANTUM_DEPTH; depth++)
        {
          Quantum
            pixel;

          QuantumAny
            range;

          range=GetQuantumRange(depth);
          pixel=(Quantum) i;
          if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
            break;
        }
        depth_map[i]=depth;
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        const Quantum
          *magick_restrict p;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        if (p == (const Quantum *) NULL)
          continue;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          ssize_t
            j;

          for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
          {
            PixelChannel
              channel = GetPixelChannelChannel(image,j);

            PixelTrait
              traits = GetPixelChannelTraits(image,channel);

            if ((traits & UpdatePixelTrait) == 0)
              continue;
            if (depth_map[ScaleQuantumToMap(p[j])] > current_depth[id])
              current_depth[id]=depth_map[ScaleQuantumToMap(p[j])];
          }
          p+=GetPixelChannels(image);
        }
        /*
          Once any thread reaches the maximum depth, the answer cannot grow
          further; stop the remaining rows early.
        */
        if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      depth_map=(size_t *) RelinquishMagickMemory(depth_map);
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
#endif
  /*
    Compute pixel depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      continue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,j);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Grow this thread's depth until the sample round-trips exactly
          through the reduced range.
        */
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          QuantumAny
            range;

          range=GetQuantumRange(current_depth[id]);
          if (p[j] == ScaleAnyToQuantum(ScaleQuantumToAny(p[j],range),range))
            break;
          current_depth[id]++;
        }
      }
      p+=GetPixelChannels(image);
    }
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  depth=current_depth[0];
  for (i=1; i < (ssize_t) number_threads; i++)
    if (depth < current_depth[i])
      depth=current_depth[i];
  current_depth=(size_t *) RelinquishMagickMemory(current_depth);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e M i n i m u m B o u n d i n g B o x                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageMinimumBoundingBox() returns the points that form the minimum
%  bounding box around the image foreground objects with the "Rotating
%  Calipers" algorithm.  The method also returns these properties:
%  minimum-bounding-box:area, minimum-bounding-box:width,
%  minimum-bounding-box:height, and minimum-bounding-box:angle.
%
%  The format of the GetImageMinimumBoundingBox method is:
%
%      PointInfo *GetImageMinimumBoundingBox(Image *image,
%        size_t number_vertices,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_vertices: the number of vertices in the bounding box.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Best caliper configuration found so far: box area, side lengths, the
  projection used to anchor the box, and the hull indices (p,q,v) of the
  supporting edge and its farthest vertex.
*/
typedef struct _CaliperInfo
{
  double
    area,
    width,
    height,
    projection;

  ssize_t
    p,
    q,
    v;
} CaliperInfo;

static inline double getAngle(PointInfo *p,PointInfo *q)
{
  /*
    Get the angle between line (p,q) and horizontal axis, in degrees.
  */
  return(RadiansToDegrees(atan2(q->y-p->y,q->x-p->x)));
}

/*
  getDistance() returns the SQUARED Euclidean distance between p and q;
  callers take sqrt() where the true length is needed.
*/
static inline double getDistance(PointInfo *p,PointInfo *q)
{
  double
    distance;

  distance=hypot(p->x-q->x,p->y-q->y);
  return(distance*distance);
}

static inline double getProjection(PointInfo *p,PointInfo *q,PointInfo *v)
{
  double
    distance;

  /*
    Projection of vector (x,y) - p into a line passing through p and q.
    Returns INFINITY for a degenerate (zero-length) edge.
  */
  distance=getDistance(p,q);
  if (distance < MagickEpsilon)
    return(INFINITY);
  return((q->x-p->x)*(v->x-p->x)+(v->y-p->y)*(q->y-p->y))/sqrt(distance);
}

static inline double getFeretDiameter(PointInfo *p,PointInfo *q,PointInfo *v)
{
  double
    distance;

  /*
    Distance from a point (x,y) to a line passing through p and q.
    Returns INFINITY for a degenerate (zero-length) edge.
  */
  distance=getDistance(p,q);
  if (distance < MagickEpsilon)
    return(INFINITY);
  return((q->x-p->x)*(v->y-p->y)-(v->x-p->x)*(q->y-p->y))/sqrt(distance);
}

MagickExport PointInfo *GetImageMinimumBoundingBox(Image *image,
  size_t *number_vertices,ExceptionInfo *exception)
{
  CaliperInfo
    caliper_info;

  const char
    *artifact;

  double
    angle,
    diameter,
    distance;

  PointInfo
    *bounding_box,
    *vertices;

  ssize_t
    i;

  size_t
    number_hull_vertices;

  /*
    Generate the minimum bounding box with the "Rotating Calipers" algorithm.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *number_vertices=0;
  vertices=GetImageConvexHull(image,&number_hull_vertices,exception);
  if (vertices == (PointInfo *) NULL)
    return((PointInfo *) NULL);
  *number_vertices=4;
  bounding_box=(PointInfo *) AcquireQuantumMemory(*number_vertices,
    sizeof(*bounding_box));
  if (bounding_box == (PointInfo *) NULL)
    {
      vertices=(PointInfo *) RelinquishMagickMemory(vertices);
      return((PointInfo *) NULL);
    }
  /*
    Seed with an area/width no real box can exceed so the first candidate
    always wins.
  */
  caliper_info.area=2.0*image->columns*image->rows;
  caliper_info.width=(double) image->columns+image->rows;
  caliper_info.height=0.0;
  caliper_info.projection=0.0;
  caliper_info.p=(-1);
  caliper_info.q=(-1);
  caliper_info.v=(-1);
  for (i=0; i < (ssize_t) number_hull_vertices; i++)
  {
    double
      area = 0.0,
      max_projection = 0.0,
      min_diameter = -1.0,
      min_projection = 0.0;

    ssize_t
      j,
      k;

    ssize_t
      p = -1,
      q = -1,
      v = -1;

    /*
      For hull edge (i,i+1): find the vertex with the maximum perpendicular
      distance (the Feret diameter for this orientation).
    */
    for (j=0; j < (ssize_t) number_hull_vertices; j++)
    {
      diameter=fabs(getFeretDiameter(&vertices[i],
        &vertices[(i+1) % number_hull_vertices],&vertices[j]));
      if (min_diameter < diameter)
        {
          min_diameter=diameter;
          p=i;
          q=(i+1) % number_hull_vertices;
          v=j;
        }
    }
    for (k=0; k < (ssize_t) number_hull_vertices; k++)
    {
      double
        projection;

      /*
        Rotating calipers.
      */
      projection=getProjection(&vertices[p],&vertices[q],&vertices[k]);
      min_projection=MagickMin(min_projection,projection);
      max_projection=MagickMax(max_projection,projection);
    }
    area=min_diameter*(max_projection-min_projection);
    if (caliper_info.area > area)
      {
        caliper_info.area=area;
        caliper_info.width=min_diameter;
        caliper_info.height=max_projection-min_projection;
        caliper_info.projection=max_projection;
        caliper_info.p=p;
        caliper_info.q=q;
        caliper_info.v=v;
      }
  }
  /*
    Initialize minimum bounding box.
  */
  diameter=getFeretDiameter(&vertices[caliper_info.p],
    &vertices[caliper_info.q],&vertices[caliper_info.v]);
  angle=atan2(vertices[caliper_info.q].y-vertices[caliper_info.p].y,
    vertices[caliper_info.q].x-vertices[caliper_info.p].x);
  bounding_box[0].x=vertices[caliper_info.p].x+cos(angle)*
    caliper_info.projection;
  bounding_box[0].y=vertices[caliper_info.p].y+sin(angle)*
    caliper_info.projection;
  bounding_box[1].x=floor(bounding_box[0].x+cos(angle+MagickPI/2.0)*diameter+
    0.5);
  bounding_box[1].y=floor(bounding_box[0].y+sin(angle+MagickPI/2.0)*diameter+
    0.5);
  bounding_box[2].x=floor(bounding_box[1].x+cos(angle)*(-caliper_info.height)+
    0.5);
  bounding_box[2].y=floor(bounding_box[1].y+sin(angle)*(-caliper_info.height)+
    0.5);
  bounding_box[3].x=floor(bounding_box[2].x+cos(angle+MagickPI/2.0)*(-diameter)+
    0.5);
  bounding_box[3].y=floor(bounding_box[2].y+sin(angle+MagickPI/2.0)*(-diameter)+
    0.5);
  /*
    Export minimum bounding box properties.
  */
  (void) FormatImageProperty(image,"minimum-bounding-box:area","%.*g",
    GetMagickPrecision(),caliper_info.area);
  (void) FormatImageProperty(image,"minimum-bounding-box:width","%.*g",
    GetMagickPrecision(),caliper_info.width);
  (void) FormatImageProperty(image,"minimum-bounding-box:height","%.*g",
    GetMagickPrecision(),caliper_info.height);
  (void) FormatImageProperty(image,"minimum-bounding-box:_p","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.p].x,
    GetMagickPrecision(),vertices[caliper_info.p].y);
  (void) FormatImageProperty(image,"minimum-bounding-box:_q","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.q].x,
    GetMagickPrecision(),vertices[caliper_info.q].y);
  (void) FormatImageProperty(image,"minimum-bounding-box:_v","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.v].x,
    GetMagickPrecision(),vertices[caliper_info.v].y);
  /*
    Find smallest angle to origin.
  */
  distance=hypot(bounding_box[0].x,bounding_box[0].y);
  angle=getAngle(&bounding_box[0],&bounding_box[1]);
  for (i=1; i < 4; i++)
  {
    double
      d = hypot(bounding_box[i].x,bounding_box[i].y);

    if (d < distance)
      {
        distance=d;
        angle=getAngle(&bounding_box[i],&bounding_box[(i+1) % 4]);
      }
  }
  artifact=GetImageArtifact(image,"minimum-bounding-box:orientation");
  if (artifact != (const char *) NULL)
    {
      double
        length,
        q_length,
        p_length;

      PointInfo
        delta,
        point;

      /*
        Find smallest perpendicular distance from edge to origin.
      */
      point=bounding_box[0];
      for (i=1; i < 4; i++)
      {
        if (bounding_box[i].x < point.x)
          point.x=bounding_box[i].x;
        if (bounding_box[i].y < point.y)
          point.y=bounding_box[i].y;
      }
      /*
        Translate the box so its minimum corner sits at the origin.
      */
      for (i=0; i < 4; i++)
      {
        bounding_box[i].x-=point.x;
        bounding_box[i].y-=point.y;
      }
      for (i=0; i < 4; i++)
      {
        double
          d,
          intercept,
          slope;

        delta.x=bounding_box[(i+1) % 4].x-bounding_box[i].x;
        delta.y=bounding_box[(i+1) % 4].y-bounding_box[i].y;
        slope=delta.y*PerceptibleReciprocal(delta.x);
        intercept=bounding_box[(i+1) % 4].y-slope*bounding_box[i].x;
        d=fabs((slope*bounding_box[i].x-bounding_box[i].y+intercept)*
          PerceptibleReciprocal(sqrt(slope*slope+1.0)));
        if ((i == 0) || (d < distance))
          {
            distance=d;
            point=delta;
          }
      }
      angle=RadiansToDegrees(atan(point.y*PerceptibleReciprocal(point.x)));
      length=hypot(point.x,point.y);
      /*
        Decide whether the nearest edge is the long or the short side and
        rotate the reported angle by 90 degrees if the requested
        orientation ("landscape" or "portrait") demands the other side.
      */
      p_length=fabs((double) MagickMax(caliper_info.width,caliper_info.height)-
        length);
      q_length=fabs(length-(double) MagickMin(caliper_info.width,
        caliper_info.height));
      if (LocaleCompare(artifact,"landscape") == 0)
        {
          if (p_length > q_length)
            angle+=(angle < 0.0) ? 90.0 : -90.0;
        }
      else
        if (LocaleCompare(artifact,"portrait") == 0)
          {
            if (p_length < q_length)
              angle+=(angle >= 0.0) ? 90.0 : -90.0;
          }
    }
  (void) FormatImageProperty(image,"minimum-bounding-box:angle","%.*g",
    GetMagickPrecision(),angle);
  (void) FormatImageProperty(image,"minimum-bounding-box:unrotate","%.*g",
    GetMagickPrecision(),-angle);
  vertices=(PointInfo *) RelinquishMagickMemory(vertices);
  return(bounding_box);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e Q u a n t u m D e p t h                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantumDepth() returns the depth of the image rounded to a legal
%  quantum depth: 8, 16, or 32.
%
%  The format of the GetImageQuantumDepth method is:
%
%      size_t GetImageQuantumDepth(const Image *image,
%        const MagickBooleanType constrain)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o constrain: A value other than MagickFalse, constrains the depth to
%      a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  size_t
    depth;

  /*
    Round image->depth up to the next legal quantum depth (8/16/32/64; note
    64 is accepted here even though the header above lists only 8/16/32),
    then optionally cap at the build's MAGICKCORE_QUANTUM_DEPTH.
  */
  depth=image->depth;
  if (depth <= 8)
    depth=8;
  else
    if (depth <= 16)
      depth=16;
    else
      if (depth <= 32)
        depth=32;
      else
        if (depth <= 64)
          depth=64;
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageType() returns the type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  The format of the GetImageType method is:
%
%      ImageType GetImageType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport ImageType GetImageType(const Image *image)
{
  MagickBooleanType
    has_alpha;

  /*
    Classify the image from its current flags: CMYK first, then bi-level,
    grayscale, palette, and finally true color; the alpha trait selects the
    matte variant of each class.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  has_alpha=image->alpha_trait != UndefinedPixelTrait ? MagickTrue :
    MagickFalse;
  if (image->colorspace == CMYKColorspace)
    return(has_alpha == MagickFalse ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IsImageMonochrome(image) != MagickFalse)
    return(BilevelType);
  if (IsImageGray(image) != MagickFalse)
    return(has_alpha == MagickFalse ? GrayscaleType : GrayscaleAlphaType);
  if (IsPaletteImage(image) != MagickFalse)
    return(has_alpha == MagickFalse ? PaletteType : PaletteAlphaType);
  return(has_alpha == MagickFalse ? TrueColorType : TrueColorAlphaType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e G r a y                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageGray() returns grayscale if all the pixels in the image have
%  the same red, green, and blue intensities, and bi-level if the intensity is
%  either 0 or QuantumRange.  Otherwise undefined is returned.
%
%  The format of the IdentifyImageGray method is:
%
%      ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  ssize_t
    y;

  /*
    Scan every pixel, downgrading the classification from bi-level to
    grayscale to undefined as evidence accumulates; stop at the first
    non-gray pixel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsImageGray(image) != MagickFalse)
    return(image->type);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *q;

    ssize_t
      x;

    q=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelGray(image,q) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      if ((type == BilevelType) && (IsPixelMonochrome(image,q) == MagickFalse))
        type=GrayscaleType;
      q+=GetPixelChannels(image);
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  /*
    Promote to the matte variant when the image carries alpha.
  */
  if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait))
    type=GrayscaleAlphaType;
  return(type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e M o n o c h r o m e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
%  have the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange.
%
%  The format of the IdentifyImageMonochrome method is:
%
%      MagickBooleanType IdentifyImageMonochrome(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    monochrome;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A cached bi-level type is trusted without inspecting pixels.
  */
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /*
    Assume monochrome until a pixel proves otherwise; stop at the first
    counter-example.
  */
  monochrome=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (monochrome != MagickFalse) && (y < (ssize_t) image->rows); y++)
  {
    const Quantum
      *q;

    ssize_t
      x;

    q=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelMonochrome(image,q) == MagickFalse)
        {
          monochrome=MagickFalse;
          break;
        }
      q+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  return(monochrome);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e T y p e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageType() returns the potential type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  To ensure the image type matches its potential, use SetImageType():
%
%      (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
%  The format of the IdentifyImageType method is:
%
%      ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport ImageType IdentifyImageType(const Image *image, ExceptionInfo *exception) { ImageType type; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == CMYKColorspace) { if (image->alpha_trait == UndefinedPixelTrait) return(ColorSeparationType); return(ColorSeparationAlphaType); } type=IdentifyImageGray(image,exception); if (IsGrayImageType(type)) return(type); if (IdentifyPaletteImage(image,exception) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(PaletteAlphaType); return(PaletteType); } if (image->alpha_trait != UndefinedPixelTrait) return(TrueColorAlphaType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageGray() returns MagickTrue if the type of the image is grayscale or % bi-level. % % The format of the IsImageGray method is: % % MagickBooleanType IsImageGray(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType IsImageGray(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsGrayImageType(image->type)) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageMonochrome() returns MagickTrue if type of the image is bi-level. % % The format of the IsImageMonochrome method is: % % MagickBooleanType IsImageMonochrome(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */
MagickExport MagickBooleanType IsImageMonochrome(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /* Checks the cached type only; use IdentifyImageMonochrome() to inspect
     the actual pixels. */
  if (image->type == BilevelType)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e O p a q u e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageOpaque() returns MagickTrue if none of the pixels in the image have
%  an alpha value other than OpaqueAlpha (QuantumRange).
%
%  Will return true immediately if the alpha channel is not available.
%
%  The format of the IsImageOpaque method is:
%
%      MagickBooleanType IsImageOpaque(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImageOpaque(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const Quantum
    *p;

  ssize_t
    x;

  ssize_t
    y;

  /*
    Determine if image is opaque.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelAlpha(image,p) != OpaqueAlpha)
        break;
      p+=GetPixelChannels(image);
    }
    /* x short of image->columns means a non-opaque pixel was found. */
    if (x < (ssize_t) image->columns)
      break;
  }
  image_view=DestroyCacheView(image_view);
  /* Both loops break early on the first non-opaque pixel or on a
     pixel-cache failure, leaving y short of image->rows. */
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e D e p t h                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageDepth() sets the depth of the image.
%
%  The format of the SetImageDepth method is:
%
%      MagickBooleanType SetImageDepth(Image *image,const size_t depth,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o depth: the image depth.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Requesting the full quantum depth (or more) needs no requantization;
    just record the depth.
  */
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        i;

      /*
        Requantize the colormap: the ScaleQuantumToAny/ScaleAnyToQuantum
        round trip clamps each channel to 'depth' significant bits.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->colors,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].red),range),range);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].green),range),range);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].blue),range),range);
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].alpha),range),range);
      }
    }
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  DisableMSCWarning(4127)
  if ((1UL*QuantumRange) <= MaxMap)
  RestoreMSCWarning
    {
      Quantum
        *depth_map;

      ssize_t
        i;

      /*
        Scale pixels to the desired depth (optimized with a precomputed
        depth map covering every possible quantum value).
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        ssize_t
          x;

        Quantum
          *magick_restrict q;

        /* A failed row elsewhere cancels the remaining iterations. */
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          ssize_t
            j;

          for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=GetPixelChannelChannel(image,j);
            traits=GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[j]=depth_map[ScaleQuantumToMap(q[j])];
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth (general path; computes the round trip
    per sample instead of via a lookup table).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType)
          q[i]),range),range);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageType() sets the type of image.  Choose from these types:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%        OptimizeType
%
%  The format of the SetImageType method is:
%
%      MagickBooleanType SetImageType(Image *image,const ImageType type,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: Image type.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  /*
    Carry the image's dither preference (and any "dither" artifact) into
    the image-info used by the quantizers below.
  */
  image_info=AcquireImageInfo();
  image_info->dither=image->dither;
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      /* Gray colorspace, normalized, then quantized to two gray levels. */
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      (void) NormalizeImage(image,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->number_colors=2;
      quantize_info->colorspace=GRAYColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleAlphaType:
    {
      /* As GrayscaleType, but ensure an (opaque) alpha channel exists. */
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case PaletteType:
    {
      /* Quantize only when the image is not already a <=256-color palette. */
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image,exception);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case PaletteBilevelAlphaType:
    {
      ChannelType
        channel_mask;

      /* Threshold the alpha channel to on/off, then build a palette. */
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      channel_mask=SetImageChannelMask(image,AlphaChannel);
      (void) BilevelImage(image,(double) QuantumRange/2.0,exception);
      (void) SetImageChannelMask(image,channel_mask);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteAlphaType:
    {
      /* Quantize in the transparent colorspace so alpha is respected. */
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case TrueColorAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case ColorSeparationType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case ColorSeparationAlphaType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(status);
  /* Record the requested type only after every transform succeeded. */
  image->type=type;
  return(MagickTrue);
}
atomic_read_codegen.c
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp -fopenmp-version=50 -x c -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s

// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp-simd -fopenmp-version=50 -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
// REQUIRES: x86-registered-target
// NOTE: This is a FileCheck-driven codegen test for '#pragma omp atomic read':
// each statement below must lower to the IR named by the preceding CHECK
// lines.  Do not reformat the CHECK/RUN directives.
#ifndef HEADER
#define HEADER

_Bool bv, bx;
char cv, cx;
unsigned char ucv, ucx;
short sv, sx;
unsigned short usv, usx;
int iv, ix;
unsigned int uiv, uix;
long lv, lx;
unsigned long ulv, ulx;
long long llv, llx;
unsigned long long ullv, ullx;
float fv, fx;
double dv, dx;
long double ldv, ldx;
_Complex int civ, cix;
_Complex float cfv, cfx;
_Complex double cdv, cdx;

typedef int int4 __attribute__((__vector_size__(16)));
int4 int4x;

struct BitFields {
  int : 32;
  int a : 31;
} bfx;

struct BitFields_packed {
  int : 32;
  int a : 31;
} __attribute__ ((__packed__)) bfx_packed;

struct BitFields2 {
  int : 31;
  int a : 1;
} bfx2;

struct BitFields2_packed {
  int : 31;
  int a : 1;
} __attribute__ ((__packed__)) bfx2_packed;

struct BitFields3 {
  int : 11;
  int a : 14;
} bfx3;

struct BitFields3_packed {
  int : 11;
  int a : 14;
} __attribute__ ((__packed__)) bfx3_packed;

struct BitFields4 {
  short : 16;
  int a: 1;
  long b : 7;
} bfx4;

struct BitFields4_packed {
  short : 16;
  int a: 1;
  long b : 7;
} __attribute__ ((__packed__)) bfx4_packed;

typedef float float2 __attribute__((ext_vector_type(2)));
float2 float2x;

// Register "0" is currently an invalid register for global register variables.
// Use "esp" instead of "0".
// register int rix __asm__("0");
register int rix __asm__("esp");

int main() {
// Same-type atomic reads of each scalar width:
// CHECK: load atomic i8, i8*
// CHECK: store i8
#pragma omp atomic read
  bv = bx;
// CHECK: load atomic i8, i8*
// CHECK: store i8
#pragma omp atomic read
  cv = cx;
// CHECK: load atomic i8, i8*
// CHECK: store i8
#pragma omp atomic read
  ucv = ucx;
// CHECK: load atomic i16, i16*
// CHECK: store i16
#pragma omp atomic read
  sv = sx;
// CHECK: load atomic i16, i16*
// CHECK: store i16
#pragma omp atomic read
  usv = usx;
// CHECK: load atomic i32, i32*
// CHECK: store i32
#pragma omp atomic read
  iv = ix;
// CHECK: load atomic i32, i32*
// CHECK: store i32
#pragma omp atomic read
  uiv = uix;
// CHECK: load atomic i64, i64*
// CHECK: store i64
#pragma omp atomic read
  lv = lx;
// CHECK: load atomic i64, i64*
// CHECK: store i64
#pragma omp atomic read
  ulv = ulx;
// CHECK: load atomic i64, i64*
// CHECK: store i64
#pragma omp atomic read
  llv = llx;
// CHECK: load atomic i64, i64*
// CHECK: store i64
#pragma omp atomic read
  ullv = ullx;
// CHECK: load atomic i32, i32* bitcast (float*
// CHECK: bitcast i32 {{.*}} to float
// CHECK: store float
#pragma omp atomic read
  fv = fx;
// CHECK: load atomic i64, i64* bitcast (double*
// CHECK: bitcast i64 {{.*}} to double
// CHECK: store double
#pragma omp atomic read
  dv = dx;
// CHECK: [[LD:%.+]] = load atomic i128, i128* bitcast (x86_fp80*
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i128*
// CHECK: store i128 [[LD]], i128* [[BITCAST]]
// CHECK: [[LD:%.+]] = load x86_fp80, x86_fp80* [[LDTEMP]]
// CHECK: store x86_fp80 [[LD]]
#pragma omp atomic read
  ldv = ldx;
// CHECK: call{{.*}} void @__atomic_load(i64 8,
// CHECK: store i32
// CHECK: store i32
#pragma omp atomic read
  civ = cix;
// CHECK: call{{.*}} void @__atomic_load(i64 8,
// CHECK: store float
// CHECK: store float
#pragma omp atomic read
  cfv = cfx;
// CHECK: call{{.*}} void @__atomic_load(i64 16,
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store double
// CHECK: store double
#pragma omp atomic seq_cst read
  cdv = cdx;
// Mixed-type reads (implicit conversion after the atomic load):
// CHECK: load atomic i64, i64*
// CHECK: store i8
#pragma omp atomic read
  bv = ulx;
// CHECK: load atomic i8, i8*
// CHECK: store i8
#pragma omp atomic read
  cv = bx;
// CHECK: load atomic i8, i8*
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store i8
#pragma omp atomic read, seq_cst
  ucv = cx;
// CHECK: load atomic i64, i64*
// CHECK: store i16
#pragma omp atomic read
  sv = ulx;
// CHECK: load atomic i64, i64*
// CHECK: store i16
#pragma omp atomic read
  usv = lx;
// CHECK: load atomic i32, i32*
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store i32
#pragma omp atomic seq_cst, read
  iv = uix;
// CHECK: load atomic i32, i32*
// CHECK: store i32
#pragma omp atomic read
  uiv = ix;
// CHECK: call{{.*}} void @__atomic_load(i64 8,
// CHECK: store i64
#pragma omp atomic read
  lv = cix;
// CHECK: load atomic i32, i32*
// CHECK: store i64
#pragma omp atomic read
  ulv = fx;
// CHECK: load atomic i64, i64*
// CHECK: store i64
#pragma omp atomic read
  llv = dx;
// CHECK: load atomic i128, i128*
// CHECK: store i64
#pragma omp atomic read
  ullv = ldx;
// CHECK: call{{.*}} void @__atomic_load(i64 8,
// CHECK: store float
#pragma omp atomic read
  fv = cix;
// CHECK: load atomic i16, i16*
// CHECK: store double
#pragma omp atomic read
  dv = sx;
// CHECK: load atomic i8, i8*
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bx;
// CHECK: load atomic i8, i8*
// CHECK: store i32
// CHECK: store i32
#pragma omp atomic read
  civ = bx;
// CHECK: load atomic i16, i16*
// CHECK: store float
// CHECK: store float
#pragma omp atomic read
  cfv = usx;
// CHECK: load atomic i64, i64*
// CHECK: store double
// CHECK: store double
#pragma omp atomic read
  cdv = llx;
// Vector-element and bit-field reads:
// CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* @{{.+}} to i128*) monotonic
// CHECK: [[I128PTR:%.+]] = bitcast <4 x i32>* [[LDTEMP:%.+]] to i128*
// CHECK: store i128 [[I128VAL]], i128* [[I128PTR]]
// CHECK: [[LD:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
// CHECK: extractelement <4 x i32> [[LD]]
// CHECK: store i8
#pragma omp atomic read
  bv = int4x[0];
// CHECK: [[LD:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%{{.+}}* @{{.+}} to i8*), i64 4) to i32*) monotonic
// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1
// CHECK: ashr i32 [[SHL]], 1
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx.a;
// CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8*
// CHECK: call void @__atomic_load(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @bfx_packed to i8*), i64 4), i8* [[LDTEMP_VOID_PTR]], i32 0)
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1
// CHECK: ashr i32 [[SHL]], 1
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx_packed.a;
// CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @bfx2, i32 0, i32 0) monotonic
// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: ashr i32 [[LD]], 31
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx2.a;
// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @bfx2_packed to i8*), i64 3) monotonic
// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: ashr i8 [[LD]], 7
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx2_packed.a;
// CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @bfx3, i32 0, i32 0) monotonic
// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 7
// CHECK: ashr i32 [[SHL]], 18
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx3.a;
// CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i24* [[LDTEMP:%.+]] to i8*
// CHECK: call void @__atomic_load(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @bfx3_packed to i8*), i64 1), i8* [[LDTEMP_VOID_PTR]], i32 0)
// CHECK: [[LD:%.+]] = load i24, i24* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i24 [[LD]], 7
// CHECK: [[ASHR:%.+]] = ashr i24 [[SHL]], 10
// CHECK: sext i24 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx3_packed.a;
// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic
// CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i64 [[LD]], 47
// CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 63
// CHECK: trunc i64 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx4.a;
// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) monotonic
// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i8 [[LD]], 7
// CHECK: [[ASHR:%.+]] = ashr i8 [[SHL]], 7
// CHECK: sext i8 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic relaxed read
  ldv = bfx4_packed.a;
// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic
// CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i64 [[LD]], 40
// CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 57
// CHECK: store x86_fp80
#pragma omp atomic read relaxed
  ldv = bfx4.b;
// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) acquire
// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[ASHR:%.+]] = ashr i8 [[LD]], 1
// CHECK: sext i8 [[ASHR]] to i64
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store x86_fp80
#pragma omp atomic read acquire
  ldv = bfx4_packed.b;
// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (<2 x float>* @{{.+}} to i64*) monotonic
// CHECK: [[BITCAST:%.+]] = bitcast <2 x float>* [[LDTEMP:%.+]] to i64*
// CHECK: store i64 [[LD]], i64* [[BITCAST]]
// CHECK: [[LD:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]]
// CHECK: extractelement <2 x float> [[LD]]
// CHECK: store i64
#pragma omp atomic read
  ulv = float2x.x;
// CHECK: call{{.*}} i{{[0-9]+}} @llvm.read_register
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store double
#pragma omp atomic read seq_cst
  dv = rix;
  return 0;
}
#endif
matmul_naive.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <omp.h> #include <math.h> #include <sys/time.h> //#include <mkl.h> #define N 8192 double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } float A[N][N], B[N][N], C[N][N]; /* Simple "naive" method to multiply two square matrices A and B to generate matrix C. */ //void myMult(int n, float *A, float *B, float *C) { //void myMult(int n, float A[N][N], float B[N][N], float C[N][N]) { void myMult() { //int i, j, k; #pragma omp target map(to:A, B) map(tofrom:C) //#pragma omp target data map(to:A[0:n*n], B[0:n*n], n) map(tofrom:C[0:n*n]) #pragma omp parallel for for(int i = 0; i < N; ++i) for(int k = 0; k < N; ++k) for(int j = 0; j < N; ++j) //C[i*n+j] += A[i*n+k] * B[k*n+j]; C[i][j] = A[i][k] * B[k][j]; } int main(int argc, char *argv[]) { if(argc != 4) { fprintf(stderr, "Use: %s size nThreads nIter\n", argv[0]); return -1; } int i, j, k, nt; //int N = atoi(argv[1]); int nThreads = atoi(argv[2]); int nIter = atoi(argv[3]); omp_set_num_threads(nThreads); memset(A, 0, N * N * sizeof(float)); memset(B, 0, N * N * sizeof(float)); memset(C, 0, N * N * sizeof(float)); //float *A = malloc(sizeof(float)*N*N); //float *B = malloc(sizeof(float)*N*N); //float *C = malloc(sizeof(float)*N*N); //#pragma omp parallel // nt = omp_get_num_threads(); // printf("%s nThreads %d matrix size %d\n", argv[0], nt, N); printf("Initializing input matrices...\n"); //#pragma omp parallel for private(i,j) for(i = 0; i < N; ++i) { for(j = 0; j < N; ++j) { //A[i*N+j] = 1.0f; //B[i*N+j] = 1.0f; //C[i*N+j] = 0.0f; A[i][j] = 1.0f; B[i][j] = 1.0f; C[i][j] = 0.0f; } } printf("warm up run to overcome setup overhead\n"); //myMult(N, A, B, C); myMult(); double aveTime, minTime=1e6, maxTime=0.0f; printf("run the matrix multiplication function %d times\n", nIter); for(i=0; i < nIter; i++) { 
double startTime = rtclock(); //myMult(N, A, B, C); myMult(); double endTime = rtclock(); double runtime = endTime - startTime; maxTime=(maxTime > runtime)?maxTime:runtime; minTime=(minTime < runtime)?minTime:runtime; aveTime += runtime; printf("Iteration %d: runtime %.3f\n", i, runtime); } aveTime /= nIter; printf("maxRT %g minRT %g aveRT %g GFlop/s %g\n", maxTime, minTime, aveTime, 2e-9*N*N*N/aveTime); // free(A); // free(B); // free(C); return 0; }
par_coarsen.c
/*BHEADER********************************************************************** * Copyright (c) 2017, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322. * This file is part of AMG. See files README and COPYRIGHT for details. * * AMG is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * This software is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the * GNU General Public License for more details. * ***********************************************************************EHEADER*/ /****************************************************************************** * *****************************************************************************/ /* following should be in a header file */ #include "_hypre_parcsr_ls.h" /*==========================================================================*/ /*==========================================================================*/ /** Selects a coarse "grid" based on the graph of a matrix. Notes: \begin{itemize} \item The underlying matrix storage scheme is a hypre_ParCSR matrix. \item The routine returns the following: \begin{itemize} \item S - a ParCSR matrix representing the "strength matrix". This is used in the "build interpolation" routine. 
  \item CF\_marker - an array indicating both C-pts (value = 1) and
  F-pts (value = -1)
  \end{itemize}

  \item We define the following temporary storage:
  \begin{itemize}
  \item measure\_array - an array containing the "measures" for each
  of the fine-grid points
  \item graph\_array - an array containing the list of points in the
  "current subgraph" being considered in the coarsening process.
  \end{itemize}

  \item The graph of the "strength matrix" for A is a subgraph of the
  graph of A, but requires nonsymmetric storage even if A is
  symmetric.  This is because of the directional nature of the
  "strength of dependence" notion (see below).  Since we are using
  nonsymmetric storage for A right now, this is not a problem.  If we
  ever add the ability to store A symmetrically, then we could store
  the strength graph as floats instead of doubles to save space.

  \item This routine currently "compresses" the strength matrix.  We
  should consider the possibility of defining this matrix to have the
  same "nonzero structure" as A.  To do this, we could use the same
  A\_i and A\_j arrays, and would need only define the S\_data array.
  There are several pros and cons to discuss.
  \end{itemize}

  Terminology:
  \begin{itemize}
  \item Ruge's terminology: A point is "strongly connected to" $j$, or
  "strongly depends on" $j$, if $-a_ij >= \theta max_{l != j} \{-a_il\}$.
  \item Here, we retain some of this terminology, but with a more
  generalized notion of "strength".  We also retain the "natural"
  graph notation for representing the directed graph of a matrix.
  That is, the nonzero entry $a_ij$ is represented as: i --> j.  In
  the strength matrix, S, the entry $s_ij$ is also graphically denoted
  as above, and means both of the following:
  \begin{itemize}
  \item $i$ "depends on" $j$ with "strength" $s_ij$
  \item $j$ "influences" $i$ with "strength" $s_ij$
  \end{itemize}
  \end{itemize}

  {\bf Input files:}
  _hypre_parcsr_ls.h

  @return Error code.
@param A [IN] coefficient matrix @param strength_threshold [IN] threshold parameter used to define strength @param S_ptr [OUT] strength matrix @param CF_marker_ptr [OUT] array indicating C/F points @see */ /*--------------------------------------------------------------------------*/ #define C_PT 1 #define F_PT -1 #define SF_PT -3 #define COMMON_C_PT 2 #define Z_PT -2 HYPRE_Int hypre_BoomerAMGCoarsen( hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A, HYPRE_Int CF_init, HYPRE_Int debug_flag, HYPRE_Int **CF_marker_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = NULL; HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int col_1 = hypre_ParCSRMatrixFirstColDiag(S); HYPRE_Int col_n = col_1 + hypre_CSRMatrixNumCols(S_diag); HYPRE_Int num_cols_offd = 0; hypre_CSRMatrix *S_ext; HYPRE_Int *S_ext_i = NULL; HYPRE_Int *S_ext_j = NULL; HYPRE_Int num_sends = 0; HYPRE_Int *int_buf_data; HYPRE_Real *buf_data; HYPRE_Int *CF_marker; HYPRE_Int *CF_marker_offd; HYPRE_Real *measure_array; HYPRE_Int *graph_array; HYPRE_Int *graph_array_offd; HYPRE_Int graph_size; HYPRE_Int graph_offd_size; HYPRE_Int global_graph_size; HYPRE_Int i, j, k, kc, jS, kS, ig, elmt; HYPRE_Int index, start, my_id, num_procs, jrow, cnt; HYPRE_Int ierr = 0; HYPRE_Int use_commpkg_A = 0; HYPRE_Int break_var = 1; HYPRE_Real wall_time; HYPRE_Int iter = 0; #if 0 /* debugging */ char filename[256]; FILE *fp; HYPRE_Int iter = 0; #endif /*-------------------------------------------------------------- * Compute a ParCSR strength matrix, S. 
* * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = 1, else S_ij = 0. * * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. *----------------------------------------------------------------*/ S_ext = NULL; if (debug_flag == 3) wall_time = time_getWallclockSeconds(); hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); if (!comm_pkg) { use_commpkg_A = 1; comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); num_cols_offd = hypre_CSRMatrixNumCols(S_offd); S_diag_j = hypre_CSRMatrixJ(S_diag); if (num_cols_offd) { S_offd_j = hypre_CSRMatrixJ(S_offd); } /*---------------------------------------------------------- * Compute the measures * * The measures are currently given by the column sums of S. * Hence, measure_array[i] is the number of influences * of variable i. * * The measures are augmented by a random number * between 0 and 1. 
*----------------------------------------------------------*/ measure_array = hypre_CTAlloc(HYPRE_Real, num_variables+num_cols_offd); for (i=0; i < S_offd_i[num_variables]; i++) { measure_array[num_variables + S_offd_j[i]] += 1.0; } if (num_procs > 1) comm_handle = hypre_ParCSRCommHandleCreate(2, comm_pkg, &measure_array[num_variables], buf_data); for (i=0; i < S_diag_i[num_variables]; i++) { measure_array[S_diag_j[i]] += 1.0; } if (num_procs > 1) hypre_ParCSRCommHandleDestroy(comm_handle); index = 0; for (i=0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) measure_array[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)] += buf_data[index++]; } for (i=num_variables; i < num_variables+num_cols_offd; i++) { measure_array[i] = 0; } /* this augments the measures */ if (CF_init == 2) hypre_BoomerAMGIndepSetInit(S, measure_array, 1); else hypre_BoomerAMGIndepSetInit(S, measure_array, 0); /*--------------------------------------------------- * Initialize the graph array * graph_array contains interior points in elements 0 ... num_variables-1 * followed by boundary values *---------------------------------------------------*/ graph_array = hypre_CTAlloc(HYPRE_Int, num_variables); if (num_cols_offd) graph_array_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd); else graph_array_offd = NULL; /* initialize measure array and graph array */ for (ig = 0; ig < num_cols_offd; ig++) graph_array_offd[ig] = ig; /*--------------------------------------------------- * Initialize the C/F marker array * C/F marker array contains interior points in elements 0 ... 
* num_variables-1 followed by boundary values *---------------------------------------------------*/ graph_offd_size = num_cols_offd; if (CF_init==1) { CF_marker = *CF_marker_ptr; cnt = 0; for (i=0; i < num_variables; i++) { if ( (S_offd_i[i+1]-S_offd_i[i]) > 0 || CF_marker[i] == -1) { CF_marker[i] = 0; } if ( CF_marker[i] == Z_PT) { if (measure_array[i] >= 1.0 || (S_diag_i[i+1]-S_diag_i[i]) > 0) { CF_marker[i] = 0; graph_array[cnt++] = i; } else { CF_marker[i] = F_PT; } } else if (CF_marker[i] == SF_PT) measure_array[i] = 0; else graph_array[cnt++] = i; } } else { CF_marker = hypre_CTAlloc(HYPRE_Int, num_variables); cnt = 0; for (i=0; i < num_variables; i++) { CF_marker[i] = 0; if ( (S_diag_i[i+1]-S_diag_i[i]) == 0 && (S_offd_i[i+1]-S_offd_i[i]) == 0) { CF_marker[i] = SF_PT; measure_array[i] = 0; } else graph_array[cnt++] = i; } } graph_size = cnt; if (num_cols_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd); else CF_marker_offd = NULL; for (i=0; i < num_cols_offd; i++) CF_marker_offd[i] = 0; /*--------------------------------------------------- * Loop until all points are either fine or coarse. 
*---------------------------------------------------*/ if (num_procs > 1) { if (use_commpkg_A) S_ext = hypre_ParCSRMatrixExtractBExt(S,A,0); else S_ext = hypre_ParCSRMatrixExtractBExt(S,S,0); S_ext_i = hypre_CSRMatrixI(S_ext); S_ext_j = hypre_CSRMatrixJ(S_ext); } /* compress S_ext and convert column numbers*/ index = 0; for (i=0; i < num_cols_offd; i++) { for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { k = S_ext_j[j]; if (k >= col_1 && k < col_n) { S_ext_j[index++] = k - col_1; } else { kc = hypre_BinarySearch(col_map_offd,k,num_cols_offd); if (kc > -1) S_ext_j[index++] = -kc-1; } } S_ext_i[i] = index; } for (i = num_cols_offd; i > 0; i--) S_ext_i[i] = S_ext_i[i-1]; if (num_procs > 1) S_ext_i[0] = 0; if (debug_flag == 3) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Initialize CLJP phase = %f\n", my_id, wall_time); } while (1) { /*------------------------------------------------ * Exchange boundary data, i.i. get measures and S_ext_data *------------------------------------------------*/ if (num_procs > 1) comm_handle = hypre_ParCSRCommHandleCreate(2, comm_pkg, &measure_array[num_variables], buf_data); if (num_procs > 1) hypre_ParCSRCommHandleDestroy(comm_handle); index = 0; for (i=0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) measure_array[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)] += buf_data[index++]; } /*------------------------------------------------ * Set F-pts and update subgraph *------------------------------------------------*/ if (iter || (CF_init != 1)) { for (ig = 0; ig < graph_size; ig++) { i = graph_array[ig]; if ( (CF_marker[i] != C_PT) && (measure_array[i] < 1) ) { /* set to be an F-pt */ CF_marker[i] = F_PT; /* make sure all dependencies have been accounted for */ for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++) { if (S_diag_j[jS] > -1) { CF_marker[i] = 0; } } for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++) { if 
(S_offd_j[jS] > -1) { CF_marker[i] = 0; } } } if (CF_marker[i]) { measure_array[i] = 0; /* take point out of the subgraph */ graph_size--; graph_array[ig] = graph_array[graph_size]; graph_array[graph_size] = i; ig--; } } } /*------------------------------------------------ * Exchange boundary data, i.i. get measures *------------------------------------------------*/ if (debug_flag == 3) wall_time = time_getWallclockSeconds(); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { jrow = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j); buf_data[index++] = measure_array[jrow]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, buf_data, &measure_array[num_variables]); hypre_ParCSRCommHandleDestroy(comm_handle); } /*------------------------------------------------ * Debugging: * * Uncomment the sections of code labeled * "debugging" to generate several files that * can be visualized using the `coarsen.m' * matlab routine. 
*------------------------------------------------*/ #if 0 /* debugging */ /* print out measures */ hypre_sprintf(filename, "coarsen.out.measures.%04d", iter); fp = fopen(filename, "w"); for (i = 0; i < num_variables; i++) { hypre_fprintf(fp, "%f\n", measure_array[i]); } fclose(fp); /* print out strength matrix */ hypre_sprintf(filename, "coarsen.out.strength.%04d", iter); hypre_CSRMatrixPrint(S, filename); /* print out C/F marker */ hypre_sprintf(filename, "coarsen.out.CF.%04d", iter); fp = fopen(filename, "w"); for (i = 0; i < num_variables; i++) { hypre_fprintf(fp, "%d\n", CF_marker[i]); } fclose(fp); iter++; #endif /*------------------------------------------------ * Test for convergence *------------------------------------------------*/ hypre_MPI_Allreduce(&graph_size,&global_graph_size,1,HYPRE_MPI_INT,hypre_MPI_SUM,comm); if (global_graph_size == 0) break; /*------------------------------------------------ * Pick an independent set of points with * maximal measure. *------------------------------------------------*/ if (iter || (CF_init != 1)) { hypre_BoomerAMGIndepSet(S, measure_array, graph_array, graph_size, graph_array_offd, graph_offd_size, CF_marker, CF_marker_offd); if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, CF_marker_offd, int_buf_data); hypre_ParCSRCommHandleDestroy(comm_handle); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1);j++) { elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j); if (!int_buf_data[index++] && CF_marker[elmt] > 0) { CF_marker[elmt] = 0; } } } } iter++; /*------------------------------------------------ * Exchange boundary data for CF_marker *------------------------------------------------*/ index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { elmt = 
hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j); int_buf_data[index++] = CF_marker[elmt]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } for (ig = 0; ig < graph_offd_size; ig++) { i = graph_array_offd[ig]; if (CF_marker_offd[i] < 0) { /* take point out of the subgraph */ graph_offd_size--; graph_array_offd[ig] = graph_array_offd[graph_offd_size]; graph_array_offd[graph_offd_size] = i; ig--; } } if (debug_flag == 3) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d iter %d comm. and subgraph update = %f\n", my_id, iter, wall_time); } /*------------------------------------------------ * Set C_pts and apply heuristics. *------------------------------------------------*/ for (i=num_variables; i < num_variables+num_cols_offd; i++) { measure_array[i] = 0; } if (debug_flag == 3) wall_time = time_getWallclockSeconds(); for (ig = 0; ig < graph_size; ig++) { i = graph_array[ig]; /*--------------------------------------------- * Heuristic: C-pts don't interpolate from * neighbors that influence them. 
*---------------------------------------------*/ if (CF_marker[i] > 0) { /* set to be a C-pt */ CF_marker[i] = C_PT; for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++) { j = S_diag_j[jS]; if (j > -1) { /* "remove" edge from S */ S_diag_j[jS] = -S_diag_j[jS]-1; /* decrement measures of unmarked neighbors */ if (!CF_marker[j]) { measure_array[j]--; } } } for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++) { j = S_offd_j[jS]; if (j > -1) { /* "remove" edge from S */ S_offd_j[jS] = -S_offd_j[jS]-1; /* decrement measures of unmarked neighbors */ if (!CF_marker_offd[j]) { measure_array[j+num_variables]--; } } } } else { /* marked dependencies */ for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++) { j = S_diag_j[jS]; if (j < 0) j = -j-1; if (CF_marker[j] > 0) { if (S_diag_j[jS] > -1) { /* "remove" edge from S */ S_diag_j[jS] = -S_diag_j[jS]-1; } /* IMPORTANT: consider all dependencies */ /* temporarily modify CF_marker */ CF_marker[j] = COMMON_C_PT; } else if (CF_marker[j] == SF_PT) { if (S_diag_j[jS] > -1) { /* "remove" edge from S */ S_diag_j[jS] = -S_diag_j[jS]-1; } } } for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++) { j = S_offd_j[jS]; if (j < 0) j = -j-1; if (CF_marker_offd[j] > 0) { if (S_offd_j[jS] > -1) { /* "remove" edge from S */ S_offd_j[jS] = -S_offd_j[jS]-1; } /* IMPORTANT: consider all dependencies */ /* temporarily modify CF_marker */ CF_marker_offd[j] = COMMON_C_PT; } else if (CF_marker_offd[j] == SF_PT) { if (S_offd_j[jS] > -1) { /* "remove" edge from S */ S_offd_j[jS] = -S_offd_j[jS]-1; } } } /* unmarked dependencies */ for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++) { if (S_diag_j[jS] > -1) { j = S_diag_j[jS]; break_var = 1; /* check for common C-pt */ for (kS = S_diag_i[j]; kS < S_diag_i[j+1]; kS++) { k = S_diag_j[kS]; if (k < 0) k = -k-1; /* IMPORTANT: consider all dependencies */ if (CF_marker[k] == COMMON_C_PT) { /* "remove" edge from S and update measure*/ S_diag_j[jS] = -S_diag_j[jS]-1; measure_array[j]--; break_var = 0; break; } } if (break_var) { for 
(kS = S_offd_i[j]; kS < S_offd_i[j+1]; kS++) { k = S_offd_j[kS]; if (k < 0) k = -k-1; /* IMPORTANT: consider all dependencies */ if ( CF_marker_offd[k] == COMMON_C_PT) { /* "remove" edge from S and update measure*/ S_diag_j[jS] = -S_diag_j[jS]-1; measure_array[j]--; break; } } } } } for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++) { if (S_offd_j[jS] > -1) { j = S_offd_j[jS]; /* check for common C-pt */ for (kS = S_ext_i[j]; kS < S_ext_i[j+1]; kS++) { k = S_ext_j[kS]; if (k >= 0) { /* IMPORTANT: consider all dependencies */ if (CF_marker[k] == COMMON_C_PT) { /* "remove" edge from S and update measure*/ S_offd_j[jS] = -S_offd_j[jS]-1; measure_array[j+num_variables]--; break; } } else { kc = -k-1; if (kc > -1 && CF_marker_offd[kc] == COMMON_C_PT) { /* "remove" edge from S and update measure*/ S_offd_j[jS] = -S_offd_j[jS]-1; measure_array[j+num_variables]--; break; } } } } } } /* reset CF_marker */ for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++) { j = S_diag_j[jS]; if (j < 0) j = -j-1; if (CF_marker[j] == COMMON_C_PT) { CF_marker[j] = C_PT; } } for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++) { j = S_offd_j[jS]; if (j < 0) j = -j-1; if (CF_marker_offd[j] == COMMON_C_PT) { CF_marker_offd[j] = C_PT; } } } if (debug_flag == 3) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d CLJP phase = %f graph_size = %d nc_offd = %d\n", my_id, wall_time, graph_size, num_cols_offd); } } /*--------------------------------------------------- * Clean up and return *---------------------------------------------------*/ /* Reset S_matrix */ for (i=0; i < S_diag_i[num_variables]; i++) { if (S_diag_j[i] < 0) S_diag_j[i] = -S_diag_j[i]-1; } for (i=0; i < S_offd_i[num_variables]; i++) { if (S_offd_j[i] < 0) S_offd_j[i] = -S_offd_j[i]-1; } /*for (i=0; i < num_variables; i++) if (CF_marker[i] == SF_PT) CF_marker[i] = F_PT;*/ hypre_TFree(measure_array); hypre_TFree(graph_array); if (num_cols_offd) hypre_TFree(graph_array_offd); hypre_TFree(buf_data); 
hypre_TFree(int_buf_data); hypre_TFree(CF_marker_offd); if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext); *CF_marker_ptr = CF_marker; return (ierr); } /*========================================================================== * Ruge's coarsening algorithm *==========================================================================*/ #define C_PT 1 #define F_PT -1 #define Z_PT -2 #define SF_PT -3 /* special fine points */ #define SC_PT 3 /* special coarse points */ #define UNDECIDED 0 /************************************************************** * * Ruge Coarsening routine * **************************************************************/ HYPRE_Int hypre_BoomerAMGCoarsenRuge( hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A, HYPRE_Int measure_type, HYPRE_Int coarsen_type, HYPRE_Int debug_flag, HYPRE_Int **CF_marker_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_j = hypre_CSRMatrixJ(S_diag); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = NULL; HYPRE_Int num_variables = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(S_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); hypre_CSRMatrix *S_ext = NULL; HYPRE_Int *S_ext_i = NULL; HYPRE_Int *S_ext_j = NULL; hypre_CSRMatrix *ST; HYPRE_Int *ST_i; HYPRE_Int *ST_j; HYPRE_Int *CF_marker; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int ci_tilde = -1; HYPRE_Int ci_tilde_mark = -1; HYPRE_Int ci_tilde_offd = -1; HYPRE_Int ci_tilde_offd_mark = -1; HYPRE_Int *measure_array; HYPRE_Int *graph_array; HYPRE_Int *int_buf_data = NULL; HYPRE_Int *ci_array = NULL; HYPRE_Int i, j, k, jS; HYPRE_Int ji, jj, jk, jm, index; HYPRE_Int set_empty = 1; HYPRE_Int C_i_nonempty = 0; HYPRE_Int num_nonzeros; HYPRE_Int num_procs, 
my_id; HYPRE_Int num_sends = 0; HYPRE_Int first_col, start; HYPRE_Int col_0, col_n; hypre_LinkList LoL_head; hypre_LinkList LoL_tail; HYPRE_Int *lists, *where; HYPRE_Int measure, new_meas; HYPRE_Int meas_type = 0; HYPRE_Int agg_2 = 0; HYPRE_Int num_left, elmt; HYPRE_Int nabor, nabor_two; HYPRE_Int ierr = 0; HYPRE_Int use_commpkg_A = 0; HYPRE_Int break_var = 0; HYPRE_Int f_pnt = F_PT; HYPRE_Real wall_time; if (coarsen_type < 0) coarsen_type = -coarsen_type; if (measure_type == 1 || measure_type == 4) meas_type = 1; if (measure_type == 4 || measure_type == 3) agg_2 = 1; /*------------------------------------------------------- * Initialize the C/F marker, LoL_head, LoL_tail arrays *-------------------------------------------------------*/ LoL_head = NULL; LoL_tail = NULL; lists = hypre_CTAlloc(HYPRE_Int, num_variables); where = hypre_CTAlloc(HYPRE_Int, num_variables); #if 0 /* debugging */ char filename[256]; FILE *fp; HYPRE_Int iter = 0; #endif /*-------------------------------------------------------------- * Compute a CSR strength matrix, S. * * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = 1, else S_ij = 0. * * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. 
*----------------------------------------------------------------*/ if (debug_flag == 3) wall_time = time_getWallclockSeconds(); first_col = hypre_ParCSRMatrixFirstColDiag(S); col_0 = first_col-1; col_n = col_0+num_variables; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); if (!comm_pkg) { use_commpkg_A = 1; comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_cols_offd) S_offd_j = hypre_CSRMatrixJ(S_offd); jS = S_i[num_variables]; ST = hypre_CSRMatrixCreate(num_variables, num_variables, jS); ST_i = hypre_CTAlloc(HYPRE_Int,num_variables+1); ST_j = hypre_CTAlloc(HYPRE_Int,jS); hypre_CSRMatrixI(ST) = ST_i; hypre_CSRMatrixJ(ST) = ST_j; /*---------------------------------------------------------- * generate transpose of S, ST *----------------------------------------------------------*/ for (i=0; i <= num_variables; i++) ST_i[i] = 0; for (i=0; i < jS; i++) { ST_i[S_j[i]+1]++; } for (i=0; i < num_variables; i++) { ST_i[i+1] += ST_i[i]; } for (i=0; i < num_variables; i++) { for (j=S_i[i]; j < S_i[i+1]; j++) { index = S_j[j]; ST_j[ST_i[index]] = i; ST_i[index]++; } } for (i = num_variables; i > 0; i--) { ST_i[i] = ST_i[i-1]; } ST_i[0] = 0; /*---------------------------------------------------------- * Compute the measures * * The measures are given by the row sums of ST. * Hence, measure_array[i] is the number of influences * of variable i. 
* correct actual measures through adding influences from * neighbor processors *----------------------------------------------------------*/ measure_array = hypre_CTAlloc(HYPRE_Int, num_variables); for (i = 0; i < num_variables; i++) { measure_array[i] = ST_i[i+1]-ST_i[i]; } /* special case for Falgout coarsening */ if (coarsen_type == 6) { f_pnt = Z_PT; coarsen_type = 1; } if (coarsen_type == 10) { f_pnt = Z_PT; coarsen_type = 11; } if ((meas_type || (coarsen_type != 1 && coarsen_type != 11)) && num_procs > 1) { if (use_commpkg_A) S_ext = hypre_ParCSRMatrixExtractBExt(S,A,0); else S_ext = hypre_ParCSRMatrixExtractBExt(S,S,0); S_ext_i = hypre_CSRMatrixI(S_ext); S_ext_j = hypre_CSRMatrixJ(S_ext); num_nonzeros = S_ext_i[num_cols_offd]; /*first_col = hypre_ParCSRMatrixFirstColDiag(S); col_0 = first_col-1; col_n = col_0+num_variables; */ if (meas_type) { for (i=0; i < num_nonzeros; i++) { index = S_ext_j[i] - first_col; if (index > -1 && index < num_variables) measure_array[index]++; } } } /*--------------------------------------------------- * Loop until all points are either fine or coarse. 
*---------------------------------------------------*/ if (debug_flag == 3) wall_time = time_getWallclockSeconds(); /* first coarsening phase */ /************************************************************* * * Initialize the lists * *************************************************************/ CF_marker = hypre_CTAlloc(HYPRE_Int, num_variables); num_left = 0; for (j = 0; j < num_variables; j++) { if ((S_i[j+1]-S_i[j])== 0 && (S_offd_i[j+1]-S_offd_i[j]) == 0) { CF_marker[j] = SF_PT; if (agg_2) CF_marker[j] = SC_PT; measure_array[j] = 0; } else { CF_marker[j] = UNDECIDED; num_left++; } } for (j = 0; j < num_variables; j++) { measure = measure_array[j]; if (CF_marker[j] != SF_PT && CF_marker[j] != SC_PT) { if (measure > 0) { hypre_enter_on_lists(&LoL_head, &LoL_tail, measure, j, lists, where); } else { if (measure < 0) hypre_printf("negative measure!\n"); CF_marker[j] = f_pnt; for (k = S_i[j]; k < S_i[j+1]; k++) { nabor = S_j[k]; if (CF_marker[nabor] != SF_PT && CF_marker[nabor] != SC_PT) { if (nabor < j) { new_meas = measure_array[nabor]; if (new_meas > 0) hypre_remove_point(&LoL_head, &LoL_tail, new_meas, nabor, lists, where); new_meas = ++(measure_array[nabor]); hypre_enter_on_lists(&LoL_head, &LoL_tail, new_meas, nabor, lists, where); } else { new_meas = ++(measure_array[nabor]); } } } --num_left; } } } /**************************************************************** * * Main loop of Ruge-Stueben first coloring pass. 
* * WHILE there are still points to classify DO: * 1) find first point, i, on list with max_measure * make i a C-point, remove it from the lists * 2) For each point, j, in S_i^T, * a) Set j to be an F-point * b) For each point, k, in S_j * move k to the list in LoL with measure one * greater than it occupies (creating new LoL * entry if necessary) * 3) For each point, j, in S_i, * move j to the list in LoL with measure one * smaller than it occupies (creating new LoL * entry if necessary) * ****************************************************************/ while (num_left > 0) { index = LoL_head -> head; CF_marker[index] = C_PT; measure = measure_array[index]; measure_array[index] = 0; --num_left; hypre_remove_point(&LoL_head, &LoL_tail, measure, index, lists, where); for (j = ST_i[index]; j < ST_i[index+1]; j++) { nabor = ST_j[j]; if (CF_marker[nabor] == UNDECIDED) { CF_marker[nabor] = F_PT; measure = measure_array[nabor]; hypre_remove_point(&LoL_head, &LoL_tail, measure, nabor, lists, where); --num_left; for (k = S_i[nabor]; k < S_i[nabor+1]; k++) { nabor_two = S_j[k]; if (CF_marker[nabor_two] == UNDECIDED) { measure = measure_array[nabor_two]; hypre_remove_point(&LoL_head, &LoL_tail, measure, nabor_two, lists, where); new_meas = ++(measure_array[nabor_two]); hypre_enter_on_lists(&LoL_head, &LoL_tail, new_meas, nabor_two, lists, where); } } } } for (j = S_i[index]; j < S_i[index+1]; j++) { nabor = S_j[j]; if (CF_marker[nabor] == UNDECIDED) { measure = measure_array[nabor]; hypre_remove_point(&LoL_head, &LoL_tail, measure, nabor, lists, where); measure_array[nabor] = --measure; if (measure > 0) hypre_enter_on_lists(&LoL_head, &LoL_tail, measure, nabor, lists, where); else { CF_marker[nabor] = F_PT; --num_left; for (k = S_i[nabor]; k < S_i[nabor+1]; k++) { nabor_two = S_j[k]; if (CF_marker[nabor_two] == UNDECIDED) { new_meas = measure_array[nabor_two]; hypre_remove_point(&LoL_head, &LoL_tail, new_meas, nabor_two, lists, where); new_meas = 
++(measure_array[nabor_two]); hypre_enter_on_lists(&LoL_head, &LoL_tail, new_meas, nabor_two, lists, where); } } } } } } hypre_TFree(measure_array); hypre_CSRMatrixDestroy(ST); if (debug_flag == 3) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Coarsen 1st pass = %f\n", my_id, wall_time); } hypre_TFree(lists); hypre_TFree(where); hypre_TFree(LoL_head); hypre_TFree(LoL_tail); for (i=0; i < num_variables; i++) if (CF_marker[i] == SC_PT) CF_marker[i] = C_PT; if (coarsen_type == 11) { *CF_marker_ptr = CF_marker; if (meas_type && num_procs > 1) hypre_CSRMatrixDestroy(S_ext); return 0; } /* second pass, check fine points for coarse neighbors for coarsen_type = 2, the second pass includes off-processore boundary points */ /*--------------------------------------------------- * Initialize the graph array *---------------------------------------------------*/ graph_array = hypre_CTAlloc(HYPRE_Int, num_variables); for (i = 0; i < num_variables; i++) { graph_array[i] = -1; } if (debug_flag == 3) wall_time = time_getWallclockSeconds(); if (coarsen_type == 2) { /*------------------------------------------------ * Exchange boundary data for CF_marker *------------------------------------------------*/ CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } ci_array = hypre_CTAlloc(HYPRE_Int,num_cols_offd); for (i=0; i < num_cols_offd; i++) ci_array[i] = -1; for (i=0; i < num_variables; i++) { if (ci_tilde_mark != i) ci_tilde = -1; if (ci_tilde_offd_mark 
!= i) ci_tilde_offd = -1; if (CF_marker[i] == -1) { break_var = 1; for (ji = S_i[i]; ji < S_i[i+1]; ji++) { j = S_j[ji]; if (CF_marker[j] > 0) graph_array[j] = i; } for (ji = S_offd_i[i]; ji < S_offd_i[i+1]; ji++) { j = S_offd_j[ji]; if (CF_marker_offd[j] > 0) ci_array[j] = i; } for (ji = S_i[i]; ji < S_i[i+1]; ji++) { j = S_j[ji]; if (CF_marker[j] == -1) { set_empty = 1; for (jj = S_i[j]; jj < S_i[j+1]; jj++) { index = S_j[jj]; if (graph_array[index] == i) { set_empty = 0; break; } } if (set_empty) { for (jj = S_offd_i[j]; jj < S_offd_i[j+1]; jj++) { index = S_offd_j[jj]; if (ci_array[index] == i) { set_empty = 0; break; } } } if (set_empty) { if (C_i_nonempty) { CF_marker[i] = 1; if (ci_tilde > -1) { CF_marker[ci_tilde] = -1; ci_tilde = -1; } if (ci_tilde_offd > -1) { CF_marker_offd[ci_tilde_offd] = -1; ci_tilde_offd = -1; } C_i_nonempty = 0; break_var = 0; break; } else { ci_tilde = j; ci_tilde_mark = i; CF_marker[j] = 1; C_i_nonempty = 1; i--; break_var = 0; break; } } } } if (break_var) { for (ji = S_offd_i[i]; ji < S_offd_i[i+1]; ji++) { j = S_offd_j[ji]; if (CF_marker_offd[j] == -1) { set_empty = 1; for (jj = S_ext_i[j]; jj < S_ext_i[j+1]; jj++) { index = S_ext_j[jj]; if (index > col_0 && index < col_n) /* index interior */ { if (graph_array[index-first_col] == i) { set_empty = 0; break; } } else { jk = hypre_BinarySearch(col_map_offd,index,num_cols_offd); if (jk != -1) { if (ci_array[jk] == i) { set_empty = 0; break; } } } } if (set_empty) { if (C_i_nonempty) { CF_marker[i] = 1; if (ci_tilde > -1) { CF_marker[ci_tilde] = -1; ci_tilde = -1; } if (ci_tilde_offd > -1) { CF_marker_offd[ci_tilde_offd] = -1; ci_tilde_offd = -1; } C_i_nonempty = 0; break; } else { ci_tilde_offd = j; ci_tilde_offd_mark = i; CF_marker_offd[j] = 1; C_i_nonempty = 1; i--; break; } } } } } } } } else { for (i=0; i < num_variables; i++) { if (ci_tilde_mark != i) ci_tilde = -1; if (CF_marker[i] == -1) { for (ji = S_i[i]; ji < S_i[i+1]; ji++) { j = S_j[ji]; if (CF_marker[j] > 0) 
graph_array[j] = i; } for (ji = S_i[i]; ji < S_i[i+1]; ji++) { j = S_j[ji]; if (CF_marker[j] == -1) { set_empty = 1; for (jj = S_i[j]; jj < S_i[j+1]; jj++) { index = S_j[jj]; if (graph_array[index] == i) { set_empty = 0; break; } } if (set_empty) { if (C_i_nonempty) { CF_marker[i] = 1; if (ci_tilde > -1) { CF_marker[ci_tilde] = -1; ci_tilde = -1; } C_i_nonempty = 0; break; } else { ci_tilde = j; ci_tilde_mark = i; CF_marker[j] = 1; C_i_nonempty = 1; i--; break; } } } } } } } if (debug_flag == 3 && coarsen_type != 2) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Coarsen 2nd pass = %f\n", my_id, wall_time); } /* third pass, check boundary fine points for coarse neighbors */ if (coarsen_type == 3 || coarsen_type == 4) { if (debug_flag == 3) wall_time = time_getWallclockSeconds(); CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); /*------------------------------------------------ * Exchange boundary data for CF_marker *------------------------------------------------*/ index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } ci_array = hypre_CTAlloc(HYPRE_Int,num_cols_offd); for (i=0; i < num_cols_offd; i++) ci_array[i] = -1; } if (coarsen_type > 1 && coarsen_type < 5) { for (i=0; i < num_variables; i++) graph_array[i] = -1; for (i=0; i < num_cols_offd; i++) { if (ci_tilde_mark != i) ci_tilde = -1; if (ci_tilde_offd_mark != i) ci_tilde_offd = -1; if (CF_marker_offd[i] == -1) { for (ji = S_ext_i[i]; ji < S_ext_i[i+1]; ji++) { j = S_ext_j[ji]; if (j > col_0 && j < col_n) { j = j - first_col; if 
(CF_marker[j] > 0) graph_array[j] = i; } else { jj = hypre_BinarySearch(col_map_offd,j,num_cols_offd); if (jj != -1 && CF_marker_offd[jj] > 0) ci_array[jj] = i; } } for (ji = S_ext_i[i]; ji < S_ext_i[i+1]; ji++) { j = S_ext_j[ji]; if (j > col_0 && j < col_n) { j = j - first_col; if ( CF_marker[j] == -1) { set_empty = 1; for (jj = S_i[j]; jj < S_i[j+1]; jj++) { index = S_j[jj]; if (graph_array[index] == i) { set_empty = 0; break; } } for (jj = S_offd_i[j]; jj < S_offd_i[j+1]; jj++) { index = S_offd_j[jj]; if (ci_array[index] == i) { set_empty = 0; break; } } if (set_empty) { if (C_i_nonempty) { CF_marker_offd[i] = 1; if (ci_tilde > -1) { CF_marker[ci_tilde] = -1; ci_tilde = -1; } if (ci_tilde_offd > -1) { CF_marker_offd[ci_tilde_offd] = -1; ci_tilde_offd = -1; } C_i_nonempty = 0; break; } else { ci_tilde = j; ci_tilde_mark = i; CF_marker[j] = 1; C_i_nonempty = 1; i--; break; } } } } else { jm = hypre_BinarySearch(col_map_offd,j,num_cols_offd); if (jm != -1 && CF_marker_offd[jm] == -1) { set_empty = 1; for (jj = S_ext_i[jm]; jj < S_ext_i[jm+1]; jj++) { index = S_ext_j[jj]; if (index > col_0 && index < col_n) { if (graph_array[index-first_col] == i) { set_empty = 0; break; } } else { jk = hypre_BinarySearch(col_map_offd,index,num_cols_offd); if (jk != -1) { if (ci_array[jk] == i) { set_empty = 0; break; } } } } if (set_empty) { if (C_i_nonempty) { CF_marker_offd[i] = 1; if (ci_tilde > -1) { CF_marker[ci_tilde] = -1; ci_tilde = -1; } if (ci_tilde_offd > -1) { CF_marker_offd[ci_tilde_offd] = -1; ci_tilde_offd = -1; } C_i_nonempty = 0; break; } else { ci_tilde_offd = jm; ci_tilde_offd_mark = i; CF_marker_offd[jm] = 1; C_i_nonempty = 1; i--; break; } } } } } } } /*------------------------------------------------ * Send boundary data for CF_marker back *------------------------------------------------*/ if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, CF_marker_offd, int_buf_data); hypre_ParCSRCommHandleDestroy(comm_handle); } /* only CF_marker 
entries from larger procs are accepted if coarsen_type = 4 coarse points are not overwritten */ index = 0; if (coarsen_type != 4) { for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); if (hypre_ParCSRCommPkgSendProc(comm_pkg,i) > my_id) { for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)] = int_buf_data[index++]; } else { index += hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - start; } } } else { for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); if (hypre_ParCSRCommPkgSendProc(comm_pkg,i) > my_id) { for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j); if (CF_marker[elmt] != 1) CF_marker[elmt] = int_buf_data[index]; index++; } } else { index += hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - start; } } } if (debug_flag == 3) { wall_time = time_getWallclockSeconds() - wall_time; if (coarsen_type == 4) hypre_printf("Proc = %d Coarsen 3rd pass = %f\n", my_id, wall_time); if (coarsen_type == 3) hypre_printf("Proc = %d Coarsen 3rd pass = %f\n", my_id, wall_time); if (coarsen_type == 2) hypre_printf("Proc = %d Coarsen 2nd pass = %f\n", my_id, wall_time); } } if (coarsen_type == 5) { /*------------------------------------------------ * Exchange boundary data for CF_marker *------------------------------------------------*/ if (debug_flag == 3) wall_time = time_getWallclockSeconds(); CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, 
comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } ci_array = hypre_CTAlloc(HYPRE_Int,num_cols_offd); for (i=0; i < num_cols_offd; i++) ci_array[i] = -1; for (i=0; i < num_variables; i++) graph_array[i] = -1; for (i=0; i < num_variables; i++) { if (CF_marker[i] == -1 && (S_offd_i[i+1]-S_offd_i[i]) > 0) { break_var = 1; for (ji = S_i[i]; ji < S_i[i+1]; ji++) { j = S_j[ji]; if (CF_marker[j] > 0) graph_array[j] = i; } for (ji = S_offd_i[i]; ji < S_offd_i[i+1]; ji++) { j = S_offd_j[ji]; if (CF_marker_offd[j] > 0) ci_array[j] = i; } for (ji = S_offd_i[i]; ji < S_offd_i[i+1]; ji++) { j = S_offd_j[ji]; if (CF_marker_offd[j] == -1) { set_empty = 1; for (jj = S_ext_i[j]; jj < S_ext_i[j+1]; jj++) { index = S_ext_j[jj]; if (index > col_0 && index < col_n) /* index interior */ { if (graph_array[index-first_col] == i) { set_empty = 0; break; } } else { jk = hypre_BinarySearch(col_map_offd,index,num_cols_offd); if (jk != -1) { if (ci_array[jk] == i) { set_empty = 0; break; } } } } if (set_empty) { if (C_i_nonempty) { CF_marker[i] = -2; C_i_nonempty = 0; break; } else { C_i_nonempty = 1; i--; break; } } } } } } if (debug_flag == 3) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Coarsen special points = %f\n", my_id, wall_time); } } /*--------------------------------------------------- * Clean up and return *---------------------------------------------------*/ /*if (coarsen_type != 1) { */ hypre_TFree(CF_marker_offd); hypre_TFree(int_buf_data); hypre_TFree(ci_array); /*} */ hypre_TFree(graph_array); if ((meas_type || (coarsen_type != 1 && coarsen_type != 11)) && num_procs > 1) hypre_CSRMatrixDestroy(S_ext); *CF_marker_ptr = CF_marker; return (ierr); } HYPRE_Int hypre_BoomerAMGCoarsenFalgout( hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A, HYPRE_Int measure_type, HYPRE_Int debug_flag, HYPRE_Int **CF_marker_ptr) { HYPRE_Int ierr = 0; /*------------------------------------------------------- * Perform Ruge coarsening 
followed by CLJP coarsening
    *-------------------------------------------------------*/

   ierr += hypre_BoomerAMGCoarsenRuge (S, A, measure_type, 6, debug_flag,
                                       CF_marker_ptr);

   ierr += hypre_BoomerAMGCoarsen (S, A, 1, debug_flag,
                                   CF_marker_ptr);

   return (ierr);
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCoarsenHMIS
 *
 * HMIS coarsening: a first pass of Ruge coarsening (coarsen_type 10,
 * i.e. restricted to processor boundaries) followed by PMIS coarsening
 * on the remaining points.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCoarsenHMIS( hypre_ParCSRMatrix    *S,
                            hypre_ParCSRMatrix    *A,
                            HYPRE_Int              measure_type,
                            HYPRE_Int              debug_flag,
                            HYPRE_Int            **CF_marker_ptr)
{
   HYPRE_Int              ierr = 0;

   /*-------------------------------------------------------
    * Perform Ruge coarsening followed by PMIS coarsening
    * (the original comment said "CLJP", but the second pass
    * below is hypre_BoomerAMGCoarsenPMIS, not CLJP)
    *-------------------------------------------------------*/

   ierr += hypre_BoomerAMGCoarsenRuge (S, A, measure_type, 10, debug_flag,
                                       CF_marker_ptr);

   ierr += hypre_BoomerAMGCoarsenPMIS (S, A, 1, debug_flag,
                                       CF_marker_ptr);

   return (ierr);
}

/*--------------------------------------------------------------------------*/

/* CF_marker codes used by the PMIS coarsening below */
#define C_PT 1
#define F_PT -1
#define SF_PT -3
#define COMMON_C_PT 2
#define Z_PT -2

/* begin HANS added */
/**************************************************************
 *
 * Modified Independent Set Coarsening routine
 *    (don't worry about strong F-F connections
 *     without a common C point)
 *
 **************************************************************/
HYPRE_Int
hypre_BoomerAMGCoarsenPMIS( hypre_ParCSRMatrix    *S,
                            hypre_ParCSRMatrix    *A,
                            HYPRE_Int              CF_init,
                            HYPRE_Int              debug_flag,
                            HYPRE_Int            **CF_marker_ptr)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PMIS] -= hypre_MPI_Wtime();
#endif

   MPI_Comm                comm          = hypre_ParCSRMatrixComm(S);
   hypre_ParCSRCommPkg    *comm_pkg      = hypre_ParCSRMatrixCommPkg(S);
   hypre_ParCSRCommHandle *comm_handle;

   hypre_CSRMatrix        *S_diag        = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int              *S_diag_i      = hypre_CSRMatrixI(S_diag);
   HYPRE_Int              *S_diag_j      = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix        *S_offd        = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int              *S_offd_i      = hypre_CSRMatrixI(S_offd);
   HYPRE_Int              *S_offd_j;

   HYPRE_Int               num_variables = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int               num_cols_offd = 0;

   /* hypre_CSRMatrix *S_ext;
HYPRE_Int *S_ext_i; HYPRE_Int *S_ext_j; */ HYPRE_Int num_sends = 0; HYPRE_Int *int_buf_data; HYPRE_Real *buf_data; HYPRE_Int *CF_marker; HYPRE_Int *CF_marker_offd; HYPRE_Real *measure_array; HYPRE_Int *graph_array; HYPRE_Int *graph_array_offd; HYPRE_Int graph_size; HYPRE_Int graph_offd_size; HYPRE_Int global_graph_size; HYPRE_Int i, j, jj, jS, ig; HYPRE_Int index, start, my_id, num_procs, jrow, cnt, elmt; HYPRE_Int ierr = 0; HYPRE_Real wall_time; HYPRE_Int iter = 0; HYPRE_Int *prefix_sum_workspace; #if 0 /* debugging */ char filename[256]; FILE *fp; HYPRE_Int iter = 0; #endif /******************************************************************************* BEFORE THE INDEPENDENT SET COARSENING LOOP: measure_array: calculate the measures, and communicate them (this array contains measures for both local and external nodes) CF_marker, CF_marker_offd: initialize CF_marker (separate arrays for local and external; 0=unassigned, negative=F point, positive=C point) ******************************************************************************/ /*-------------------------------------------------------------- * Use the ParCSR strength matrix, S. * * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = 1, else S_ij = 0. 
* * NOTE: S_data is not used; in stead, only strong columns are retained * in S_j, which can then be used like S_data *----------------------------------------------------------------*/ /*S_ext = NULL; */ if (debug_flag == 3) wall_time = time_getWallclockSeconds(); hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); if (!comm_pkg) { comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); num_cols_offd = hypre_CSRMatrixNumCols(S_offd); S_diag_j = hypre_CSRMatrixJ(S_diag); if (num_cols_offd) { S_offd_j = hypre_CSRMatrixJ(S_offd); } /*---------------------------------------------------------- * Compute the measures * * The measures are currently given by the column sums of S. * Hence, measure_array[i] is the number of influences * of variable i. * * The measures are augmented by a random number * between 0 and 1. 
*----------------------------------------------------------*/ measure_array = hypre_CTAlloc(HYPRE_Real, num_variables+num_cols_offd); /* first calculate the local part of the sums for the external nodes */ #ifdef HYPRE_USING_OPENMP HYPRE_Int *measure_array_temp = hypre_CTAlloc(HYPRE_Int, num_variables+num_cols_offd); #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE for (i=0; i < S_offd_i[num_variables]; i++) { #pragma omp atomic measure_array_temp[num_variables + S_offd_j[i]]++; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE for (i=0; i < num_cols_offd; i++) { measure_array[i + num_variables] = measure_array_temp[i + num_variables]; } #else for (i=0; i < S_offd_i[num_variables]; i++) { measure_array[num_variables + S_offd_j[i]] += 1.0; } #endif // HYPRE_USING_OPENMP /* now send those locally calculated values for the external nodes to the neighboring processors */ if (num_procs > 1) comm_handle = hypre_ParCSRCommHandleCreate(2, comm_pkg, &measure_array[num_variables], buf_data); /* calculate the local part for the local nodes */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE for (i=0; i < S_diag_i[num_variables]; i++) { #pragma omp atomic measure_array_temp[S_diag_j[i]]++; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE for (i=0; i < num_variables; i++) { measure_array[i] = measure_array_temp[i]; } hypre_TFree(measure_array_temp); #else for (i=0; i < S_diag_i[num_variables]; i++) { measure_array[S_diag_j[i]] += 1.0; } #endif // HYPRE_USING_OPENMP /* finish the communication */ if (num_procs > 1) hypre_ParCSRCommHandleDestroy(comm_handle); /* now add the externally calculated part of the local nodes to the local nodes */ index = 0; for (i=0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) measure_array[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)] += buf_data[index++]; } /* set the measures of the external nodes 
to zero */ for (i=num_variables; i < num_variables+num_cols_offd; i++) { measure_array[i] = 0; } /* this augments the measures with a random number between 0 and 1 */ /* (only for the local part) */ /* this augments the measures */ if (CF_init == 2 || CF_init == 4) hypre_BoomerAMGIndepSetInit(S, measure_array, 1); else hypre_BoomerAMGIndepSetInit(S, measure_array, 0); /*--------------------------------------------------- * Initialize the graph arrays, and CF_marker arrays *---------------------------------------------------*/ /* first the off-diagonal part of the graph array */ if (num_cols_offd) graph_array_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd); else graph_array_offd = NULL; for (ig = 0; ig < num_cols_offd; ig++) graph_array_offd[ig] = ig; graph_offd_size = num_cols_offd; /* now the local part of the graph array, and the local CF_marker array */ graph_array = hypre_CTAlloc(HYPRE_Int, num_variables); if (CF_init==1) { CF_marker = *CF_marker_ptr; cnt = 0; for (i=0; i < num_variables; i++) { if ( (S_offd_i[i+1]-S_offd_i[i]) > 0 || CF_marker[i] == -1) { CF_marker[i] = 0; } if ( CF_marker[i] == Z_PT) { if (measure_array[i] >= 1.0 || (S_diag_i[i+1]-S_diag_i[i]) > 0) { CF_marker[i] = 0; graph_array[cnt++] = i; } else { CF_marker[i] = F_PT; } } else if (CF_marker[i] == SF_PT) measure_array[i] = 0; else graph_array[cnt++] = i; } } else { CF_marker = hypre_CTAlloc(HYPRE_Int, num_variables); cnt = 0; for (i=0; i < num_variables; i++) { CF_marker[i] = 0; if ( (S_diag_i[i+1]-S_diag_i[i]) == 0 && (S_offd_i[i+1]-S_offd_i[i]) == 0) { CF_marker[i] = SF_PT; /* an isolated fine grid */ if (CF_init == 3 || CF_init == 4) CF_marker[i] = C_PT; measure_array[i] = 0; } else graph_array[cnt++] = i; } } graph_size = cnt; /* now the off-diagonal part of CF_marker */ if (num_cols_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd); else CF_marker_offd = NULL; for (i=0; i < num_cols_offd; i++) CF_marker_offd[i] = 0; /*------------------------------------------------ * 
Communicate the local measures, which are complete, to the external nodes *------------------------------------------------*/ index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { jrow = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j); buf_data[index++] = measure_array[jrow]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, buf_data, &measure_array[num_variables]); hypre_ParCSRCommHandleDestroy(comm_handle); } if (debug_flag == 3) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Initialize CLJP phase = %f\n", my_id, wall_time); } HYPRE_Int *graph_array2 = hypre_CTAlloc(HYPRE_Int, num_variables); HYPRE_Int *graph_array_offd2 = NULL; if (num_cols_offd) graph_array_offd2 = hypre_CTAlloc(HYPRE_Int, num_cols_offd); /******************************************************************************* THE INDEPENDENT SET COARSENING LOOP: ******************************************************************************/ /*--------------------------------------------------- * Loop until all points are either fine or coarse. *---------------------------------------------------*/ while (1) { /* stop the coarsening if nothing left to be coarsened */ hypre_MPI_Allreduce(&graph_size,&global_graph_size,1,HYPRE_MPI_INT,hypre_MPI_SUM,comm); if (global_graph_size == 0) break; /* hypre_printf("\n"); hypre_printf("*** MIS iteration %d\n",iter); hypre_printf("graph_size remaining %d\n",graph_size);*/ /*------------------------------------------------ * Pick an independent set of points with * maximal measure. 
At the end, CF_marker is complete, but still needs to be communicated to CF_marker_offd *------------------------------------------------*/ if (!CF_init || iter) { /*hypre_BoomerAMGIndepSet(S, measure_array, graph_array, graph_size, graph_array_offd, graph_offd_size, CF_marker, CF_marker_offd);*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(ig, i) HYPRE_SMP_SCHEDULE #endif for (ig = 0; ig < graph_size; ig++) { i = graph_array[ig]; if (measure_array[i] > 1) { CF_marker[i] = 1; } } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(ig, i) HYPRE_SMP_SCHEDULE #endif for (ig = 0; ig < graph_offd_size; ig++) { i = graph_array_offd[ig]; if (measure_array[i+num_variables] > 1) { CF_marker_offd[i] = 1; } } /*------------------------------------------------------- * Remove nodes from the initial independent set *-------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(ig, i, jS, j, jj) HYPRE_SMP_SCHEDULE #endif for (ig = 0; ig < graph_size; ig++) { i = graph_array[ig]; if (measure_array[i] > 1) { for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++) { j = S_diag_j[jS]; if (measure_array[j] > 1) { if (measure_array[i] > measure_array[j]) CF_marker[j] = 0; else if (measure_array[j] > measure_array[i]) CF_marker[i] = 0; } } /* for each local neighbor j of i */ for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++) { jj = S_offd_j[jS]; j = num_variables+jj; if (measure_array[j] > 1) { if (measure_array[i] > measure_array[j]) CF_marker_offd[jj] = 0; else if (measure_array[j] > measure_array[i]) CF_marker[i] = 0; } } } /* for each node with measure > 1 */ } /* for each node i */ /*------------------------------------------------ * Exchange boundary data for CF_marker: send internal points to external points *------------------------------------------------*/ if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, CF_marker_offd, int_buf_data); hypre_ParCSRCommHandleDestroy(comm_handle); } 
index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j); if (!int_buf_data[index] && CF_marker[elmt] > 0) { CF_marker[elmt] = 0; index++; } else { int_buf_data[index++] = CF_marker[elmt]; } } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } } iter++; /*------------------------------------------------ * Set C-pts and F-pts. *------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(ig, i, jS, j) HYPRE_SMP_SCHEDULE #endif for (ig = 0; ig < graph_size; ig++) { i = graph_array[ig]; /*--------------------------------------------- * If the measure of i is smaller than 1, then * make i and F point (because it does not influence * any other point) *---------------------------------------------*/ if(measure_array[i]<1.) CF_marker[i]= F_PT; /*--------------------------------------------- * First treat the case where point i is in the * independent set: make i a C point, *---------------------------------------------*/ if (CF_marker[i] > 0) CF_marker[i] = C_PT; /*--------------------------------------------- * Now treat the case where point i is not in the * independent set: loop over * all the points j that influence equation i; if * j is a C point, then make i an F point. 
*---------------------------------------------*/ else { /* first the local part */ for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++) { /* j is the column number, or the local number of the point influencing i */ j = S_diag_j[jS]; if (CF_marker[j] > 0) /* j is a C-point */ CF_marker[i] = F_PT; } /* now the external part */ for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++) { j = S_offd_j[jS]; if (CF_marker_offd[j] > 0) /* j is a C-point */ CF_marker[i] = F_PT; } } /* end else */ } /* end first loop over graph */ /* now communicate CF_marker to CF_marker_offd, to make sure that new external F points are known on this processor */ /*------------------------------------------------ * Exchange boundary data for CF_marker: send internal points to external points *------------------------------------------------*/ index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } /*------------------------------------------------ * Update subgraph *------------------------------------------------*/ /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/ prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1)); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(ig,i) #endif { HYPRE_Int private_graph_size_cnt = 0; HYPRE_Int private_graph_offd_size_cnt = 0; HYPRE_Int ig_begin, ig_end; hypre_GetSimpleThreadPartition(&ig_begin, &ig_end, graph_size); HYPRE_Int ig_offd_begin, ig_offd_end; hypre_GetSimpleThreadPartition(&ig_offd_begin, &ig_offd_end, graph_offd_size); for (ig = ig_begin; ig < ig_end; ig++) { i = graph_array[ig]; if (CF_marker[i]!=0) /* C or F point */ { /* the independent set subroutine needs measure 0 
for removed nodes */ measure_array[i] = 0; } else { private_graph_size_cnt++; } } for (ig = ig_offd_begin; ig < ig_offd_end; ig++) { i = graph_array_offd[ig]; if (CF_marker_offd[i]!=0) /* C of F point */ { /* the independent set subroutine needs measure 0 for removed nodes */ measure_array[i + num_variables] = 0; } else { private_graph_offd_size_cnt++; } } hypre_prefix_sum_pair(&private_graph_size_cnt, &graph_size, &private_graph_offd_size_cnt, &graph_offd_size, prefix_sum_workspace); for (ig = ig_begin; ig < ig_end; ig++) { i = graph_array[ig]; if (CF_marker[i]==0) { graph_array2[private_graph_size_cnt++] = i; } } for (ig = ig_offd_begin; ig < ig_offd_end; ig++) { i = graph_array_offd[ig]; if (CF_marker_offd[i]==0) { graph_array_offd2[private_graph_offd_size_cnt++] = i; } } } /* omp parallel */ HYPRE_Int *temp = graph_array; graph_array = graph_array2; graph_array2 = temp; temp = graph_array_offd; graph_array_offd = graph_array_offd2; graph_array_offd2 = temp; hypre_TFree(prefix_sum_workspace); } /* end while */ /* hypre_printf("*** MIS iteration %d\n",iter); hypre_printf("graph_size remaining %d\n",graph_size); hypre_printf("num_cols_offd %d\n",num_cols_offd); for (i=0;i<num_variables;i++) { if(CF_marker[i]==1) hypre_printf("node %d CF %d\n",i,CF_marker[i]); }*/ /*--------------------------------------------------- * Clean up and return *---------------------------------------------------*/ hypre_TFree(measure_array); hypre_TFree(graph_array); hypre_TFree(graph_array2); hypre_TFree(graph_array_offd2); if (num_cols_offd) hypre_TFree(graph_array_offd); hypre_TFree(buf_data); hypre_TFree(int_buf_data); hypre_TFree(CF_marker_offd); /*if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);*/ *CF_marker_ptr = CF_marker; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PMIS] += hypre_MPI_Wtime(); #endif return (ierr); }
residual_based_bdf_custom_scheme.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_RESIDUAL_BASED_BDF_CUSTOM_SCHEME ) #define KRATOS_RESIDUAL_BASED_BDF_CUSTOM_SCHEME /* System includes */ /* External includes */ /* Project includes */ #include "solving_strategies/schemes/residual_based_bdf_scheme.h" #include "includes/variables.h" #include "includes/kratos_parameters.h" #include "includes/checks.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedBDFCustomScheme * @ingroup KratosCore * @brief BDF integration scheme (for dynamic problems) * @details The second order Backward Differentiation Formula (BDF) method is a two step second order accurate method. * This scheme is a generalization of the only displacement scheme, where any list of variables and its derivatives can be considered instead * Look at the base class for more details * @see ResidualBasedBDFScheme * @author Vicente Mataix Ferrandiz */ template<class TSparseSpace, class TDenseSpace> class ResidualBasedBDFCustomScheme : public ResidualBasedBDFScheme<TSparseSpace, TDenseSpace> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedBDFCustomScheme ); typedef Scheme<TSparseSpace,TDenseSpace> BaseType; typedef ResidualBasedImplicitTimeScheme<TSparseSpace,TDenseSpace> ImplicitBaseType; typedef ResidualBasedBDFScheme<TSparseSpace,TDenseSpace> BDFBaseType; typedef ResidualBasedBDFCustomScheme<TSparseSpace, TDenseSpace> ClassType; typedef typename ImplicitBaseType::TDataType TDataType; typedef typename ImplicitBaseType::DofsArrayType DofsArrayType; typedef typename Element::DofsVectorType DofsVectorType; typedef typename ImplicitBaseType::TSystemMatrixType 
TSystemMatrixType; typedef typename ImplicitBaseType::TSystemVectorType TSystemVectorType; typedef typename ImplicitBaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename ImplicitBaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ElementsContainerType ElementsArrayType; typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef typename BaseType::Pointer BaseTypePointer; ///@} ///@name Life Cycle ///@{ /** * @brief Constructor. The BDF method * @param ThisParameters The parameters containing the list of variables to consider * @todo The ideal would be to use directly the dof or the variable itself to identify the type of variable and is derivatives */ explicit ResidualBasedBDFCustomScheme(Parameters ThisParameters) :BDFBaseType() { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); // Creating variables list CreateVariablesList(ThisParameters); } /** * @brief Constructor. The BDF method * @param Order The integration order * @param ThisParameters The parameters containing the list of variables to consider * @todo The ideal would be to use directly the dof or the variable itself to identify the type of variable and is derivatives */ explicit ResidualBasedBDFCustomScheme( const std::size_t Order = 2, Parameters ThisParameters = Parameters(R"({})") ) :BDFBaseType(Order) { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); // Creating variables list CreateVariablesList(ThisParameters); } /** Copy Constructor. 
*/ explicit ResidualBasedBDFCustomScheme(ResidualBasedBDFCustomScheme& rOther) :BDFBaseType(rOther) ,mDoubleVariable(rOther.mDoubleVariable) ,mFirstDoubleDerivatives(rOther.mFirstDoubleDerivatives) ,mSecondDoubleDerivatives(rOther.mSecondDoubleDerivatives) { } /** * Clone */ BaseTypePointer Clone() override { return BaseTypePointer( new ResidualBasedBDFCustomScheme(*this) ); } /** Destructor. */ ~ResidualBasedBDFCustomScheme () override {} ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Create method * @param ThisParameters The configuration parameters */ typename BaseType::Pointer Create(Parameters ThisParameters) const override { return Kratos::make_shared<ClassType>(ThisParameters); } /** * @brief This is the place to initialize the Scheme. * @details This is intended to be called just once when the strategy is initialized * @param rModelPart The model part of the problem to solve */ void Initialize(ModelPart& rModelPart) override { KRATOS_TRY BDFBaseType::Initialize(rModelPart); // The current process info const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // Getting dimension KRATOS_WARNING_IF("ResidualBasedBDFCustomScheme", !r_current_process_info.Has(DOMAIN_SIZE)) << "DOMAIN_SIZE not defined. Please define DOMAIN_SIZE. 3D case will be assumed" << std::endl; const std::size_t domain_size = r_current_process_info.Has(DOMAIN_SIZE) ? 
r_current_process_info.GetValue(DOMAIN_SIZE) : 3; if (domain_size != mDomainSize) { const std::size_t total_number_of_variables = mDoubleVariable.size(); // We remove the third component if (domain_size == 2) { const std::size_t number_variables_added = total_number_of_variables/3; for (std::size_t i = 0; i < number_variables_added; ++i) { mDoubleVariable.erase(mDoubleVariable.begin() + (2 + 2 * i)); mFirstDoubleDerivatives.erase(mDoubleVariable.begin() + (2 + 2 * i)); mSecondDoubleDerivatives.erase(mDoubleVariable.begin() + (2 + 2 * i)); } } else if (domain_size == 3) { // We need to add the third component const std::size_t number_variables_added = total_number_of_variables/2; for (std::size_t i = 0; i < number_variables_added; ++i) { const std::string variable_name = ((*(mDoubleVariable.begin() + 2 * i))->GetSourceVariable()).Name(); const auto& r_var_z = KratosComponents<Variable<double>>::Get(variable_name + "_Z"); mDoubleVariable.push_back(&r_var_z); mFirstDoubleDerivatives.push_back(&(r_var_z.GetTimeDerivative())); mSecondDoubleDerivatives.push_back(&((r_var_z.GetTimeDerivative()).GetTimeDerivative())); } } else { KRATOS_ERROR << "DOMAIN_SIZE can onbly be 2 or 3. It is: " << domain_size << std::endl; } mDomainSize = domain_size; } KRATOS_CATCH("") } /** * @brief It initializes time step solution. 
     *        Only for reasons if the time step solution is restarted
     * @param rModelPart The model part of the problem to solve
     * @param rA LHS matrix
     * @param rDx Incremental update of primary variables
     * @param rb RHS Vector
     */
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY;

        BDFBaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);

        // Updating time derivatives (nodally for efficiency)
        const int num_nodes = static_cast<int>( rModelPart.Nodes().size() );
        const auto it_node_begin = rModelPart.Nodes().begin();

        // Auxiliar fixed value. Declared here and listed private(fixed) so each
        // OpenMP thread gets its own copy; it is re-assigned before every use.
        bool fixed = false;
        #pragma omp parallel for private(fixed)
        for(int i = 0;  i < num_nodes; ++i) {
            auto it_node = it_node_begin + i;

            std::size_t counter = 0;
            for (auto p_var : mDoubleVariable) {
                fixed = false;
                // Derivatives of the current variable (same index in all three lists)
                const auto& dvar = *mFirstDoubleDerivatives[counter];
                const auto& d2var = *mSecondDoubleDerivatives[counter];

                // NOTE(review): when a time-derivative DOF is fixed, the base
                // variable is fixed as well — presumably so the prescribed
                // derivative BC is respected by the BDF update; confirm.
                if (it_node->HasDofFor(d2var)) {
                    if (it_node->IsFixed(d2var)) {
                        it_node->Fix(*p_var);
                        fixed = true;
                    }
                }

                // "fixed" guards against fixing twice when both derivatives are fixed
                if (it_node->HasDofFor(dvar)) {
                    if (it_node->IsFixed(dvar) && !fixed) {
                        it_node->Fix(*p_var);
                    }
                }

                counter++;
            }
        }

        KRATOS_CATCH("ResidualBasedBDFCustomScheme.InitializeSolutionStep");
    }

    /**
     * @brief Performing the prediction of the solution
     * @details It predicts the solution for the current step x = xold + vold * Dt
     * @param rModelPart The model of the problem to solve
     * @param rDofSet set of all primary variables
     * @param A LHS matrix
     * @param Dx Incremental update of primary variables
     * @param b RHS Vector
     */
    void Predict(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b
        ) override
    {
        KRATOS_TRY;

        // Getting process info
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // Getting delta time
        const double delta_time = r_current_process_info[DELTA_TIME];

        // Updating time derivatives (nodally for efficiency)
        const int num_nodes =
        static_cast<int>( rModelPart.Nodes().size() );

        // Getting first node iterator
        const auto it_node_begin = rModelPart.Nodes().begin();

        #pragma omp parallel for
        for(int i = 0;  i < num_nodes; ++i) {
            auto it_node = it_node_begin + i;

            std::size_t counter = 0;
            for (auto p_var : mDoubleVariable) {
                // Derivatives (same index in all three variable lists)
                const auto& dvar = *mFirstDoubleDerivatives[counter];
                const auto& d2var = *mSecondDoubleDerivatives[counter];

                ComputePredictComponent(it_node, *p_var, dvar, d2var, delta_time);
                counter++;
            }

            // Updating time derivatives from the freshly predicted values
            UpdateFirstDerivative(it_node);
            UpdateSecondDerivative(it_node);
        }

        KRATOS_CATCH( "" );
    }

    /**
     * @brief This function is designed to be called once to perform all the checks needed
     * on the input provided.
     * @details Checks can be "expensive" as the function is designed
     * to catch user's errors.
     * @param rModelPart The model of the problem to solve
     * @return Zero means all ok
     */
    int Check(const ModelPart& rModelPart) const override
    {
        KRATOS_TRY;

        const int err = BDFBaseType::Check(rModelPart);
        if(err!=0) return err;

        // Check that variables are correctly allocated on every node:
        // nodal data for the variables and both derivative lists, and a DOF
        // for each base variable.
        for(auto& r_node : rModelPart.Nodes()) {
            for ( auto p_var : mDoubleVariable)
                KRATOS_CHECK_VARIABLE_IN_NODAL_DATA((*p_var), r_node)
            for ( auto p_var : mFirstDoubleDerivatives)
                KRATOS_CHECK_VARIABLE_IN_NODAL_DATA((*p_var), r_node)
            for ( auto p_var : mSecondDoubleDerivatives)
                KRATOS_CHECK_VARIABLE_IN_NODAL_DATA((*p_var), r_node)

            for ( auto p_var : mDoubleVariable)
                KRATOS_CHECK_DOF_IN_NODE((*p_var), r_node)
        }

        KRATOS_CATCH( "" );

        return 0;
    }

    /**
     * @brief This method provides the defaults parameters to avoid conflicts between the different constructors
     * @return The default parameters
     */
    Parameters GetDefaultParameters() const override
    {
        Parameters default_parameters = Parameters(R"( { "name" : "bdf_scheme", "domain_size" : 3, "integration_order" : 2, "solution_variables" : ["DISPLACEMENT"] })");

        // Getting base class default parameters
        const Parameters base_default_parameters =
BDFBaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return "bdf_scheme"; } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualBasedBDFCustomScheme"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ std::vector<const Variable<double>*> mDoubleVariable; /// The double variables std::vector<const Variable<double>*> mFirstDoubleDerivatives; /// The first derivative double variable to compute std::vector<const Variable<double>*> mSecondDoubleDerivatives; /// The second derivative double variable to compute ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief Updating first time derivative (velocity) * @param itNode the node interator */ inline void UpdateFirstDerivative(NodesArrayType::iterator itNode) override { // DOUBLES std::size_t counter = 0; for (auto p_var : mDoubleVariable) { double& dotun0 = itNode->FastGetSolutionStepValue(*mFirstDoubleDerivatives[counter]); dotun0 = BDFBaseType::mBDF[0] * itNode->FastGetSolutionStepValue(*p_var); for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order) dotun0 += BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(*p_var, i_order); counter++; } } /** * @brief Updating second time derivative (acceleration) * @param itNode the node interator */ inline void 
UpdateSecondDerivative(NodesArrayType::iterator itNode) override { // DOUBLES std::size_t counter = 0; for (auto p_var : mFirstDoubleDerivatives) { double& dot2un0 = itNode->FastGetSolutionStepValue(*mSecondDoubleDerivatives[counter]); dot2un0 = BDFBaseType::mBDF[0] * itNode->FastGetSolutionStepValue(*p_var); for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order) dot2un0 += BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(*p_var, i_order); counter++; } } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@{ private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ std::size_t mDomainSize = 3; /// This auxiliar variable is used to store the domain size of the problem ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ /** * @brief This method reduces the code duplication for each components when computing the prediction * @param itNode The node iterator of the node currently being computed * @param rVariable The variable currently being integrated * @param rDerivedVariable The first time derivative of the current variable * @param rDerived2Variable The second time derivative of the current variable * @param DeltaTime The increment of time for the time integration */ template<class TClassVar> void ComputePredictComponent( NodesArrayType::iterator itNode, const TClassVar& rVariable, const TClassVar& rDerivedVariable, const TClassVar& rDerived2Variable, const double DeltaTime ) { // Values const double dot2un1 = itNode->FastGetSolutionStepValue(rDerived2Variable, 1); const double dotun1 = itNode->FastGetSolutionStepValue(rDerivedVariable, 1); const double un1 = itNode->FastGetSolutionStepValue(rVariable, 1); const double dot2un0 = itNode->FastGetSolutionStepValue(rDerived2Variable); double& dotun0 = itNode->FastGetSolutionStepValue(rDerivedVariable); double& un0 = itNode->FastGetSolutionStepValue(rVariable); if 
(itNode->HasDofFor(rDerived2Variable) && itNode->IsFixed(rDerived2Variable)) { dotun0 = dot2un0; for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order) dotun0 -= BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(rDerivedVariable, i_order); dotun0 /= BDFBaseType::mBDF[0]; un0 = dotun0; for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order) un0 -= BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(rVariable, i_order); un0 /= BDFBaseType::mBDF[0]; } else if (itNode->HasDofFor(rDerivedVariable) && itNode->IsFixed(rDerivedVariable)) { un0 = dotun0; for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order) un0 -= BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(rVariable, i_order); un0 /= BDFBaseType::mBDF[0]; } else if (!itNode->IsFixed(rVariable)) { un0 = un1 + DeltaTime * dotun1 + 0.5 * std::pow(DeltaTime, 2) * dot2un1; } } /** * @brief This method creates the list of variables * @param ThisParameters The configuration parameters */ void CreateVariablesList(Parameters ThisParameters) { const std::size_t n_variables = ThisParameters["solution_variables"].size(); // The current dimension mDomainSize = ThisParameters["domain_size"].GetInt(); const auto variable_names = ThisParameters["solution_variables"].GetStringArray(); for (std::size_t p_var = 0; p_var < n_variables; ++p_var){ const std::string& variable_name = variable_names[p_var]; if(KratosComponents<Variable<double>>::Has(variable_name)){ const auto& r_var = KratosComponents<Variable<double>>::Get(variable_name); mDoubleVariable.push_back(&r_var); mFirstDoubleDerivatives.push_back(&(r_var.GetTimeDerivative())); mSecondDoubleDerivatives.push_back(&((r_var.GetTimeDerivative()).GetTimeDerivative())); } else if (KratosComponents< Variable< array_1d< double, 3> > >::Has(variable_name)) { // Components const auto& r_var_x = KratosComponents<Variable<double>>::Get(variable_name+"_X"); const auto& r_var_y = 
KratosComponents<Variable<double>>::Get(variable_name+"_Y"); mDoubleVariable.push_back(&r_var_x); mDoubleVariable.push_back(&r_var_y); mFirstDoubleDerivatives.push_back(&(r_var_x.GetTimeDerivative())); mFirstDoubleDerivatives.push_back(&(r_var_y.GetTimeDerivative())); mSecondDoubleDerivatives.push_back(&((r_var_x.GetTimeDerivative()).GetTimeDerivative())); mSecondDoubleDerivatives.push_back(&((r_var_y.GetTimeDerivative()).GetTimeDerivative())); if (mDomainSize == 3) { const auto& r_var_z = KratosComponents<Variable<double>>::Get(variable_name+"_Z"); mDoubleVariable.push_back(&r_var_z); mFirstDoubleDerivatives.push_back(&(r_var_z.GetTimeDerivative())); mSecondDoubleDerivatives.push_back(&((r_var_z.GetTimeDerivative()).GetTimeDerivative())); } } else { KRATOS_ERROR << "Only double and vector variables are allowed in the variables list." ; } } } ///@} ///@name Private Access ///@{ ///@} ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class ResidualBasedBDFCustomScheme */ ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUAL_BASED_BDF_CUSTOM_SCHEME defined */
collision_matrix.c
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/ #include "collision_matrix.h" #include <math.h> #include <stdio.h> #include <stdlib.h> #include "phonoc_array.h" #include "phonoc_utils.h" static void get_collision_matrix( double *collision_matrix, const double *fc3_normal_squared, const long num_band0, const long num_band, const double *frequencies, const long (*triplets)[3], const long *triplets_map, const long num_gp, const long *map_q, const long *rot_grid_points, const long num_ir_gp, const long num_rot, const double *rotations_cartesian, const double *g, const double temperature, const double unit_conversion_factor, const double cutoff_frequency); static void get_reducible_collision_matrix( double *collision_matrix, const double *fc3_normal_squared, const long num_band0, const long num_band, const double *frequencies, const long (*triplets)[3], const long *triplets_map, const long num_gp, const long *map_q, const double *g, const double temperature, const double unit_conversion_factor, const double cutoff_frequency); static long get_inv_sinh(double *inv_sinh, const long gp, const double temperature, const double *frequencies, const long triplet[3], const long *triplets_map, const long *map_q, const long num_band, const double cutoff_frequency); static long *create_gp2tp_map(const long *triplets_map, const long num_gp); void col_get_collision_matrix( double *collision_matrix, const Darray *fc3_normal_squared, const double *frequencies, const long (*triplets)[3], const long *triplets_map, const long *map_q, const long *rot_grid_points, const double *rotations_cartesian, const double *g, const long num_ir_gp, const long num_gp, const long num_rot, const double temperature, const double unit_conversion_factor, const double cutoff_frequency) { long num_triplets, num_band0, num_band; num_triplets = fc3_normal_squared->dims[0]; num_band0 = fc3_normal_squared->dims[1]; num_band = fc3_normal_squared->dims[2]; get_collision_matrix(collision_matrix, fc3_normal_squared->data, num_band0, num_band, frequencies, 
triplets, triplets_map, num_gp, map_q, rot_grid_points, num_ir_gp, num_rot, rotations_cartesian, g + 2 * num_triplets * num_band0 * num_band * num_band, temperature, unit_conversion_factor, cutoff_frequency); } void col_get_reducible_collision_matrix( double *collision_matrix, const Darray *fc3_normal_squared, const double *frequencies, const long (*triplets)[3], const long *triplets_map, const long *map_q, const double *g, const long num_gp, const double temperature, const double unit_conversion_factor, const double cutoff_frequency) { long num_triplets, num_band, num_band0; num_triplets = fc3_normal_squared->dims[0]; num_band0 = fc3_normal_squared->dims[1]; num_band = fc3_normal_squared->dims[2]; get_reducible_collision_matrix( collision_matrix, fc3_normal_squared->data, num_band0, num_band, frequencies, triplets, triplets_map, num_gp, map_q, g + 2 * num_triplets * num_band0 * num_band * num_band, temperature, unit_conversion_factor, cutoff_frequency); } static void get_collision_matrix( double *collision_matrix, const double *fc3_normal_squared, const long num_band0, const long num_band, const double *frequencies, const long (*triplets)[3], const long *triplets_map, const long num_gp, const long *map_q, const long *rot_grid_points, const long num_ir_gp, const long num_rot, const double *rotations_cartesian, const double *g, const double temperature, const double unit_conversion_factor, const double cutoff_frequency) { long i, j, k, l, m, n, ti, r_gp, swapped; long *gp2tp_map; double collision; double *inv_sinh; gp2tp_map = create_gp2tp_map(triplets_map, num_gp); #ifdef _OPENMP #pragma omp parallel for private(j, k, l, m, n, ti, r_gp, collision, inv_sinh) #endif for (i = 0; i < num_ir_gp; i++) { inv_sinh = (double *)malloc(sizeof(double) * num_band); for (j = 0; j < num_rot; j++) { r_gp = rot_grid_points[i * num_rot + j]; ti = gp2tp_map[triplets_map[r_gp]]; swapped = get_inv_sinh(inv_sinh, r_gp, temperature, frequencies, triplets[ti], triplets_map, map_q, 
num_band, cutoff_frequency); for (k = 0; k < num_band0; k++) { for (l = 0; l < num_band; l++) { collision = 0; for (m = 0; m < num_band; m++) { if (swapped) { collision += fc3_normal_squared[ti * num_band0 * num_band * num_band + k * num_band * num_band + m * num_band + l] * g[ti * num_band0 * num_band * num_band + k * num_band * num_band + m * num_band + l] * inv_sinh[m] * unit_conversion_factor; } else { collision += fc3_normal_squared[ti * num_band0 * num_band * num_band + k * num_band * num_band + l * num_band + m] * g[ti * num_band0 * num_band * num_band + k * num_band * num_band + l * num_band + m] * inv_sinh[m] * unit_conversion_factor; } } for (m = 0; m < 3; m++) { for (n = 0; n < 3; n++) { collision_matrix[k * 3 * num_ir_gp * num_band * 3 + m * num_ir_gp * num_band * 3 + i * num_band * 3 + l * 3 + n] += collision * rotations_cartesian[j * 9 + m * 3 + n]; } } } } } free(inv_sinh); inv_sinh = NULL; } free(gp2tp_map); gp2tp_map = NULL; } static void get_reducible_collision_matrix( double *collision_matrix, const double *fc3_normal_squared, const long num_band0, const long num_band, const double *frequencies, const long (*triplets)[3], const long *triplets_map, const long num_gp, const long *map_q, const double *g, const double temperature, const double unit_conversion_factor, const double cutoff_frequency) { long i, j, k, l, ti, swapped; long *gp2tp_map; double collision; double *inv_sinh; gp2tp_map = create_gp2tp_map(triplets_map, num_gp); #ifdef _OPENMP #pragma omp parallel for private(j, k, l, ti, collision, inv_sinh) #endif for (i = 0; i < num_gp; i++) { inv_sinh = (double *)malloc(sizeof(double) * num_band); ti = gp2tp_map[triplets_map[i]]; swapped = get_inv_sinh(inv_sinh, i, temperature, frequencies, triplets[ti], triplets_map, map_q, num_band, cutoff_frequency); for (j = 0; j < num_band0; j++) { for (k = 0; k < num_band; k++) { collision = 0; for (l = 0; l < num_band; l++) { if (swapped) { collision += fc3_normal_squared[ti * num_band0 * num_band * 
num_band + j * num_band * num_band + l * num_band + k] * g[ti * num_band0 * num_band * num_band + j * num_band * num_band + l * num_band + k] * inv_sinh[l] * unit_conversion_factor; } else { collision += fc3_normal_squared[ti * num_band0 * num_band * num_band + j * num_band * num_band + k * num_band + l] * g[ti * num_band0 * num_band * num_band + j * num_band * num_band + k * num_band + l] * inv_sinh[l] * unit_conversion_factor; } } collision_matrix[j * num_gp * num_band + i * num_band + k] += collision; } } free(inv_sinh); inv_sinh = NULL; } free(gp2tp_map); gp2tp_map = NULL; } static long get_inv_sinh(double *inv_sinh, const long gp, const double temperature, const double *frequencies, const long triplet[3], const long *triplets_map, const long *map_q, const long num_band, const double cutoff_frequency) { long i, gp2, swapped; double f; /* This assumes the algorithm of get_ir_triplets_at_q_perm_q1q2, */ /* where defined triplets_map[gp] == triplets_map[map_q[gp]]. */ /* If triplets_map[map_q[gp]] != map_q[gp], q1 and q2 are permuted. */ if (triplets_map[gp] == map_q[gp]) { gp2 = triplet[2]; swapped = 0; } else { gp2 = triplet[1]; swapped = 1; } for (i = 0; i < num_band; i++) { f = frequencies[gp2 * num_band + i]; if (f > cutoff_frequency) { inv_sinh[i] = phonoc_inv_sinh_occupation(f, temperature); } else { inv_sinh[i] = 0; } } return swapped; } /* Symmetrically independent triplets are indexed. */ /* Inverse definition of ir_grid_points in get_BZ_triplets_at_q */ /* in triplet_grid.c. */ static long *create_gp2tp_map(const long *triplets_map, const long num_gp) { long i, num_ir; long *gp2tp_map; gp2tp_map = (long *)malloc(sizeof(long) * num_gp); num_ir = 0; for (i = 0; i < num_gp; i++) { if (triplets_map[i] == i) { gp2tp_map[i] = num_ir; num_ir++; } else { /* This should not be used. */ gp2tp_map[i] = -1; } } return gp2tp_map; }
GB_unop__acos_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__acos_fp64_fp64
// op(A') function: GB_unop_tran__acos_fp64_fp64

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = acos (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = acos (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ; \
    Cx [pC] = acos (z) ; \
}

// disable this operator and use the generic case if these conditions hold
// (GxB_NO_* are compile-time opt-outs set in GB_control.h; when disabled,
// both functions below return GrB_NO_VALUE so the caller falls back)
#define GB_DISABLE \
    (GxB_NO_ACOS || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__acos_fp64_fp64
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // element-wise: each iteration reads and writes only index p, so the
    // loop is safe both in parallel and when Cx aliases Ax
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        double z = aij ;
        Cx [p] = acos (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__acos_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose kernel is instantiated via the GB_* macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
9372.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ { #pragma omp parallel for schedule(dynamic, 4) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ #pragma omp parallel for schedule(dynamic, 4) for (i = 0; i < _PB_N; i++) { #pragma omp parallel for schedule(dynamic, 4) for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ #pragma omp parallel for schedule(dynamic, 4) for (j1 = 0; j1 < _PB_M; j1++) { #pragma omp parallel for schedule(dynamic, 4) for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
data.c
#include "data.h" #include "utils.h" #include "image.h" #include "dark_cuda.h" #include "box.h" #include "http_stream.h" #include <stdio.h> #include <stdlib.h> #include <string.h> extern int check_mistakes; #define NUMCHARS 37 pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; list *get_paths(char *filename) { char *path; FILE *file = fopen(filename, "r"); if(!file) file_error(filename); list *lines = make_list(); while((path=fgetl(file))){ list_insert(lines, path); } fclose(file); return lines; } /* char **get_random_paths_indexes(char **paths, int n, int m, int *indexes) { char **random_paths = calloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); for(i = 0; i < n; ++i){ int index = random_gen()%m; indexes[i] = index; random_paths[i] = paths[index]; if(i == 0) printf("%s\n", paths[index]); } pthread_mutex_unlock(&mutex); return random_paths; } */ char **get_sequential_paths(char **paths, int n, int m, int mini_batch, int augment_speed) { int speed = rand_int(1, augment_speed); if (speed < 1) speed = 1; char** sequentia_paths = (char**)xcalloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); //printf("n = %d, mini_batch = %d \n", n, mini_batch); unsigned int *start_time_indexes = (unsigned int *)xcalloc(mini_batch, sizeof(unsigned int)); for (i = 0; i < mini_batch; ++i) { start_time_indexes[i] = random_gen() % m; //printf(" start_time_indexes[i] = %u, ", start_time_indexes[i]); } for (i = 0; i < n; ++i) { do { int time_line_index = i % mini_batch; unsigned int index = start_time_indexes[time_line_index] % m; start_time_indexes[time_line_index] += speed; //int index = random_gen() % m; sequentia_paths[i] = paths[index]; //if(i == 0) printf("%s\n", paths[index]); //printf(" index = %u - grp: %s \n", index, paths[index]); if (strlen(sequentia_paths[i]) <= 4) printf(" Very small path to the image: %s \n", sequentia_paths[i]); } while (strlen(sequentia_paths[i]) == 0); } free(start_time_indexes); pthread_mutex_unlock(&mutex); return sequentia_paths; } char 
**get_random_paths(char **paths, int n, int m) { char** random_paths = (char**)xcalloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); //printf("n = %d \n", n); for(i = 0; i < n; ++i){ do { int index = random_gen() % m; random_paths[i] = paths[index]; //if(i == 0) printf("%s\n", paths[index]); //printf("grp: %s\n", paths[index]); if (strlen(random_paths[i]) <= 4) printf(" Very small path to the image: %s \n", random_paths[i]); } while (strlen(random_paths[i]) == 0); } pthread_mutex_unlock(&mutex); return random_paths; } char **find_replace_paths(char **paths, int n, char *find, char *replace) { char** replace_paths = (char**)xcalloc(n, sizeof(char*)); int i; for(i = 0; i < n; ++i){ char replaced[4096]; find_replace(paths[i], find, replace, replaced); replace_paths[i] = copy_string(replaced); } return replace_paths; } matrix load_image_paths_gray(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)xcalloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image(paths[i], w, h, 3); image gray = grayscale_image(im); free_image(im); im = gray; X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_paths(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)xcalloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], w, h); X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_augment_paths(char **paths, int n, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure, int dontuse_opencv) { int i; matrix X; X.rows = n; X.vals = (float**)xcalloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ int size = w > h ? 
w : h; image im; if(dontuse_opencv) im = load_image_stb_resize(paths[i], 0, 0, 3); else im = load_image_color(paths[i], 0, 0); image crop = random_augment_image(im, angle, aspect, min, max, size); int flip = use_flip ? random_gen() % 2 : 0; if (flip) flip_image(crop); random_distort_image(crop, hue, saturation, exposure); image sized = resize_image(crop, w, h); //show_image(im, "orig"); //show_image(sized, "sized"); //show_image(sized, paths[i]); //wait_until_press_key_cv(); //printf("w = %d, h = %d \n", sized.w, sized.h); free_image(im); free_image(crop); X.vals[i] = sized.data; X.cols = sized.h*sized.w*sized.c; } return X; } box_label *read_boxes(char *filename, int *n) { box_label* boxes = (box_label*)xcalloc(1, sizeof(box_label)); FILE *file = fopen(filename, "r"); if (!file) { printf("Can't open label file. (This can be normal only if you use MSCOCO): %s \n", filename); //file_error(filename); FILE* fw = fopen("bad.list", "a"); fwrite(filename, sizeof(char), strlen(filename), fw); char *new_line = "\n"; fwrite(new_line, sizeof(char), strlen(new_line), fw); fclose(fw); if (check_mistakes) { printf("\n Error in read_boxes() \n"); getchar(); } *n = 0; return boxes; } float x, y, h, w; int id; int count = 0; while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){ boxes = (box_label*)xrealloc(boxes, (count + 1) * sizeof(box_label)); boxes[count].id = id; boxes[count].x = x; boxes[count].y = y; boxes[count].h = h; boxes[count].w = w; boxes[count].left = x - w/2; boxes[count].right = x + w/2; boxes[count].top = y - h/2; boxes[count].bottom = y + h/2; ++count; } fclose(file); *n = count; return boxes; } void randomize_boxes(box_label *b, int n) { int i; for(i = 0; i < n; ++i){ box_label swap = b[i]; int index = random_gen()%n; b[i] = b[index]; b[index] = swap; } } void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip) { int i; for(i = 0; i < n; ++i){ if(boxes[i].x == 0 && boxes[i].y == 0) { boxes[i].x = 999999; 
boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } if ((boxes[i].x + boxes[i].w / 2) < 0 || (boxes[i].y + boxes[i].h / 2) < 0 || (boxes[i].x - boxes[i].w / 2) > 1 || (boxes[i].y - boxes[i].h / 2) > 1) { boxes[i].x = 999999; boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } boxes[i].left = boxes[i].left * sx - dx; boxes[i].right = boxes[i].right * sx - dx; boxes[i].top = boxes[i].top * sy - dy; boxes[i].bottom = boxes[i].bottom* sy - dy; if(flip){ float swap = boxes[i].left; boxes[i].left = 1. - boxes[i].right; boxes[i].right = 1. - swap; } boxes[i].left = constrain(0, 1, boxes[i].left); boxes[i].right = constrain(0, 1, boxes[i].right); boxes[i].top = constrain(0, 1, boxes[i].top); boxes[i].bottom = constrain(0, 1, boxes[i].bottom); boxes[i].x = (boxes[i].left+boxes[i].right)/2; boxes[i].y = (boxes[i].top+boxes[i].bottom)/2; boxes[i].w = (boxes[i].right - boxes[i].left); boxes[i].h = (boxes[i].bottom - boxes[i].top); boxes[i].w = constrain(0, 1, boxes[i].w); boxes[i].h = constrain(0, 1, boxes[i].h); } } void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count && i < 30; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .0 || h < .0) continue; int index = (4+classes) * i; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; if (id < classes) truth[index+id] = 1; } free(boxes); } void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); 
randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .001 || h < .001) continue; int col = (int)(x*num_boxes); int row = (int)(y*num_boxes); x = x*num_boxes - col; y = y*num_boxes - row; int index = (col+row*num_boxes)*(5+classes); if (truth[index]) continue; truth[index++] = 1; if (id < classes) truth[index+id] = 1; index += classes; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; } free(boxes); } int fill_truth_detection(const char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy, int net_w, int net_h) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; int i; box_label *boxes = read_boxes(labelpath, &count); int min_w_h = 0; float lowest_w = 1.F / net_w; float lowest_h = 1.F / net_h; randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); if (count > num_boxes) count = num_boxes; float x, y, w, h; int id; int sub = 0; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; // not detect small objects //if ((w < 0.001F || h < 0.001F)) continue; // if truth (box for object) is smaller than 1x1 pix char buff[256]; if (id >= classes) { printf("\n Wrong annotation: class_id = %d. But class_id should be [from 0 to %d], file: %s \n", id, (classes-1), labelpath); sprintf(buff, "echo %s \"Wrong annotation: class_id = %d. 
But class_id should be [from 0 to %d]\" >> bad_label.list", labelpath, id, (classes-1)); system(buff); if (check_mistakes) getchar(); ++sub; continue; } if ((w < lowest_w || h < lowest_h)) { //sprintf(buff, "echo %s \"Very small object: w < lowest_w OR h < lowest_h\" >> bad_label.list", labelpath); //system(buff); ++sub; continue; } if (x == 999999 || y == 999999) { printf("\n Wrong annotation: x = 0, y = 0, < 0 or > 1, file: %s \n", labelpath); sprintf(buff, "echo %s \"Wrong annotation: x = 0 or y = 0\" >> bad_label.list", labelpath); system(buff); ++sub; if (check_mistakes) getchar(); continue; } if (x <= 0 || x > 1 || y <= 0 || y > 1) { printf("\n Wrong annotation: x = %f, y = %f, file: %s \n", x, y, labelpath); sprintf(buff, "echo %s \"Wrong annotation: x = %f, y = %f\" >> bad_label.list", labelpath, x, y); system(buff); ++sub; if (check_mistakes) getchar(); continue; } if (w > 1) { printf("\n Wrong annotation: w = %f, file: %s \n", w, labelpath); sprintf(buff, "echo %s \"Wrong annotation: w = %f\" >> bad_label.list", labelpath, w); system(buff); w = 1; if (check_mistakes) getchar(); } if (h > 1) { printf("\n Wrong annotation: h = %f, file: %s \n", h, labelpath); sprintf(buff, "echo %s \"Wrong annotation: h = %f\" >> bad_label.list", labelpath, h); system(buff); h = 1; if (check_mistakes) getchar(); } if (x == 0) x += lowest_w; if (y == 0) y += lowest_h; truth[(i-sub)*5+0] = x; truth[(i-sub)*5+1] = y; truth[(i-sub)*5+2] = w; truth[(i-sub)*5+3] = h; truth[(i-sub)*5+4] = id; if (min_w_h == 0) min_w_h = w*net_w; if (min_w_h > w*net_w) min_w_h = w*net_w; if (min_w_h > h*net_h) min_w_h = h*net_h; } free(boxes); return min_w_h; } void print_letters(float *pred, int n) { int i; for(i = 0; i < n; ++i){ int index = max_index(pred+i*NUMCHARS, NUMCHARS); printf("%c", int_to_alphanum(index)); } printf("\n"); } void fill_truth_captcha(char *path, int n, float *truth) { char *begin = strrchr(path, '/'); ++begin; int i; for(i = 0; i < strlen(begin) && i < n && begin[i] != 
        '.'; ++i){
        int index = alphanum_to_int(begin[i]);
        if(index > 35) printf("Bad %c\n", begin[i]);  // outside [0-9a-z]
        truth[i*NUMCHARS+index] = 1;
    }
    // pad remaining slots with the "blank" class (last index)
    for(;i < n; ++i){
        truth[i*NUMCHARS + NUMCHARS-1] = 1;
    }
}

/* Loads n captcha images and their filename-derived one-hot labels. */
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = make_matrix(n, k*NUMCHARS);
    int i;
    for(i = 0; i < n; ++i){
        fill_truth_captcha(paths[i], k, d.y.vals[i]);
    }
    if(m) free(paths);
    return d;
}

/* Autoencoder-style captcha data: target y aliases the input X.
   NOTE(review): cols is hard-coded to 17100 — presumably a fixed
   network input size; confirm against the captcha cfg. */
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.X.cols = 17100;
    d.y = d.X;
    if(m) free(paths);
    return d;
}

/* Sets truth[i]=1 for every label string found as a substring of path.
   Warns (and lists matches) when not exactly one label matched. */
void fill_truth(char *path, char **labels, int k, float *truth)
{
    int i;
    memset(truth, 0, k*sizeof(float));
    int count = 0;
    for(i = 0; i < k; ++i){
        if(strstr(path, labels[i])){
            truth[i] = 1;
            ++count;
        }
    }
    if (count != 1) {
        printf("Too many or too few labels: %d, %s\n", count, path);
        count = 0;
        for (i = 0; i < k; ++i) {
            if (strstr(path, labels[i])) {
                printf("\t label %d: %s \n", count, labels[i]);
                count++;
            }
        }
    }
}

/* Like fill_truth(), but applies label smoothing: the matched class gets
   1-eps and every other class gets eps/(k-1). */
void fill_truth_smooth(char *path, char **labels, int k, float *truth, float label_smooth_eps)
{
    int i;
    memset(truth, 0, k * sizeof(float));
    int count = 0;
    for (i = 0; i < k; ++i) {
        if (strstr(path, labels[i])) {
            truth[i] = (1 - label_smooth_eps);
            ++count;
        }
        else {
            truth[i] = label_smooth_eps / (k - 1);
        }
    }
    if (count != 1) {
        printf("Too many or too few labels: %d, %s\n", count, path);
        count = 0;
        for (i = 0; i < k; ++i) {
            if (strstr(path, labels[i])) {
                printf("\t label %d: %s \n", count, labels[i]);
                count++;
            }
        }
    }
}

/* Propagates set labels up the class hierarchy (every ancestor of a set
   class becomes 1), then marks entirely-unset sibling groups with
   SECRET_NUM so the loss can ignore them. */
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
    int j;
    for(j = 0; j < k; ++j){
        if(truth[j]){
            int parent = hierarchy->parent[j];
            while(parent >= 0){
                truth[parent] = 1;
                parent = hierarchy->parent[parent];
            }
        }
    }
    int i;
    int count = 0;
    for(j = 0; j < hierarchy->groups; ++j){
        //printf("%d\n", count);
        int mask = 1;  // 1 while no member of this group is set
        for(i = 0; i < hierarchy->group_size[j]; ++i){
            if(truth[count + i]){
                mask = 0;
                break;
            }
        }
        if (mask) {
            for(i = 0; i < hierarchy->group_size[j]; ++i){
                truth[count + i] = SECRET_NUM;
            }
        }
        count += hierarchy->group_size[j];
    }
}

/* Builds the (n x k) smoothed label matrix for classification paths,
   optionally applying the class hierarchy. */
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy, float label_smooth_eps)
{
    matrix y = make_matrix(n, k);
    int i;
    for(i = 0; i < n && labels; ++i){
        fill_truth_smooth(paths[i], labels, k, y.vals[i], label_smooth_eps);
        if(hierarchy){
            fill_hierarchy(y.vals[i], k, hierarchy);
        }
    }
    return y;
}

/* Multi-label tags: reads integer tag ids from a per-image labels file
   (falls back to a "labels2" directory) and one-hots them into y. */
matrix load_tags_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i;
    int count = 0;
    for(i = 0; i < n; ++i){
        char label[4096];
        find_replace(paths[i], "imgs", "labels", label);
        find_replace(label, "_iconl.jpeg", ".txt", label);
        FILE *file = fopen(label, "r");
        if(!file){
            find_replace(label, "labels", "labels2", label);
            file = fopen(label, "r");
            if(!file) continue;  // image simply has no tag file
        }
        ++count;
        int tag;
        while(fscanf(file, "%d", &tag) == 1){
            if(tag < k){
                y.vals[i][tag] = 1;
            }
        }
        fclose(file);
    }
    printf("%d/%d\n", count, n);
    return y;
}

/* Reads label names (one per line); optionally reports how many. */
char **get_labels_custom(char *filename, int *size)
{
    list *plist = get_paths(filename);
    if(size) *size = plist->size;
    char **labels = (char **)list_to_array(plist);
    free_list(plist);
    return labels;
}

char **get_labels(char *filename)
{
    return get_labels_custom(filename, NULL);
}

/* Frees a data struct; shallow data frees only the row-pointer arrays,
   leaving the row buffers to their other owner. */
void free_data(data d)
{
    if(!d.shallow){
        free_matrix(d.X);
        free_matrix(d.y);
    }else{
        free(d.X.vals);
        free(d.y.vals);
    }
}

/* Loads a batch for the (older) region detector: random crop + flip +
   HSV distortion per image, truth laid out as size*size grid cells. */
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = size*size*(5+classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);

        int oh = orig.h;
        int ow = orig.w;
        // jitter each crop edge independently by up to +/- jitter fraction
        int dw = (ow*jitter);
        int dh = (oh*jitter);

        int pleft = rand_uniform(-dw, dw);
        int pright = rand_uniform(-dw, dw);
        int ptop = rand_uniform(-dh, dh);
        int pbot = rand_uniform(-dh, dh);

        int swidth = ow - pleft - pright;
        int sheight = oh - ptop - pbot;

        // crop size relative to the original (for truth remapping)
        float sx = (float)swidth / ow;
        float sy = (float)sheight / oh;

        int flip = random_gen()%2;
        image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

        // crop offset in crop-relative coordinates
        float dx = ((float)pleft/ow)/sx;
        float dy = ((float)ptop /oh)/sy;

        image sized = resize_image(cropped, w, h);
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;

        fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);

        free_image(orig);
        free_image(cropped);
    }
    free(random_paths);
    return d;
}

/* Pairwise-comparison data: each sample stacks two images channel-wise
   (6 channels) and, per class, encodes which image has the higher IoU
   (ties within .5 become SECRET_NUM = "ignore").
   NOTE(review): fp1/fp2 are used without a NULL check — missing label
   files would crash; presumably guaranteed to exist by the dataset. */
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
    if(m) paths = get_random_paths(paths, 2*n, m);  // two paths per sample
    int i,j;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*6;

    int k = 2*(classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image im1 = load_image_color(paths[i*2], w, h);
        image im2 = load_image_color(paths[i*2+1], w, h);

        d.X.vals[i] = (float*)xcalloc(d.X.cols, sizeof(float));
        memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float));
        memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));

        int id;
        float iou;

        char imlabel1[4096];
        char imlabel2[4096];
        find_replace(paths[i*2], "imgs", "labels", imlabel1);
        find_replace(imlabel1, "jpg", "txt", imlabel1);
        FILE *fp1 = fopen(imlabel1, "r");

        // keep the max IoU per class for image 1 (even slots)
        while(fscanf(fp1, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
        }

        find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
        find_replace(imlabel2, "jpg", "txt", imlabel2);
        FILE *fp2 = fopen(imlabel2, "r");

        // max IoU per class for image 2 (odd slots)
        while(fscanf(fp2, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
        }

        // binarize: clear winner -> 1/0, otherwise mark both "ignore"
        for (j = 0; j < classes; ++j){
            if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){
                d.y.vals[i][2*j] = 1;
                d.y.vals[i][2*j+1] = 0;
            } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
                d.y.vals[i][2*j] = 0;
                d.y.vals[i][2*j+1] = 1;
            } else {
                d.y.vals[i][2*j] = SECRET_NUM;
                d.y.vals[i][2*j+1] = SECRET_NUM;
            }
        }
        fclose(fp1);
        fclose(fp2);

        free_image(im1);
        free_image(im2);
    }
    if(m) free(paths);
    return d;
}

/* Loads a single random image at native resolution with jittered crop +
   flip for the SWAG-style detector (truth is 30 boxes of 4+classes). */
data load_data_swag(char **paths, int n, int classes, float jitter)
{
    int index = random_gen()%n;
    char *random_path = paths[index];

    image orig = load_image_color(random_path, 0, 0);
    int h = orig.h;
    int w = orig.w;

    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;

    d.X.rows = 1;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = (4+classes)*30;
    d.y = make_matrix(1, k);

    int dw = w*jitter;
    int dh = h*jitter;

    int pleft = rand_uniform(-dw, dw);
    int pright = rand_uniform(-dw, dw);
    int ptop = rand_uniform(-dh, dh);
    int pbot = rand_uniform(-dh, dh);

    int swidth = w - pleft - pright;
    int sheight = h - ptop - pbot;

    float sx = (float)swidth / w;
    float sy = (float)sheight / h;

    int flip = random_gen()%2;
    image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

    float dx = ((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;

    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);

    d.X.vals[0] = sized.data;

    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);

    free_image(orig);
    free_image(cropped);

    return d;
}

/* MixUp truth merge: appends old_truth's boxes after new_truth's boxes
   until the boxes limit or a zero-x terminator is hit. */
void blend_truth(float *new_truth, int boxes, float *old_truth)
{
    const int t_size = 4 + 1;  // x,y,w,h,id
    int count_new_truth = 0;
    int t;
    // count existing boxes (x == 0 terminates the list)
    for (t = 0; t < boxes; ++t) {
        float x = new_truth[t*(4 + 1)];
        if (!x) break;
        count_new_truth++;
    }
    for (t = count_new_truth; t < boxes; ++t) {
        float *new_truth_ptr = new_truth + t*t_size;
        float *old_truth_ptr = old_truth + (t - count_new_truth)*t_size;
        float x = old_truth_ptr[0];
        if (!x) break;

        new_truth_ptr[0] = old_truth_ptr[0];
        new_truth_ptr[1] = old_truth_ptr[1];
        new_truth_ptr[2] = old_truth_ptr[2];
        new_truth_ptr[3] = old_truth_ptr[3];
        new_truth_ptr[4] =
            old_truth_ptr[4];
    }
    //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t);
}

/* Mosaic truth merge: shifts old_truth boxes into the quadrant that the
   i_mixup-th source image occupies in the composed mosaic, clips them to
   the image, and appends those still fully inside (0<x,y<1; 0<w,h<1). */
void blend_truth_mosaic(float *new_truth, int boxes, float *old_truth, int w, int h, float cut_x, float cut_y, int i_mixup,
    int left_shift, int right_shift, int top_shift, int bot_shift)
{
    const int t_size = 4 + 1;  // x,y,w,h,id
    int count_new_truth = 0;
    int t;
    // count boxes already present (x == 0 terminates)
    for (t = 0; t < boxes; ++t) {
        float x = new_truth[t*(4 + 1)];
        if (!x) break;
        count_new_truth++;
    }
    int new_t = count_new_truth;  // write cursor; only advances on accepted boxes
    for (t = count_new_truth; t < boxes; ++t) {
        float *new_truth_ptr = new_truth + new_t*t_size;
        new_truth_ptr[0] = 0;
        float *old_truth_ptr = old_truth + (t - count_new_truth)*t_size;
        float x = old_truth_ptr[0];
        if (!x) break;

        float xb = old_truth_ptr[0];
        float yb = old_truth_ptr[1];
        float wb = old_truth_ptr[2];
        float hb = old_truth_ptr[3];

        // shift 4 images
        if (i_mixup == 0) {
            xb = xb - (float)(w - cut_x - right_shift) / w;
            yb = yb - (float)(h - cut_y - bot_shift) / h;
        }
        if (i_mixup == 1) {
            xb = xb + (float)(cut_x - left_shift) / w;
            yb = yb - (float)(h - cut_y - bot_shift) / h;
        }
        if (i_mixup == 2) {
            xb = xb - (float)(w - cut_x - right_shift) / w;
            yb = yb + (float)(cut_y - top_shift) / h;
        }
        if (i_mixup == 3) {
            xb = xb + (float)(cut_x - left_shift) / w;
            yb = yb + (float)(cut_y - top_shift) / h;
        }

        // box edges in pixels after the shift
        int left = (xb - wb / 2)*w;
        int right = (xb + wb / 2)*w;
        int top = (yb - hb / 2)*h;
        int bot = (yb + hb / 2)*h;

        /*
        { // fix out of Mosaic-bound
            float left_bound = 0, right_bound = 0, top_bound = 0, bot_bound = 0;
            if (i_mixup == 0) {
                left_bound = 0;
                right_bound = cut_x;
                top_bound = 0;
                bot_bound = cut_y;
            }
            if (i_mixup == 1) {
                left_bound = cut_x;
                right_bound = w;
                top_bound = 0;
                bot_bound = cut_y;
            }
            if (i_mixup == 2) {
                left_bound = 0;
                right_bound = cut_x;
                top_bound = cut_y;
                bot_bound = h;
            }
            if (i_mixup == 3) {
                left_bound = cut_x;
                right_bound = w;
                top_bound = cut_y;
                bot_bound = h;
            }

            if (left < left_bound) {
                //printf(" i_mixup = %d, left = %d, left_bound = %f \n", i_mixup, left, left_bound);
                left = left_bound;
            }
            if (right > right_bound) {
                //printf(" i_mixup = %d, right = %d, right_bound = %f \n", i_mixup, right, right_bound);
                right = right_bound;
            }
            if (top < top_bound) top = top_bound;
            if (bot > bot_bound) bot = bot_bound;

            xb = ((float)(right + left) / 2) / w;
            wb = ((float)(right - left)) / w;
            yb = ((float)(bot + top) / 2) / h;
            hb = ((float)(bot - top)) / h;
        }
        */

        { // fix out of bound
            if (left < 0) {
                float diff = (float)left / w;
                xb = xb - diff / 2;
                wb = wb + diff;
            }
            if (right > w) {
                float diff = (float)(right - w) / w;
                xb = xb - diff / 2;
                wb = wb - diff;
            }
            if (top < 0) {
                float diff = (float)top / h;
                yb = yb - diff / 2;
                hb = hb + diff;
            }
            if (bot > h) {
                float diff = (float)(bot - h) / h;
                yb = yb - diff / 2;
                hb = hb - diff;
            }

            left = (xb - wb / 2)*w;
            right = (xb + wb / 2)*w;
            top = (yb - hb / 2)*h;
            bot = (yb + hb / 2)*h;
        }

        // leave only within the image
        if(left >= 0 && right <= w && top >= 0 && bot <= h &&
            wb > 0 && wb < 1 && hb > 0 && hb < 1 &&
            xb > 0 && xb < 1 && yb > 0 && yb < 1)
        {
            new_truth_ptr[0] = xb;
            new_truth_ptr[1] = yb;
            new_truth_ptr[2] = wb;
            new_truth_ptr[3] = hb;
            new_truth_ptr[4] = old_truth_ptr[4];
            new_t++;
        }
    }
    //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t);
}

#ifdef OPENCV

#include "http_stream.h"

/* OpenCV detection loader: full augmentation pipeline (jitter, resize,
   flip, HSV, blur, gaussian noise) plus MixUp (use_mixup==1) and 4-image
   Mosaic (use_mixup==3). Returns n augmented samples with 5*boxes truth. */
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_gaussian_noise, int use_blur, int use_mixup,
    float jitter, float resize, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs)
{
    const int random_index = random_gen();
    c = c ?
        c : 3;  // default to 3 channels

    // CutMix is not implemented for the detector: 2 -> off, 4 -> Mosaic
    if (use_mixup == 2 || use_mixup == 4) {
        printf("\n cutmix=1 - isn't supported for Detector (use cutmix=1 only for Classifier) \n");
        if (check_mistakes) getchar();
        if(use_mixup == 2) use_mixup = 0;
        else use_mixup = 3;
    }
    if (use_mixup == 3 && letter_box) {
        printf("\n Combination: letter_box=1 & mosaic=1 - isn't supported, use only 1 of these parameters \n");
        if (check_mistakes) getchar();
        exit(0);
    }
    // apply mixup/mosaic only on ~half of the batches
    if (random_gen() % 2 == 0) use_mixup = 0;
    int i;

    int *cut_x = NULL, *cut_y = NULL;
    if (use_mixup == 3) {
        cut_x = (int*)calloc(n, sizeof(int));
        cut_y = (int*)calloc(n, sizeof(int));
        const float min_offset = 0.2; // 20%
        // per-sample mosaic crossing point, kept away from the borders
        for (i = 0; i < n; ++i) {
            cut_x[i] = rand_int(w*min_offset, w*(1 - min_offset));
            cut_y[i] = rand_int(h*min_offset, h*(1 - min_offset));
        }
    }

    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*c;

    float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0;
    float resize_r1 = 0, resize_r2 = 0;
    float dhue = 0, dsat = 0, dexp = 0, flip = 0, blur = 0;
    int augmentation_calculated = 0, gaussian_noise = 0;

    d.y = make_matrix(n, 5*boxes);
    int i_mixup = 0;
    // one pass per mixed-in source image (1 pass normal, 2 mixup, 4 mosaic)
    for (i_mixup = 0; i_mixup <= use_mixup; i_mixup++) {
        if (i_mixup) augmentation_calculated = 0;   // recalculate augmentation for the 2nd sequence if(track==1)
        char **random_paths;
        if (track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
        else random_paths = get_random_paths(paths, n, m);

        for (i = 0; i < n; ++i) {
            float *truth = (float*)xcalloc(5 * boxes, sizeof(float));
            const char *filename = random_paths[i];

            int flag = (c >= 3);
            mat_cv *src;
            src = load_image_mat_cv(filename, flag);
            if (src == NULL) {
                printf("\n Error in load_data_detection() - OpenCV \n");
                fflush(stdout);
                if (check_mistakes) {
                    getchar();
                }
                continue;  // skip unreadable image; truth stays zeroed
            }

            int oh = get_height_mat(src);
            int ow = get_width_mat(src);

            int dw = (ow*jitter);
            int dh = (oh*jitter);

            // normalize resize so resize_down <= 1 <= resize_up
            float resize_down = resize, resize_up = resize;
            if (resize_down > 1.0) resize_down = 1 / resize_down;
            int min_rdw = ow*(1 - (1 / resize_down)) / 2;   // < 0
            int min_rdh = oh*(1 - (1 / resize_down)) / 2;   // < 0

            if (resize_up < 1.0) resize_up = 1 / resize_up;
            int max_rdw = ow*(1 - (1 / resize_up)) / 2;     // > 0
            int max_rdh = oh*(1 - (1 / resize_up)) / 2;     // > 0
            //printf(" down = %f, up = %f \n", (1 - (1 / resize_down)) / 2, (1 - (1 / resize_up)) / 2);

            // when tracking sequences, reuse one set of random draws
            if (!augmentation_calculated || !track)
            {
                augmentation_calculated = 1;
                resize_r1 = random_float();
                resize_r2 = random_float();
                r1 = random_float();
                r2 = random_float();
                r3 = random_float();
                r4 = random_float();
                r_scale = random_float();

                dhue = rand_uniform_strong(-hue, hue);
                dsat = rand_scale(saturation);
                dexp = rand_scale(exposure);

                flip = use_flip ? random_gen() % 2 : 0;

                if (use_blur) {
                    int tmp_blur = rand_int(0, 2);  // 0 - disable, 1 - blur background, 2 - blur the whole image
                    if (tmp_blur == 0) blur = 0;
                    else if (tmp_blur == 1) blur = 1;
                    else blur = use_blur;
                }

                if (use_gaussian_noise && rand_int(0, 1) == 1) gaussian_noise = use_gaussian_noise;
                else gaussian_noise = 0;
            }

            int pleft = rand_precalc_random(-dw, dw, r1);
            int pright = rand_precalc_random(-dw, dw, r2);
            int ptop = rand_precalc_random(-dh, dh, r3);
            int pbot = rand_precalc_random(-dh, dh, r4);

            if (resize < 1) {
                // downsize only
                pleft += rand_precalc_random(min_rdw, 0, resize_r1);
                pright += rand_precalc_random(min_rdw, 0, resize_r2);
                ptop += rand_precalc_random(min_rdh, 0, resize_r1);
                pbot += rand_precalc_random(min_rdh, 0, resize_r2);
            }
            else {
                pleft += rand_precalc_random(min_rdw, max_rdw, resize_r1);
                pright += rand_precalc_random(min_rdw, max_rdw, resize_r2);
                ptop += rand_precalc_random(min_rdh, max_rdh, resize_r1);
                pbot += rand_precalc_random(min_rdh, max_rdh, resize_r2);
            }

            //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh);

            //float scale = rand_precalc_random(.25, 2, r_scale); // unused currently

            if (letter_box)
            {
                // pad the crop so the image keeps its aspect ratio in the net
                float img_ar = (float)ow / (float)oh;
                float net_ar = (float)w / (float)h;
                float result_ar =
                    img_ar / net_ar;
                //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
                if (result_ar > 1)  // sheight - should be increased
                {
                    float oh_tmp = ow / net_ar;
                    float delta_h = (oh_tmp - oh)/2;
                    ptop = ptop - delta_h;
                    pbot = pbot - delta_h;
                    //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
                }
                else  // swidth - should be increased
                {
                    float ow_tmp = oh * net_ar;
                    float delta_w = (ow_tmp - ow)/2;
                    pleft = pleft - delta_w;
                    pright = pright - delta_w;
                    //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
                }
                //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh);
            }

            /*
            // move each 2nd image to the corner - so that most of it was visible
            if (use_mixup == 3 && random_gen() % 2 == 0) {
                if (flip) {
                    if (i_mixup == 0) pleft += pright, pright = 0, pbot += ptop, ptop = 0;
                    if (i_mixup == 1) pright += pleft, pleft = 0, pbot += ptop, ptop = 0;
                    if (i_mixup == 2) pleft += pright, pright = 0, ptop += pbot, pbot = 0;
                    if (i_mixup == 3) pright += pleft, pleft = 0, ptop += pbot, pbot = 0;
                }
                else {
                    if (i_mixup == 0) pright += pleft, pleft = 0, pbot += ptop, ptop = 0;
                    if (i_mixup == 1) pleft += pright, pright = 0, pbot += ptop, ptop = 0;
                    if (i_mixup == 2) pright += pleft, pleft = 0, ptop += pbot, pbot = 0;
                    if (i_mixup == 3) pleft += pright, pright = 0, ptop += pbot, pbot = 0;
                }
            }
            */

            int swidth = ow - pleft - pright;
            int sheight = oh - ptop - pbot;

            float sx = (float)swidth / ow;
            float sy = (float)sheight / oh;

            float dx = ((float)pleft / ow) / sx;
            float dy = ((float)ptop / oh) / sy;

            int min_w_h = fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);

            if ((min_w_h / 8) < blur && blur > 1) blur = min_w_h / 8;   // disable blur if one of the objects is too small

            image ai = image_data_augmentation(src, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp,
                gaussian_noise, blur, boxes, truth);

            if (use_mixup == 0) {
                // plain augmentation: take the image and truth as-is
                d.X.vals[i] = ai.data;
                memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float));
            }
            else if (use_mixup == 1) {
                if (i_mixup == 0) {
                    // first pass: store the base image
                    d.X.vals[i] = ai.data;
                    memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float));
                }
                else if (i_mixup == 1) {
                    // second pass: 50/50 pixel blend + merged truth
                    image old_img = make_empty_image(w, h, c);
                    old_img.data = d.X.vals[i];
                    //show_image(ai, "new");
                    //show_image(old_img, "old");
                    //wait_until_press_key_cv();
                    blend_images_cv(ai, 0.5, old_img, 0.5);
                    blend_truth(d.y.vals[i], boxes, truth);
                    free_image(old_img);
                    d.X.vals[i] = ai.data;
                }
            }
            else if (use_mixup == 3) {
                // Mosaic: compose 4 passes into quadrants around (cut_x, cut_y)
                if (i_mixup == 0) {
                    image tmp_img = make_image(w, h, c);
                    d.X.vals[i] = tmp_img.data;
                }

                if (flip) {
                    int tmp = pleft;
                    pleft = pright;
                    pright = tmp;
                }

                const int left_shift = min_val_cmp(cut_x[i], max_val_cmp(0, (-pleft*w / ow)));
                const int top_shift = min_val_cmp(cut_y[i], max_val_cmp(0, (-ptop*h / oh)));
                const int right_shift = min_val_cmp((w - cut_x[i]), max_val_cmp(0, (-pright*w / ow)));
                const int bot_shift = min_val_cmp(h - cut_y[i], max_val_cmp(0, (-pbot*h / oh)));

                int k, x, y;
                // copy this pass's image rows into its quadrant of the canvas
                for (k = 0; k < c; ++k) {
                    for (y = 0; y < h; ++y) {
                        int j = y*w + k*w*h;
                        if (i_mixup == 0 && y < cut_y[i]) {
                            int j_src = (w - cut_x[i] - right_shift) + (y + h - cut_y[i] - bot_shift)*w + k*w*h;
                            memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float));
                        }
                        if (i_mixup == 1 && y < cut_y[i]) {
                            int j_src = left_shift + (y + h - cut_y[i] - bot_shift)*w + k*w*h;
                            memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w-cut_x[i]) * sizeof(float));
                        }
                        if (i_mixup == 2 && y >= cut_y[i]) {
                            int j_src = (w - cut_x[i] - right_shift) + (top_shift + y - cut_y[i])*w + k*w*h;
                            memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float));
                        }
                        if (i_mixup == 3 && y >= cut_y[i]) {
                            int j_src = left_shift + (top_shift + y - cut_y[i])*w + k*w*h;
                            memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w - cut_x[i]) * sizeof(float));
                        }
                    }
                }

                blend_truth_mosaic(d.y.vals[i], boxes, truth, w, h, cut_x[i], cut_y[i], i_mixup, left_shift, right_shift, top_shift, bot_shift);

                free_image(ai);
                ai.data = d.X.vals[i];
            }

            if (show_imgs && i_mixup == use_mixup)   // delete i_mixup
            {
                // debug: save (and optionally display) the augmented sample
                image tmp_ai = copy_image(ai);
                char buff[1000];
                //sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen());
                sprintf(buff, "aug_%d_%d_%d", random_index, i, random_gen());
                int t;
                for (t = 0; t < boxes; ++t) {
                    box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1);
                    if (!b.x) break;
                    int left = (b.x - b.w / 2.)*ai.w;
                    int right = (b.x + b.w / 2.)*ai.w;
                    int top = (b.y - b.h / 2.)*ai.h;
                    int bot = (b.y + b.h / 2.)*ai.h;
                    draw_box_width(tmp_ai, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
                }
                save_image(tmp_ai, buff);
                if (show_imgs == 1) {
                    //char buff_src[1000];
                    //sprintf(buff_src, "src_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen());
                    //show_image_mat(src, buff_src);
                    show_image(tmp_ai, buff);
                    wait_until_press_key_cv();
                }
                printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n");
                free_image(tmp_ai);
            }

            release_mat(&src);
            free(truth);
        }
        if (random_paths) free(random_paths);
    }

    return d;
}
#else    // OPENCV

/* Element-wise image blend: new = new*alpha + old*beta. */
void blend_images(image new_img, float alpha, image old_img, float beta)
{
    int data_size = new_img.w * new_img.h * new_img.c;
    int i;
    #pragma omp parallel for
    for (i = 0; i < data_size; ++i)
        new_img.data[i] = new_img.data[i] * alpha + old_img.data[i] * beta;
}

/* Non-OpenCV detection loader: same jitter/flip/HSV pipeline using the
   built-in image code; only MixUp is supported (CutMix/Mosaic exit). */
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int gaussian_noise, int use_blur, int use_mixup,
    float jitter, float resize, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs)
{
    const int random_index = random_gen();
    c = c ? c : 3;  // default to 3 channels
    char **random_paths;
    char **mixup_random_paths = NULL;
    if(track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
    else random_paths = get_random_paths(paths, n, m);

    //assert(use_mixup < 2);
    if (use_mixup == 2) {
        printf("\n cutmix=1 - isn't supported for Detector \n");
        exit(0);
    }
    if (use_mixup == 3 || use_mixup == 4) {
        printf("\n mosaic=1 - compile Darknet with OpenCV for using mosaic=1 \n");
        exit(0);
    }
    // apply mixup on roughly half of the batches
    int mixup = use_mixup ?
        random_gen() % 2 : 0;
    //printf("\n mixup = %d \n", mixup);
    if (mixup) {
        // second, independent set of source images to blend in
        if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
        else mixup_random_paths = get_random_paths(paths, n, m);
    }

    int i;
    data d = { 0 };
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*c;

    float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale;
    float resize_r1 = 0, resize_r2 = 0;
    float dhue = 0, dsat = 0, dexp = 0, flip = 0;
    int augmentation_calculated = 0;

    d.y = make_matrix(n, 5 * boxes);
    int i_mixup = 0;
    for (i_mixup = 0; i_mixup <= mixup; i_mixup++) {
        if (i_mixup) augmentation_calculated = 0;  // new draws for the 2nd pass
        for (i = 0; i < n; ++i) {
            float *truth = (float*)xcalloc(5 * boxes, sizeof(float));
            char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i];

            image orig = load_image(filename, 0, 0, c);

            int oh = orig.h;
            int ow = orig.w;

            int dw = (ow*jitter);
            int dh = (oh*jitter);

            // normalize resize so resize_down <= 1 <= resize_up
            float resize_down = resize, resize_up = resize;
            if (resize_down > 1.0) resize_down = 1 / resize_down;
            int min_rdw = ow*(1 - (1 / resize_down)) / 2;
            int min_rdh = oh*(1 - (1 / resize_down)) / 2;

            if (resize_up < 1.0) resize_up = 1 / resize_up;
            int max_rdw = ow*(1 - (1 / resize_up)) / 2;
            int max_rdh = oh*(1 - (1 / resize_up)) / 2;

            // when tracking sequences, reuse one set of random draws
            if (!augmentation_calculated || !track)
            {
                augmentation_calculated = 1;
                resize_r1 = random_float();
                resize_r2 = random_float();
                r1 = random_float();
                r2 = random_float();
                r3 = random_float();
                r4 = random_float();
                r_scale = random_float();

                dhue = rand_uniform_strong(-hue, hue);
                dsat = rand_scale(saturation);
                dexp = rand_scale(exposure);

                flip = use_flip ? random_gen() % 2 : 0;
            }

            int pleft = rand_precalc_random(-dw, dw, r1);
            int pright = rand_precalc_random(-dw, dw, r2);
            int ptop = rand_precalc_random(-dh, dh, r3);
            int pbot = rand_precalc_random(-dh, dh, r4);

            if (resize < 1) {
                // downsize only
                pleft += rand_precalc_random(min_rdw, 0, resize_r1);
                pright += rand_precalc_random(min_rdw, 0, resize_r2);
                ptop += rand_precalc_random(min_rdh, 0, resize_r1);
                pbot += rand_precalc_random(min_rdh, 0, resize_r2);
            }
            else {
                pleft += rand_precalc_random(min_rdw, max_rdw, resize_r1);
                pright += rand_precalc_random(min_rdw, max_rdw, resize_r2);
                ptop += rand_precalc_random(min_rdh, max_rdh, resize_r1);
                pbot += rand_precalc_random(min_rdh, max_rdh, resize_r2);
            }

            if (letter_box)
            {
                // pad the crop so the image keeps its aspect ratio in the net
                float img_ar = (float)ow / (float)oh;
                float net_ar = (float)w / (float)h;
                float result_ar = img_ar / net_ar;
                //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
                if (result_ar > 1)  // sheight - should be increased
                {
                    float oh_tmp = ow / net_ar;
                    float delta_h = (oh_tmp - oh) / 2;
                    ptop = ptop - delta_h;
                    pbot = pbot - delta_h;
                    //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
                }
                else  // swidth - should be increased
                {
                    float ow_tmp = oh * net_ar;
                    float delta_w = (ow_tmp - ow) / 2;
                    pleft = pleft - delta_w;
                    pright = pright - delta_w;
                    //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
                }
            }

            int swidth = ow - pleft - pright;
            int sheight = oh - ptop - pbot;

            float sx = (float)swidth / ow;
            float sy = (float)sheight / oh;

            image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

            float dx = ((float)pleft / ow) / sx;
            float dy = ((float)ptop / oh) / sy;

            image sized = resize_image(cropped, w, h);
            if (flip) flip_image(sized);
            distort_image(sized, dhue, dsat, dexp);
            //random_distort_image(sized, hue, saturation, exposure);

            fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);

            if (i_mixup) {
                // second pass: 50/50 blend with the stored first-pass image
                image old_img = sized;
                old_img.data = d.X.vals[i];
                //show_image(sized, "new");
                //show_image(old_img, "old");
                //wait_until_press_key_cv();
                blend_images(sized, 0.5, old_img, 0.5);
                blend_truth(truth, boxes, d.y.vals[i]);
                free_image(old_img);
            }

            d.X.vals[i] = sized.data;
            memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float));

            if (show_imgs)// && i_mixup)
            {
                // debug: save (and optionally display) the augmented sample
                char buff[1000];
                sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg(filename), random_gen());
                int t;
                for (t = 0; t < boxes; ++t) {
                    box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1);
                    if (!b.x) break;
                    int left = (b.x - b.w / 2.)*sized.w;
                    int right = (b.x + b.w / 2.)*sized.w;
                    int top = (b.y - b.h / 2.)*sized.h;
                    int bot = (b.y + b.h / 2.)*sized.h;
                    draw_box_width(sized, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
                }
                save_image(sized, buff);
                if (show_imgs == 1) {
                    show_image(sized, buff);
                    wait_until_press_key_cv();
                }
                printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Press Enter: \n");
                //getchar();
            }

            free_image(orig);
            free_image(cropped);
            free(truth);
        }
    }
    free(random_paths);
    if (mixup_random_paths) free(mixup_random_paths);
    return d;
}
#endif    // OPENCV

/* pthread entry point: dispatches on load_args.type to the matching
   loader and stores the result through the pointers in the args struct.
   Frees its heap-allocated args copy before returning. */
void *load_thread(void *ptr)
{
    //srand(time(0));
    //printf("Loading data: %d\n", random_gen());
    load_args a = *(struct load_args*)ptr;
    // 0 means "unset" for these augmentation factors; 1 is the identity
    if(a.exposure == 0) a.exposure = 1;
    if(a.saturation == 0) a.saturation = 1;
    if(a.aspect == 0) a.aspect = 1;

    if (a.type == OLD_CLASSIFICATION_DATA){
        *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
    } else if (a.type == CLASSIFICATION_DATA){
        *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.mixup, a.blur, a.show_imgs, a.label_smooth_eps, a.dontuse_opencv);
    } else if (a.type == SUPER_DATA){
        *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
    } else if (a.type == WRITING_DATA){
        *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
    } else if (a.type == REGION_DATA){
        *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
    } else if (a.type == DETECTION_DATA){
        *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.c, a.num_boxes, a.classes, a.flip, a.gaussian_noise, a.blur, a.mixup, a.jitter, a.resize, a.hue, a.saturation, a.exposure, a.mini_batch, a.track, a.augment_speed, a.letter_box, a.show_imgs);
    } else if (a.type == SWAG_DATA){
        *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
    } else if (a.type == COMPARE_DATA){
        *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
    } else if (a.type == IMAGE_DATA){
        *(a.im) = load_image(a.path, 0, 0, a.c);
        *(a.resized) = resize_image(*(a.im), a.w, a.h);
    }else if (a.type == LETTERBOX_DATA) {
        *(a.im) = load_image(a.path, 0, 0, a.c);
        *(a.resized) = letterbox_image(*(a.im), a.w, a.h);
    } else if (a.type == TAG_DATA){
        *a.d = load_data_tag(a.paths, a.n, a.m, a.classes,
            a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    }
    free(ptr);
    return 0;
}

/* Spawns a detached-style worker running load_thread() on a heap copy of
   args; caller joins via the returned pthread_t. */
pthread_t load_data_in_thread(load_args args)
{
    pthread_t thread;
    struct load_args* ptr = (load_args*)xcalloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed");
    return thread;
}

/* Permanent loader thread-pool state (created lazily in load_threads):
   run_load_data[i] is the per-worker "go" flag, args_swap[i] the work
   item, flag_exit the shutdown signal. All flag access goes through the
   custom_atomic_* helpers; args_swap is guarded by mtx_load_data. */
static const int thread_wait_ms = 5;
static volatile int flag_exit;
static volatile int * run_load_data = NULL;
static load_args * args_swap = NULL;
static pthread_t* threads = NULL;

pthread_mutex_t mtx_load_data = PTHREAD_MUTEX_INITIALIZER;

/* Worker loop for the permanent pool: spin-sleep until its run flag is
   set, copy its args under the mutex, run one load, clear the flag. */
void *run_thread_loop(void *ptr)
{
    const int i = *(int *)ptr;  // this worker's slot index

    while (!custom_atomic_load_int(&flag_exit)) {
        while (!custom_atomic_load_int(&run_load_data[i])) {
            if (custom_atomic_load_int(&flag_exit)) {
                free(ptr);
                return 0;
            }
            this_thread_sleep_for(thread_wait_ms);
        }

        pthread_mutex_lock(&mtx_load_data);
        load_args *args_local = (load_args *)xcalloc(1, sizeof(load_args));
        *args_local = args_swap[i];
        pthread_mutex_unlock(&mtx_load_data);

        load_thread(args_local);  // frees args_local itself

        custom_atomic_store_int(&run_load_data[i], 0);  // signal "done"
    }
    free(ptr);
    return 0;
}

/* Splits a load request of args.n samples across the permanent worker
   pool (created on first call), waits for all parts, and concatenates
   the per-worker buffers into *args.d. */
void *load_threads(void *ptr)
{
    //srand(time(0));
    int i;
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    data *out = args.d;
    int total = args.n;
    free(ptr);
    data* buffers = (data*)xcalloc(args.threads, sizeof(data));

    if (!threads) {
        // lazily create the permanent pool on first use
        threads = (pthread_t*)xcalloc(args.threads, sizeof(pthread_t));
        run_load_data = (volatile int *)xcalloc(args.threads, sizeof(int));
        args_swap = (load_args *)xcalloc(args.threads, sizeof(load_args));
        fprintf(stderr, " Create %d permanent cpu-threads \n", args.threads);

        for (i = 0; i < args.threads; ++i) {
            int* ptr = (int*)xcalloc(1, sizeof(int));
            *ptr = i;
            if (pthread_create(&threads[i], 0, run_thread_loop, ptr)) error("Thread creation failed");
        }
    }

    for (i = 0; i < args.threads; ++i) {
        args.d = buffers + i;
        // partition total samples as evenly as possible across workers
        args.n = (i + 1) * total / args.threads - i * total / args.threads;
        pthread_mutex_lock(&mtx_load_data);
        args_swap[i] = args;
        pthread_mutex_unlock(&mtx_load_data);

        custom_atomic_store_int(&run_load_data[i], 1);  // run thread
    }

    for (i = 0; i < args.threads; ++i) {
        while (custom_atomic_load_int(&run_load_data[i])) this_thread_sleep_for(thread_wait_ms);   // join
    }

    /*
    pthread_t* threads = (pthread_t*)xcalloc(args.threads, sizeof(pthread_t));
    for(i = 0; i < args.threads; ++i){
        args.d = buffers + i;
        args.n = (i+1) * total/args.threads - i * total/args.threads;
        threads[i] = load_data_in_thread(args);
    }
    for(i = 0; i < args.threads; ++i){
        pthread_join(threads[i], 0);
    }
    */

    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;
    for(i = 0; i < args.threads; ++i){
        buffers[i].shallow = 1;  // rows now owned by *out; free only pointers
        free_data(buffers[i]);
    }
    free(buffers);
    //free(threads);
    return 0;
}

/* Shuts down the permanent pool: raises flag_exit, joins every worker,
   and releases the pool's shared arrays so it can be recreated later. */
void free_load_threads(void *ptr)
{
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    int i;
    if (threads) {
        custom_atomic_store_int(&flag_exit, 1);
        for (i = 0; i < args.threads; ++i) {
            pthread_join(threads[i], 0);
        }
        free((void*)run_load_data);
        free(args_swap);
        free(threads);
        threads = NULL;
        custom_atomic_store_int(&flag_exit, 0);
    }
}

/* Kicks off an asynchronous multi-threaded load; caller joins the
   returned thread when the data is needed. */
pthread_t load_data(load_args args)
{
    pthread_t thread;
    struct load_args* ptr = (load_args*)xcalloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed");
    return thread;
}

/* Image-to-image data: X is the input .png, y the grayscale
   "-label.png" counterpart resized to out_w x out_h. */
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    if(m) paths = get_random_paths(paths, n, m);
    char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
    if(m) free(paths);
    int i;
    for(i = 0; i < n; ++i) free(replace_paths[i]);
    free(replace_paths);
    return d;
}

/* Plain classification data without augmentation (legacy path). */
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow =
0; d.X = load_image_paths(paths, n, w, h); d.y = load_labels_paths(paths, n, labels, k, 0, 0); if(m) free(paths); return d; } /* data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { data d = {0}; d.indexes = calloc(n, sizeof(int)); if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes); d.shallow = 0; d.X = load_image_augment_paths(paths, n, flip, min, max, size, angle, aspect, hue, saturation, exposure); d.y = load_labels_paths(paths, n, labels, k); if(m) free(paths); return d; } */ data load_data_super(char **paths, int n, int m, int w, int h, int scale) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; int i; d.X.rows = n; d.X.vals = (float**)xcalloc(n, sizeof(float*)); d.X.cols = w*h*3; d.y.rows = n; d.y.vals = (float**)xcalloc(n, sizeof(float*)); d.y.cols = w*scale * h*scale * 3; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], 0, 0); image crop = random_crop_image(im, w*scale, h*scale); int flip = random_gen()%2; if (flip) flip_image(crop); image resize = resize_image(crop, w, h); d.X.vals[i] = resize.data; d.y.vals[i] = crop.data; free_image(im); } if(m) free(paths); return d; } data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure, int use_mixup, int use_blur, int show_imgs, float label_smooth_eps, int dontuse_opencv) { char **paths_stored = paths; if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv); d.y = load_labels_paths(paths, n, labels, k, hierarchy, label_smooth_eps); if (use_mixup && rand_int(0, 1)) { char **paths_mix = get_random_paths(paths_stored, n, m); data d2 = { 0 }; d2.shallow = 0; 
d2.X = load_image_augment_paths(paths_mix, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv); d2.y = load_labels_paths(paths_mix, n, labels, k, hierarchy, label_smooth_eps); free(paths_mix); data d3 = { 0 }; d3.shallow = 0; data d4 = { 0 }; d4.shallow = 0; if (use_mixup >= 3) { char **paths_mix3 = get_random_paths(paths_stored, n, m); d3.X = load_image_augment_paths(paths_mix3, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv); d3.y = load_labels_paths(paths_mix3, n, labels, k, hierarchy, label_smooth_eps); free(paths_mix3); char **paths_mix4 = get_random_paths(paths_stored, n, m); d4.X = load_image_augment_paths(paths_mix4, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv); d4.y = load_labels_paths(paths_mix4, n, labels, k, hierarchy, label_smooth_eps); free(paths_mix4); } // mix int i, j; for (i = 0; i < d2.X.rows; ++i) { int mixup = use_mixup; if (use_mixup == 4) mixup = rand_int(2, 3); // alternate CutMix and Mosaic // MixUp ----------------------------------- if (mixup == 1) { // mix images for (j = 0; j < d2.X.cols; ++j) { d.X.vals[i][j] = (d.X.vals[i][j] + d2.X.vals[i][j]) / 2.0f; } // mix labels for (j = 0; j < d2.y.cols; ++j) { d.y.vals[i][j] = (d.y.vals[i][j] + d2.y.vals[i][j]) / 2.0f; } } // CutMix ----------------------------------- else if (mixup == 2) { const float min = 0.3; // 0.3*0.3 = 9% const float max = 0.8; // 0.8*0.8 = 64% const int cut_w = rand_int(w*min, w*max); const int cut_h = rand_int(h*min, h*max); const int cut_x = rand_int(0, w - cut_w - 1); const int cut_y = rand_int(0, h - cut_h - 1); const int left = cut_x; const int right = cut_x + cut_w; const int top = cut_y; const int bot = cut_y + cut_h; assert(cut_x >= 0 && cut_x <= w); assert(cut_y >= 0 && cut_y <= h); assert(cut_w >= 0 && cut_w <= w); assert(cut_h >= 0 && cut_h <= h); assert(right >= 0 && right <= w); assert(bot >= 0 && bot <= h); assert(top <= bot); assert(left <= 
right); const float alpha = (float)(cut_w*cut_h) / (float)(w*h); const float beta = 1 - alpha; int c, x, y; for (c = 0; c < 3; ++c) { for (y = top; y < bot; ++y) { for (x = left; x < right; ++x) { int j = x + y*w + c*w*h; d.X.vals[i][j] = d2.X.vals[i][j]; } } } //printf("\n alpha = %f, beta = %f \n", alpha, beta); // mix labels for (j = 0; j < d.y.cols; ++j) { d.y.vals[i][j] = d.y.vals[i][j] * beta + d2.y.vals[i][j] * alpha; } } // Mosaic ----------------------------------- else if (mixup == 3) { const float min_offset = 0.2; // 20% const int cut_x = rand_int(w*min_offset, w*(1 - min_offset)); const int cut_y = rand_int(h*min_offset, h*(1 - min_offset)); float s1 = (float)(cut_x * cut_y) / (w*h); float s2 = (float)((w - cut_x) * cut_y) / (w*h); float s3 = (float)(cut_x * (h - cut_y)) / (w*h); float s4 = (float)((w - cut_x) * (h - cut_y)) / (w*h); int c, x, y; for (c = 0; c < 3; ++c) { for (y = 0; y < h; ++y) { for (x = 0; x < w; ++x) { int j = x + y*w + c*w*h; if (x < cut_x && y < cut_y) d.X.vals[i][j] = d.X.vals[i][j]; if (x >= cut_x && y < cut_y) d.X.vals[i][j] = d2.X.vals[i][j]; if (x < cut_x && y >= cut_y) d.X.vals[i][j] = d3.X.vals[i][j]; if (x >= cut_x && y >= cut_y) d.X.vals[i][j] = d4.X.vals[i][j]; } } } for (j = 0; j < d.y.cols; ++j) { const float max_s = 1;// max_val_cmp(s1, max_val_cmp(s2, max_val_cmp(s3, s4))); d.y.vals[i][j] = d.y.vals[i][j] * s1 / max_s + d2.y.vals[i][j] * s2 / max_s + d3.y.vals[i][j] * s3 / max_s + d4.y.vals[i][j] * s4 / max_s; } } } free_data(d2); if (use_mixup >= 3) { free_data(d3); free_data(d4); } } #ifdef OPENCV if (use_blur) { int i; for (i = 0; i < d.X.rows; ++i) { if (random_gen() % 2) { image im = make_empty_image(w, h, 3); im.data = d.X.vals[i]; int ksize = use_blur; if (use_blur == 1) ksize = 17; image blurred = blur_image(im, ksize); free_image(im); d.X.vals[i] = blurred.data; //if (i == 0) { // show_image(im, "Not blurred"); // show_image(blurred, "blurred"); // wait_until_press_key_cv(); //} } } } #endif // OPENCV if 
(show_imgs) { int i, j; for (i = 0; i < d.X.rows; ++i) { image im = make_empty_image(w, h, 3); im.data = d.X.vals[i]; char buff[1000]; sprintf(buff, "aug_%d_%s_%d", i, basecfg((char*)paths[i]), random_gen()); save_image(im, buff); char buff_string[1000]; sprintf(buff_string, "\n Classes: "); for (j = 0; j < d.y.cols; ++j) { if (d.y.vals[i][j] > 0) { char buff_tmp[100]; sprintf(buff_tmp, " %d (%f), ", j, d.y.vals[i][j]); strcat(buff_string, buff_tmp); } } printf("%s \n", buff_string); if (show_imgs == 1) { show_image(im, buff); wait_until_press_key_cv(); } } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n"); } if (m) free(paths); return d; } data load_data_tag(char **paths, int n, int m, int k, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.w = w; d.h = h; d.shallow = 0; d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, 0); d.y = load_tags_paths(paths, n, k); if(m) free(paths); return d; } matrix concat_matrix(matrix m1, matrix m2) { int i, count = 0; matrix m; m.cols = m1.cols; m.rows = m1.rows+m2.rows; m.vals = (float**)xcalloc(m1.rows + m2.rows, sizeof(float*)); for(i = 0; i < m1.rows; ++i){ m.vals[count++] = m1.vals[i]; } for(i = 0; i < m2.rows; ++i){ m.vals[count++] = m2.vals[i]; } return m; } data concat_data(data d1, data d2) { data d = {0}; d.shallow = 1; d.X = concat_matrix(d1.X, d2.X); d.y = concat_matrix(d1.y, d2.y); return d; } data concat_datas(data *d, int n) { int i; data out = {0}; for(i = 0; i < n; ++i){ data newdata = concat_data(d[i], out); free_data(out); out = newdata; } return out; } data load_categorical_data_csv(char *filename, int target, int k) { data d = {0}; d.shallow = 0; matrix X = csv_to_matrix(filename); float *truth_1d = pop_column(&X, target); float **truth = 
one_hot_encode(truth_1d, X.rows, k); matrix y; y.rows = X.rows; y.cols = k; y.vals = truth; d.X = X; d.y = y; free(truth_1d); return d; } data load_cifar10_data(char *filename) { data d = {0}; d.shallow = 0; long i,j; matrix X = make_matrix(10000, 3072); matrix y = make_matrix(10000, 10); d.X = X; d.y = y; FILE *fp = fopen(filename, "rb"); if(!fp) file_error(filename); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i][j] = (double)bytes[j+1]; } } //translate_data_rows(d, -128); scale_data_rows(d, 1./255); //normalize_data_rows(d); fclose(fp); return d; } void get_random_batch(data d, int n, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = random_gen()%d.X.rows; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void get_next_batch(data d, int n, int offset, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = offset + j; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void smooth_data(data d) { int i, j; float scale = 1. 
/ d.y.cols; float eps = .1; for(i = 0; i < d.y.rows; ++i){ for(j = 0; j < d.y.cols; ++j){ d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j]; } } } data load_all_cifar10() { data d = {0}; d.shallow = 0; int i,j,b; matrix X = make_matrix(50000, 3072); matrix y = make_matrix(50000, 10); d.X = X; d.y = y; for(b = 0; b < 5; ++b){ char buff[256]; sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1); FILE *fp = fopen(buff, "rb"); if(!fp) file_error(buff); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i+b*10000][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i+b*10000][j] = (double)bytes[j+1]; } } fclose(fp); } //normalize_data_rows(d); //translate_data_rows(d, -128); scale_data_rows(d, 1./255); smooth_data(d); return d; } data load_go(char *filename) { FILE *fp = fopen(filename, "rb"); matrix X = make_matrix(3363059, 361); matrix y = make_matrix(3363059, 361); int row, col; if(!fp) file_error(filename); char *label; int count = 0; while((label = fgetl(fp))){ int i; if(count == X.rows){ X = resize_matrix(X, count*2); y = resize_matrix(y, count*2); } sscanf(label, "%d %d", &row, &col); char *board = fgetl(fp); int index = row*19 + col; y.vals[count][index] = 1; for(i = 0; i < 19*19; ++i){ float val = 0; if(board[i] == '1') val = 1; else if(board[i] == '2') val = -1; X.vals[count][i] = val; } ++count; free(label); free(board); } X = resize_matrix(X, count); y = resize_matrix(y, count); data d = {0}; d.shallow = 0; d.X = X; d.y = y; fclose(fp); return d; } void randomize_data(data d) { int i; for(i = d.X.rows-1; i > 0; --i){ int index = random_gen()%i; float *swap = d.X.vals[index]; d.X.vals[index] = d.X.vals[i]; d.X.vals[i] = swap; swap = d.y.vals[index]; d.y.vals[index] = d.y.vals[i]; d.y.vals[i] = swap; } } void scale_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ scale_array(d.X.vals[i], d.X.cols, s); } } void translate_data_rows(data d, float s) { int i; 
for(i = 0; i < d.X.rows; ++i){ translate_array(d.X.vals[i], d.X.cols, s); } } void normalize_data_rows(data d) { int i; for(i = 0; i < d.X.rows; ++i){ normalize_array(d.X.vals[i], d.X.cols); } } data get_data_part(data d, int part, int total) { data p = {0}; p.shallow = 1; p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total; p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total; p.X.cols = d.X.cols; p.y.cols = d.y.cols; p.X.vals = d.X.vals + d.X.rows * part / total; p.y.vals = d.y.vals + d.y.rows * part / total; return p; } data get_random_data(data d, int num) { data r = {0}; r.shallow = 1; r.X.rows = num; r.y.rows = num; r.X.cols = d.X.cols; r.y.cols = d.y.cols; r.X.vals = (float**)xcalloc(num, sizeof(float*)); r.y.vals = (float**)xcalloc(num, sizeof(float*)); int i; for(i = 0; i < num; ++i){ int index = random_gen()%d.X.rows; r.X.vals[i] = d.X.vals[index]; r.y.vals[i] = d.y.vals[index]; } return r; } data *split_data(data d, int part, int total) { data* split = (data*)xcalloc(2, sizeof(data)); int i; int start = part*d.X.rows/total; int end = (part+1)*d.X.rows/total; data train ={0}; data test ={0}; train.shallow = test.shallow = 1; test.X.rows = test.y.rows = end-start; train.X.rows = train.y.rows = d.X.rows - (end-start); train.X.cols = test.X.cols = d.X.cols; train.y.cols = test.y.cols = d.y.cols; train.X.vals = (float**)xcalloc(train.X.rows, sizeof(float*)); test.X.vals = (float**)xcalloc(test.X.rows, sizeof(float*)); train.y.vals = (float**)xcalloc(train.y.rows, sizeof(float*)); test.y.vals = (float**)xcalloc(test.y.rows, sizeof(float*)); for(i = 0; i < start; ++i){ train.X.vals[i] = d.X.vals[i]; train.y.vals[i] = d.y.vals[i]; } for(i = start; i < end; ++i){ test.X.vals[i-start] = d.X.vals[i]; test.y.vals[i-start] = d.y.vals[i]; } for(i = end; i < d.X.rows; ++i){ train.X.vals[i-(end-start)] = d.X.vals[i]; train.y.vals[i-(end-start)] = d.y.vals[i]; } split[0] = train; split[1] = test; return split; }
if-clauseModificado.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

// Sum a[0..n-1] in parallel: each thread accumulates a private partial sum
// over its static chunk, then adds it atomically into the shared total.
// Usage: <prog> <num-iteraciones> <num-hebras>. The parallel region is only
// activated when n > 4 (if-clause), otherwise it runs serially.
int main(int argc, char **argv)
{
    int i, n = 20, tid, x;
    // NOTE: a is sized with n == 20 here, before n is re-read from argv,
    // so the buffer always holds the maximum of 20 elements.
    int a[n], suma = 0, sumalocal;

    if (argc < 3) {
        fprintf(stderr, "Uso: \n %s <num-iteraciones> <num-hebras>\n", argv[0]);
        exit(-1);
    }

    n = atoi(argv[1]);
    if (n > 20) n = 20;        // clamp to the array capacity
    for (i = 0; i < n; i++) {
        a[i] = i;
    }

    x = atoi(argv[2]);
    if (x < 1) x = 1;          // at least one thread

    #pragma omp parallel if(n>4) default(none) private(sumalocal,tid) \
                         shared(a,suma,n) num_threads(x)
    {
        sumalocal = 0;
        tid = omp_get_thread_num();
        // nowait: threads proceed to the atomic update without waiting here
        #pragma omp for private(i) schedule(static) nowait
        for (i = 0; i < n; i++) {
            sumalocal += a[i];
            printf(" thread %d suma de a[%d]=%d sumalocal=%d \n", tid, i, a[i], sumalocal);
        }
        #pragma omp atomic
        suma += sumalocal;
        // make sure every partial sum has been added before master prints
        #pragma omp barrier
        #pragma omp master
        printf("thread master=%d imprime suma=%d\n", tid, suma);
    }
}
omp_for_bigbounds.c
// RUN: %libomp-compile -DMY_SCHEDULE=static && %libomp-run // RUN: %libomp-compile -DMY_SCHEDULE=dynamic && %libomp-run // RUN: %libomp-compile -DMY_SCHEDULE=guided && %libomp-run // Only works with Intel Compiler since at least version 15.0 and clang since // version 11. // XFAIL: gcc, clang-3, clang-4, clang-5, clang-6, clang-7, clang-8, clang-9, clang-10 /* * Test that large bounds are handled properly and calculations of * loop iterations don't accidentally overflow */ #include <stdio.h> #include <omp.h> #include <stdlib.h> #include <limits.h> #include "omp_testsuite.h" #define INCR 50000000 #define MY_MAX 2000000000 #define MY_MIN -2000000000 #ifndef MY_SCHEDULE # define MY_SCHEDULE static #endif int a, b, a_known_value, b_known_value; int test_omp_for_bigbounds() { a = 0; b = 0; #pragma omp parallel { int i; #pragma omp for schedule(MY_SCHEDULE) for (i = INT_MIN; i < MY_MAX; i+=INCR) { #pragma omp atomic a++; } #pragma omp for schedule(MY_SCHEDULE) for (i = INT_MAX; i >= MY_MIN; i-=INCR) { #pragma omp atomic b++; } } printf("a = %d (should be %d), b = %d (should be %d)\n", a, a_known_value, b, b_known_value); return (a == a_known_value && b == b_known_value); } int main() { int i; int num_failed=0; a_known_value = 0; for (i = INT_MIN; i < MY_MAX; i+=INCR) { a_known_value++; } b_known_value = 0; for (i = INT_MAX; i >= MY_MIN; i-=INCR) { b_known_value++; } for(i = 0; i < REPETITIONS; i++) { if(!test_omp_for_bigbounds()) { num_failed++; } } return num_failed; }
perftest.c
/** * Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED. * Copyright (C) The University of Tennessee and The University * of Tennessee Research Foundation. 2015. ALL RIGHTS RESERVED. * Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED. * Copyright (C) ARM Ltd. 2017-2021. ALL RIGHTS RESERVED. * * See file LICENSE for terms. */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "perftest.h" #include <ucs/sys/string.h> #include <ucs/sys/sys.h> #include <ucs/sys/sock.h> #include <ucs/debug/log.h> #include <sys/socket.h> #include <arpa/inet.h> #include <stdlib.h> #include <unistd.h> #include <netdb.h> #include <sys/poll.h> test_type_t tests[] = { {"am_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG, "active message latency", "latency", 1}, {"put_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG, "put latency", "latency", 1}, {"add_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_PINGPONG, "atomic add latency", "latency", 1}, {"get", UCX_PERF_API_UCT, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI, "get latency / bandwidth / message rate", "latency", 1}, {"fadd", UCX_PERF_API_UCT, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic fetch-and-add latency / rate", "latency", 1}, {"swap", UCX_PERF_API_UCT, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic swap latency / rate", "latency", 1}, {"cswap", UCX_PERF_API_UCT, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic compare-and-swap latency / rate", "latency", 1}, {"am_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI, "active message bandwidth / message rate", "overhead", 1}, {"put_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI, "put bandwidth / message rate", "overhead", 1}, {"add_mr", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic add message rate", "overhead", 1}, {"tag_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, 
UCX_PERF_TEST_TYPE_PINGPONG, "tag match latency", "latency", 1}, {"tag_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_STREAM_UNI, "tag match bandwidth", "overhead", 32}, {"tag_sync_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_PINGPONG, "tag sync match latency", "latency", 1}, {"tag_sync_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_STREAM_UNI, "tag sync match bandwidth", "overhead", 32}, {"ucp_put_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG, "put latency", "latency", 1}, {"ucp_put_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI, "put bandwidth", "overhead", 32}, {"ucp_get", UCX_PERF_API_UCP, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI, "get latency / bandwidth / message rate", "latency", 1}, {"ucp_add", UCX_PERF_API_UCP, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic add bandwidth / message rate", "overhead", 1}, {"ucp_fadd", UCX_PERF_API_UCP, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic fetch-and-add latency / bandwidth / rate", "latency", 1}, {"ucp_swap", UCX_PERF_API_UCP, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic swap latency / bandwidth / rate", "latency", 1}, {"ucp_cswap", UCX_PERF_API_UCP, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic compare-and-swap latency / bandwidth / rate", "latency", 1}, {"stream_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_STREAM_UNI, "stream bandwidth", "overhead", 1}, {"stream_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_PINGPONG, "stream latency", "latency", 1}, {"ucp_am_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG, "am latency", "latency", 1}, {"ucp_am_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI, "am bandwidth / message rate", "overhead", 32}, {NULL} }; static int sock_io(int sock, ssize_t (*sock_call)(int, void *, size_t, int), int poll_events, void *data, size_t size, void 
(*progress)(void *arg), void *arg, const char *name) { size_t total = 0; struct pollfd pfd; int ret; while (total < size) { pfd.fd = sock; pfd.events = poll_events; pfd.revents = 0; ret = poll(&pfd, 1, 1); /* poll for 1ms */ if (ret > 0) { ucs_assert(ret == 1); ucs_assert(pfd.revents & poll_events); ret = sock_call(sock, (char*)data + total, size - total, 0); if (ret < 0) { ucs_error("%s() failed: %m", name); return -1; } total += ret; } else if ((ret < 0) && (errno != EINTR)) { ucs_error("poll(fd=%d) failed: %m", sock); return -1; } /* progress user context */ if (progress != NULL) { progress(arg); } } return 0; } static int safe_send(int sock, void *data, size_t size, void (*progress)(void *arg), void *arg) { typedef ssize_t (*sock_call)(int, void *, size_t, int); ucs_assert(sock >= 0); return sock_io(sock, (sock_call)send, POLLOUT, data, size, progress, arg, "send"); } static int safe_recv(int sock, void *data, size_t size, void (*progress)(void *arg), void *arg) { ucs_assert(sock >= 0); return sock_io(sock, recv, POLLIN, data, size, progress, arg, "recv"); } ucs_status_t init_test_params(perftest_params_t *params) { memset(params, 0, sizeof(*params)); params->super.api = UCX_PERF_API_LAST; params->super.command = UCX_PERF_CMD_LAST; params->super.test_type = UCX_PERF_TEST_TYPE_LAST; params->super.thread_mode = UCS_THREAD_MODE_SINGLE; params->super.thread_count = 1; params->super.async_mode = UCS_ASYNC_THREAD_LOCK_TYPE; params->super.wait_mode = UCX_PERF_WAIT_MODE_LAST; params->super.max_outstanding = 0; params->super.warmup_iter = 10000; params->super.alignment = ucs_get_page_size(); params->super.max_iter = 1000000l; params->super.max_time = 0.0; params->super.report_interval = 1.0; params->super.percentile_rank = 50.0; params->super.flags = UCX_PERF_TEST_FLAG_VERBOSE; params->super.uct.fc_window = UCT_PERF_TEST_MAX_FC_WINDOW; params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; params->super.uct.am_hdr_size = 8; params->super.send_mem_type = 
UCS_MEMORY_TYPE_HOST; params->super.recv_mem_type = UCS_MEMORY_TYPE_HOST; params->super.msg_size_cnt = 1; params->super.iov_stride = 0; params->super.ucp.send_datatype = UCP_PERF_DATATYPE_CONTIG; params->super.ucp.recv_datatype = UCP_PERF_DATATYPE_CONTIG; params->super.ucp.am_hdr_size = 0; strcpy(params->super.uct.dev_name, TL_RESOURCE_NAME_NONE); strcpy(params->super.uct.tl_name, TL_RESOURCE_NAME_NONE); params->super.msg_size_list = calloc(params->super.msg_size_cnt, sizeof(*params->super.msg_size_list)); if (params->super.msg_size_list == NULL) { return UCS_ERR_NO_MEMORY; } params->super.msg_size_list[0] = 8; params->test_id = TEST_ID_UNDEFINED; return UCS_OK; } static unsigned sock_rte_group_size(void *rte_group) { sock_rte_group_t *group = rte_group; return group->size; } static unsigned sock_rte_group_index(void *rte_group) { sock_rte_group_t *group = rte_group; return group->is_server ? 0 : 1; } static void sock_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { #pragma omp barrier #pragma omp master { sock_rte_group_t *group = rte_group; if (group->size > 1) { const unsigned magic = 0xdeadbeef; unsigned snc; snc = magic; safe_send(group->sendfd, &snc, sizeof(unsigned), progress, arg); snc = 0; if (safe_recv(group->recvfd, &snc, sizeof(unsigned), progress, arg) == 0) { ucs_assert(snc == magic); } } } #pragma omp barrier } static void sock_rte_post_vec(void *rte_group, const struct iovec *iovec, int iovcnt, void **req) { sock_rte_group_t *group = rte_group; size_t size; int i; size = 0; for (i = 0; i < iovcnt; ++i) { size += iovec[i].iov_len; } safe_send(group->sendfd, &size, sizeof(size), NULL, NULL); for (i = 0; i < iovcnt; ++i) { safe_send(group->sendfd, iovec[i].iov_base, iovec[i].iov_len, NULL, NULL); } } static void sock_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { sock_rte_group_t *group = rte_group; size_t size; if (src != group->peer) { return; } safe_recv(group->recvfd, &size, sizeof(size), NULL, 
NULL); ucs_assert_always(size <= max); safe_recv(group->recvfd, buffer, size, NULL, NULL); } static void sock_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, const char *extra_info, int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, extra_info, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } static ucx_perf_rte_t sock_rte = { .group_size = sock_rte_group_size, .group_index = sock_rte_group_index, .barrier = sock_rte_barrier, .post_vec = sock_rte_post_vec, .recv = sock_rte_recv, .exchange_vec = (ucx_perf_rte_exchange_vec_func_t)ucs_empty_function, .report = sock_rte_report, }; static ucs_status_t setup_sock_rte_loobkack(struct perftest_context *ctx) { int connfds[2]; int ret; ctx->flags |= TEST_FLAG_PRINT_TEST | TEST_FLAG_PRINT_RESULTS; ret = socketpair(AF_UNIX, SOCK_STREAM, 0, connfds); if (ret < 0) { ucs_error("socketpair() failed: %m"); return UCS_ERR_IO_ERROR; } ctx->sock_rte_group.peer = 0; ctx->sock_rte_group.size = 1; ctx->sock_rte_group.is_server = 1; ctx->sock_rte_group.sendfd = connfds[0]; ctx->sock_rte_group.recvfd = connfds[1]; return UCS_OK; } static ucs_status_t setup_sock_rte_p2p(struct perftest_context *ctx) { int optval = 1; int sockfd = -1; char addr_str[UCS_SOCKADDR_STRING_LEN]; struct sockaddr_storage client_addr; socklen_t client_addr_len; int connfd; struct addrinfo hints, *res, *t; ucs_status_t status; int ret; char service[8]; char err_str[64]; ucs_snprintf_safe(service, sizeof(service), "%u", ctx->port); memset(&hints, 0, sizeof(hints)); hints.ai_flags = (ctx->server_addr == NULL) ? 
AI_PASSIVE : 0; hints.ai_family = ctx->af; hints.ai_socktype = SOCK_STREAM; ret = getaddrinfo(ctx->server_addr, service, &hints, &res); if (ret < 0) { ucs_error("getaddrinfo(server:%s, port:%s) error: [%s]", ctx->server_addr, service, gai_strerror(ret)); status = UCS_ERR_IO_ERROR; goto out; } if (res == NULL) { snprintf(err_str, 64, "getaddrinfo() returned empty list"); } for (t = res; t != NULL; t = t->ai_next) { sockfd = socket(t->ai_family, t->ai_socktype, t->ai_protocol); if (sockfd < 0) { snprintf(err_str, 64, "socket() failed: %m"); continue; } if (ctx->server_addr != NULL) { if (connect(sockfd, t->ai_addr, t->ai_addrlen) == 0) { break; } snprintf(err_str, 64, "connect() failed: %m"); } else { status = ucs_socket_setopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)); if (status != UCS_OK) { status = UCS_ERR_IO_ERROR; goto err_close_sockfd; } if (bind(sockfd, t->ai_addr, t->ai_addrlen) == 0) { ret = listen(sockfd, 10); if (ret < 0) { ucs_error("listen() failed: %m"); status = UCS_ERR_IO_ERROR; goto err_close_sockfd; } printf("Waiting for connection...\n"); /* Accept next connection */ client_addr_len = sizeof(client_addr); connfd = accept(sockfd, (struct sockaddr*)&client_addr, &client_addr_len); if (connfd < 0) { ucs_error("accept() failed: %m"); status = UCS_ERR_IO_ERROR; goto err_close_sockfd; } ucs_sockaddr_str((struct sockaddr*)&client_addr, addr_str, sizeof(addr_str)); printf("Accepted connection from %s\n", addr_str); close(sockfd); break; } snprintf(err_str, 64, "bind() failed: %m"); } close(sockfd); sockfd = -1; } if (sockfd < 0) { ucs_error("%s failed. %s", (ctx->server_addr != NULL) ? 
"client" : "server", err_str); status = UCS_ERR_IO_ERROR; goto out_free_res; } if (ctx->server_addr == NULL) { /* release the memory for the list of the message sizes allocated * during the initialization of the default testing parameters */ free(ctx->params.super.msg_size_list); ctx->params.super.msg_size_list = NULL; ret = safe_recv(connfd, &ctx->params, sizeof(ctx->params), NULL, NULL); if (ret) { status = UCS_ERR_IO_ERROR; goto err_close_connfd; } if (ctx->params.super.msg_size_cnt != 0) { ctx->params.super.msg_size_list = calloc(ctx->params.super.msg_size_cnt, sizeof(*ctx->params.super.msg_size_list)); if (NULL == ctx->params.super.msg_size_list) { status = UCS_ERR_NO_MEMORY; goto err_close_connfd; } ret = safe_recv(connfd, ctx->params.super.msg_size_list, sizeof(*ctx->params.super.msg_size_list) * ctx->params.super.msg_size_cnt, NULL, NULL); if (ret) { status = UCS_ERR_IO_ERROR; goto err_close_connfd; } } ctx->sock_rte_group.sendfd = connfd; ctx->sock_rte_group.recvfd = connfd; ctx->sock_rte_group.peer = 1; ctx->sock_rte_group.is_server = 1; } else { safe_send(sockfd, &ctx->params, sizeof(ctx->params), NULL, NULL); if (ctx->params.super.msg_size_cnt != 0) { safe_send(sockfd, ctx->params.super.msg_size_list, sizeof(*ctx->params.super.msg_size_list) * ctx->params.super.msg_size_cnt, NULL, NULL); } ctx->sock_rte_group.sendfd = sockfd; ctx->sock_rte_group.recvfd = sockfd; ctx->sock_rte_group.peer = 0; ctx->sock_rte_group.is_server = 0; } ctx->sock_rte_group.size = 2; if (ctx->sock_rte_group.is_server) { ctx->flags |= TEST_FLAG_PRINT_TEST; } else { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } status = UCS_OK; goto out_free_res; err_close_connfd: ucs_close_fd(&connfd); goto out_free_res; err_close_sockfd: ucs_close_fd(&sockfd); out_free_res: freeaddrinfo(res); out: return status; } static ucs_status_t setup_sock_rte(struct perftest_context *ctx) { ucs_status_t status; if (ctx->params.super.flags & UCX_PERF_TEST_FLAG_LOOPBACK) { status = setup_sock_rte_loobkack(ctx); } 
else { status = setup_sock_rte_p2p(ctx); } if (status != UCS_OK) { return status; } ctx->params.super.rte_group = &ctx->sock_rte_group; ctx->params.super.rte = &sock_rte; ctx->params.super.report_arg = ctx; return UCS_OK; } static ucs_status_t cleanup_sock_rte(struct perftest_context *ctx) { sock_rte_group_t *rte_group = &ctx->sock_rte_group; close(rte_group->sendfd); if (rte_group->sendfd != rte_group->recvfd) { close(rte_group->recvfd); } return UCS_OK; } #if defined (HAVE_MPI) static unsigned mpi_rte_group_size(void *rte_group) { int size; MPI_Comm_size(MPI_COMM_WORLD, &size); return size; } static unsigned mpi_rte_group_index(void *rte_group) { int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank); return rank; } static void mpi_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { int group_size, my_rank, i; MPI_Request *reqs; int nreqs = 0; int dummy; int flag; #pragma omp barrier #pragma omp master { /* * Naive non-blocking barrier implementation over send/recv, to call user * progress while waiting for completion. * Not using MPI_Ibarrier to be compatible with MPI-1. 
*/ MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &group_size); /* allocate maximal possible number of requests */ reqs = (MPI_Request*)alloca(sizeof(*reqs) * group_size); if (my_rank == 0) { /* root gathers "ping" from all other ranks */ for (i = 1; i < group_size; ++i) { MPI_Irecv(&dummy, 0, MPI_INT, i /* source */, 1 /* tag */, MPI_COMM_WORLD, &reqs[nreqs++]); } } else { /* every non-root rank sends "ping" and waits for "pong" */ MPI_Send(&dummy, 0, MPI_INT, 0 /* dest */, 1 /* tag */, MPI_COMM_WORLD); MPI_Irecv(&dummy, 0, MPI_INT, 0 /* source */, 2 /* tag */, MPI_COMM_WORLD, &reqs[nreqs++]); } /* Waiting for receive requests */ do { MPI_Testall(nreqs, reqs, &flag, MPI_STATUSES_IGNORE); progress(arg); } while (!flag); if (my_rank == 0) { /* root sends "pong" to all ranks */ for (i = 1; i < group_size; ++i) { MPI_Send(&dummy, 0, MPI_INT, i /* dest */, 2 /* tag */, MPI_COMM_WORLD); } } } #pragma omp barrier } static void mpi_rte_post_vec(void *rte_group, const struct iovec *iovec, int iovcnt, void **req) { int group_size; int my_rank; int dest, i; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &group_size); for (dest = 0; dest < group_size; ++dest) { if (dest != rte_peer_index(group_size, my_rank)) { continue; } for (i = 0; i < iovcnt; ++i) { MPI_Send(iovec[i].iov_base, iovec[i].iov_len, MPI_BYTE, dest, i == (iovcnt - 1), /* Send last iov with tag == 1 */ MPI_COMM_WORLD); } } *req = (void*)(uintptr_t)1; } static void mpi_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { MPI_Status status; int my_rank, size; size_t offset; int count; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &size); if (src != rte_peer_index(size, my_rank)) { return; } offset = 0; do { ucs_assert_always(offset < max); MPI_Recv(buffer + offset, max - offset, MPI_BYTE, src, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Get_count(&status, MPI_BYTE, &count); offset += count; } while (status.MPI_TAG != 
1); } static void mpi_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, const char *extra_info, int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, extra_info, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } #elif defined (HAVE_RTE) static unsigned ext_rte_group_size(void *rte_group) { rte_group_t group = (rte_group_t)rte_group; return rte_group_size(group); } static unsigned ext_rte_group_index(void *rte_group) { rte_group_t group = (rte_group_t)rte_group; return rte_group_rank(group); } static void ext_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { #pragma omp barrier #pragma omp master { rte_group_t group = (rte_group_t)rte_group; int rc; rc = rte_barrier(group); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_barrier"); } } #pragma omp barrier } static void ext_rte_post_vec(void *rte_group, const struct iovec* iovec, int iovcnt, void **req) { rte_group_t group = (rte_group_t)rte_group; rte_srs_session_t session; rte_iovec_t *r_vec; int i, rc; rc = rte_srs_session_create(group, 0, &session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_session_create"); } r_vec = calloc(iovcnt, sizeof(rte_iovec_t)); if (r_vec == NULL) { return; } for (i = 0; i < iovcnt; ++i) { r_vec[i].iov_base = iovec[i].iov_base; r_vec[i].type = rte_datatype_uint8_t; r_vec[i].count = iovec[i].iov_len; } rc = rte_srs_set_data(session, "KEY_PERF", r_vec, iovcnt); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_set_data"); } *req = session; free(r_vec); } static void ext_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { rte_group_t group = (rte_group_t)rte_group; rte_srs_session_t session = (rte_srs_session_t)req; void *rte_buffer = NULL; rte_iovec_t r_vec; uint32_t offset; int size; int rc; rc = rte_srs_get_data(session, rte_group_index_to_ec(group, src), "KEY_PERF", &rte_buffer, &size); if (RTE_SUCCESS 
!= rc) { ucs_error("Failed to rte_srs_get_data"); return; } r_vec.iov_base = buffer; r_vec.type = rte_datatype_uint8_t; r_vec.count = max; offset = 0; rte_unpack(&r_vec, rte_buffer, &offset); rc = rte_srs_session_destroy(session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_session_destroy"); } free(rte_buffer); } static void ext_rte_exchange_vec(void *rte_group, void * req) { rte_srs_session_t session = (rte_srs_session_t)req; int rc; rc = rte_srs_exchange_data(session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_exchange_data"); } } static void ext_rte_report(void *rte_group, const ucx_perf_result_t *result, const char *extra_info, void *arg, int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, extra_info, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } static ucx_perf_rte_t ext_rte = { .group_size = ext_rte_group_size, .group_index = ext_rte_group_index, .barrier = ext_rte_barrier, .report = ext_rte_report, .post_vec = ext_rte_post_vec, .recv = ext_rte_recv, .exchange_vec = ext_rte_exchange_vec, }; #endif static ucs_status_t setup_mpi_rte(struct perftest_context *ctx) { #if defined (HAVE_MPI) static ucx_perf_rte_t mpi_rte = { .group_size = mpi_rte_group_size, .group_index = mpi_rte_group_index, .barrier = mpi_rte_barrier, .post_vec = mpi_rte_post_vec, .recv = mpi_rte_recv, .exchange_vec = (void*)ucs_empty_function, .report = mpi_rte_report, }; int size, rank; ucs_trace_func(""); MPI_Comm_size(MPI_COMM_WORLD, &size); if ((ctx->params.super.flags & UCX_PERF_TEST_FLAG_LOOPBACK) && (size != 1)) { ucs_error("This test should be run with 1 process " "in loopback case (actual: %d)", size); return UCS_ERR_INVALID_PARAM; } if (!(ctx->params.super.flags & UCX_PERF_TEST_FLAG_LOOPBACK) && (size != 2)) { ucs_error("This test should be run with exactly 2 processes " "in p2p case (actual: %d)", size); return UCS_ERR_INVALID_PARAM; } 
MPI_Comm_rank(MPI_COMM_WORLD, &rank); /* Let the last rank print the results */ if (rank == (size - 1)) { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.super.rte_group = NULL; ctx->params.super.rte = &mpi_rte; ctx->params.super.report_arg = ctx; #elif defined (HAVE_RTE) ucs_trace_func(""); ctx->params.rte_group = NULL; ctx->params.rte = &mpi_rte; ctx->params.report_arg = ctx; rte_group_t group; rte_init(NULL, NULL, &group); /* Let the last rank print the results */ if (rte_group_rank(group) == (rte_group_size(group) - 1)) { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.super.rte_group = group; ctx->params.super.rte = &ext_rte; ctx->params.super.report_arg = ctx; #endif return UCS_OK; } static ucs_status_t cleanup_mpi_rte(struct perftest_context *ctx) { #ifdef HAVE_RTE rte_finalize(); #endif return UCS_OK; } static ucs_status_t check_system(struct perftest_context *ctx) { ucs_sys_cpuset_t cpuset; unsigned i, count, nr_cpus; int ret; ucs_trace_func(""); ret = ucs_sys_get_num_cpus(); if (ret < 0) { return UCS_ERR_INVALID_PARAM; } nr_cpus = ret; memset(&cpuset, 0, sizeof(cpuset)); if (ctx->flags & TEST_FLAG_SET_AFFINITY) { for (i = 0; i < ctx->num_cpus; i++) { if (ctx->cpus[i] >= nr_cpus) { ucs_error("cpu (%u) out of range (0..%u)", ctx->cpus[i], nr_cpus - 1); return UCS_ERR_INVALID_PARAM; } } for (i = 0; i < ctx->num_cpus; i++) { CPU_SET(ctx->cpus[i], &cpuset); } ret = ucs_sys_setaffinity(&cpuset); if (ret) { ucs_warn("sched_setaffinity() failed: %m"); return UCS_ERR_INVALID_PARAM; } } else { ret = ucs_sys_getaffinity(&cpuset); if (ret) { ucs_warn("sched_getaffinity() failed: %m"); return UCS_ERR_INVALID_PARAM; } count = 0; for (i = 0; i < CPU_SETSIZE; ++i) { if (CPU_ISSET(i, &cpuset)) { ++count; } } if (count > 2) { ucs_warn("CPU affinity is not set (bound to %u cpus)." 
" Performance may be impacted.", count); } } return UCS_OK; } int main(int argc, char **argv) { struct perftest_context ctx; ucs_status_t status; int mpi_initialized; int mpi_rte; int ret; #ifdef HAVE_MPI int provided; mpi_initialized = !isatty(0) && /* Using MPI_THREAD_FUNNELED since ucx_perftest supports * using multiple threads when only the main one makes * MPI calls (which is also suitable for a single threaded * run). * MPI_THREAD_FUNNELED: * The process may be multi-threaded, but only the main * thread will make MPI calls (all MPI calls are funneled * to the main thread). */ (MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided) == 0); if (mpi_initialized && (provided != MPI_THREAD_FUNNELED)) { printf("MPI_Init_thread failed to set MPI_THREAD_FUNNELED. (provided = %d)\n", provided); ret = -1; goto out; } #else mpi_initialized = 0; #endif /* Parse command line */ status = parse_opts(&ctx, mpi_initialized, argc, argv); if (status != UCS_OK) { ret = (status == UCS_ERR_CANCELED) ? 0 : -127; goto out_msg_size_list; } #ifdef __COVERITY__ /* coverity[dont_call] */ mpi_rte = rand(); /* Shut up deadcode error */ #endif if (ctx.mpi) { mpi_rte = 1; } else { #ifdef HAVE_RTE mpi_rte = 1; #else mpi_rte = 0; #endif } status = check_system(&ctx); if (status != UCS_OK) { ret = -1; goto out_msg_size_list; } /* Create RTE */ status = (mpi_rte) ? setup_mpi_rte(&ctx) : setup_sock_rte(&ctx); if (status != UCS_OK) { ret = -1; goto out_msg_size_list; } /* Run the test */ status = run_test(&ctx); if (status != UCS_OK) { ret = -1; goto out_cleanup_rte; } ret = 0; out_cleanup_rte: (mpi_rte) ? cleanup_mpi_rte(&ctx) : cleanup_sock_rte(&ctx); out_msg_size_list: free(ctx.params.super.msg_size_list); #if HAVE_MPI out: #endif if (mpi_initialized) { #ifdef HAVE_MPI MPI_Finalize(); #endif } return ret; }
Morpho.h
/* * Copyright (c) 2021 The Foundation for Research on Information Technologies in Society (IT'IS). * * This file is part of iSEG * (see https://github.com/ITISFoundation/osparc-iseg). * * This software is released under the MIT License. * https://opensource.org/licenses/MIT */ #pragma once #include "Data/ItkProgressObserver.h" #include "Data/ItkUtils.h" #include "Data/ScopeExit.h" #include "Data/SlicesHandlerITKInterface.h" #include <itkBinaryDilateImageFilter.h> #include <itkBinaryErodeImageFilter.h> #include <itkBinaryThresholdImageFilter.h> #include <itkFlatStructuringElement.h> #include <itkImageRegionIteratorWithIndex.h> #include <itkPasteImageFilter.h> #include <boost/variant.hpp> namespace iseg { namespace details { template<unsigned int Dimension> itk::FlatStructuringElement<Dimension> MakeBall(const typename itk::ImageBase<Dimension>::SpacingType& spacing, double radius) { auto ball = iseg::MakeBall<bool, Dimension>(spacing, radius); return itk::FlatStructuringElement<Dimension>::FromImage(ball); } template<unsigned int Dimension> itk::FlatStructuringElement<Dimension> MakeBall(const itk::Size<Dimension>& radius) { bool radius_is_parametric = true; return itk::FlatStructuringElement<Dimension>::Ball(radius, radius_is_parametric); } } template<class TInputImage> itk::FlatStructuringElement<TInputImage::ImageDimension> MakeBall(typename TInputImage::Pointer input, boost::variant<int, float> radius) { itkStaticConstMacro(Dimension, unsigned int, TInputImage::ImageDimension); using spacing_type = typename TInputImage::SpacingType; class MyVisitor : public boost::static_visitor<itk::FlatStructuringElement<Dimension>> { public: explicit MyVisitor(const spacing_type& spacing) : m_Spacing(spacing) {} itk::FlatStructuringElement<Dimension> operator()(int r) const { itk::Size<Dimension> radius; radius.Fill(r); return details::MakeBall<Dimension>(radius); } itk::FlatStructuringElement<Dimension> operator()(float r) const { return 
details::MakeBall<Dimension>(m_Spacing, static_cast<double>(r)); } private: spacing_type m_Spacing; }; auto ball = boost::apply_visitor(MyVisitor(input->GetSpacing()), radius); return ball; } enum eOperation { kErode, kDilate, kClose, kOpen }; template<class TInputImage, class TOutputImage = itk::Image<unsigned char, TInputImage::ImageDimension>> typename TOutputImage::Pointer MorphologicalOperation(typename TInputImage::Pointer input, boost::variant<int, float> radius, eOperation operation, const typename TInputImage::RegionType& requested_region, iseg::ProgressInfo* progress = nullptr) { using input_image_type = TInputImage; using image_type = TOutputImage; itkStaticConstMacro(Dimension, unsigned int, TInputImage::ImageDimension); using kernel_type = itk::FlatStructuringElement<Dimension>; auto ball = MakeBall<TInputImage>(input, radius); unsigned char foreground_value = 255; auto threshold = itk::BinaryThresholdImageFilter<input_image_type, image_type>::New(); threshold->SetInput(input); threshold->SetLowerThreshold(0.001f); // background is '0' threshold->SetInsideValue(foreground_value); std::vector<typename itk::ImageSource<image_type>::Pointer> filters; if (operation == eOperation::kErode || operation == eOperation::kOpen) { auto erode = itk::BinaryErodeImageFilter<image_type, image_type, kernel_type>::New(); erode->SetInput(threshold->GetOutput()); erode->SetKernel(ball); erode->SetErodeValue(foreground_value); erode->SetBackgroundValue(0); filters.push_back(typename itk::ImageSource<image_type>::Pointer(erode)); if (operation == kOpen) { auto dilate = itk::BinaryDilateImageFilter<image_type, image_type, kernel_type>::New(); dilate->SetInput(erode->GetOutput()); dilate->SetKernel(ball); dilate->SetDilateValue(foreground_value); filters.push_back(typename itk::ImageSource<image_type>::Pointer(dilate)); } } else { auto dilate = itk::BinaryDilateImageFilter<image_type, image_type, kernel_type>::New(); dilate->SetInput(threshold->GetOutput()); 
dilate->SetKernel(ball); dilate->SetDilateValue(foreground_value); filters.push_back(typename itk::ImageSource<image_type>::Pointer(dilate)); if (operation == kClose) { auto erode = itk::BinaryErodeImageFilter<image_type, image_type, kernel_type>::New(); erode->SetInput(dilate->GetOutput()); erode->SetKernel(ball); erode->SetErodeValue(foreground_value); erode->SetBackgroundValue(0); filters.push_back(typename itk::ImageSource<image_type>::Pointer(erode)); } } if (progress) { auto observer = iseg::ItkProgressObserver::New(); observer->SetProgressInfo(progress); for (const auto& filter : filters) filter->AddObserver(itk::ProgressEvent(), observer); } filters.back()->GetOutput()->SetRequestedRegion(requested_region); filters.back()->Update(); return filters.back()->GetOutput(); } /** \brief Do morpological operation on target image */ void MorphologicalOperation(iseg::SlicesHandlerInterface* handler, boost::variant<int, float> radius, eOperation operation, bool true3d, iseg::ProgressInfo* progress) { iseg::SlicesHandlerITKInterface itkhandler(handler); if (true3d) { using input_type = iseg::SlicesHandlerITKInterface::image_ref_type; using output_type = itk::Image<unsigned char, 3>; auto target = itkhandler.GetTarget(true); // get active slices auto region = target->GetBufferedRegion(); auto output = MorphologicalOperation<input_type>(target, radius, operation, region, progress); iseg::Paste<output_type, input_type>(output, target); } else { using input_type = itk::Image<float, 2>; using output_type = itk::Image<unsigned char, 2>; std::int64_t startslice = handler->StartSlice(); std::int64_t endslice = handler->EndSlice(); if (progress) { progress->SetNumberOfSteps(endslice - startslice); } #pragma omp parallel for for (std::int64_t slice = startslice; slice < endslice; ++slice) { auto target = itkhandler.GetTargetSlice(slice); auto region = target->GetBufferedRegion(); auto output = MorphologicalOperation<input_type>(target, radius, operation, region, nullptr); 
iseg::Paste<output_type, input_type>(output, target); if (progress) { progress->Increment(); } } } } } // namespace iseg
omp_for_private.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <math.h> #include "omp_testsuite.h" /* Utility function do spend some time in a loop */ static void do_some_work() { int i; double sum = 0; for(i = 0; i < 1000; i++){ sum += sqrt ((double) i); } } int sum1; #pragma omp threadprivate(sum1) int test_omp_for_private() { int sum = 0; int sum0; int known_sum; sum0 = 0; /* setting (global) sum0 = 0 */ #pragma omp parallel { sum1 = 0; /* setting sum1 in each thread to 0 */ { /* begin of orphaned block */ int i; #pragma omp for private(sum0) schedule(static,1) for (i = 1; i <= LOOPCOUNT; i++) { sum0 = sum1; #pragma omp flush sum0 = sum0 + i; do_some_work (); #pragma omp flush sum1 = sum0; } } /* end of orphaned block */ #pragma omp critical { sum = sum + sum1; } /*end of critical*/ } /* end of parallel*/ known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2; return (known_sum == sum); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_for_private()) { num_failed++; } } return num_failed; }
kmeans.c
/**
 * @file kmeans.c
 * @author Sylvan Brocard (sbrocard@upmem.com)
 * @brief Main file for the KMeans algorithm.
 */

#include <dpu.h>
#include <dpu_log.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <fcntl.h>
#include <omp.h>
#include <getopt.h>
#include <libgen.h>
#include <sys/time.h>

#include "../kmeans.h"

/**
 * @brief Allocates all DPUs
 *
 * @param p Algorithm parameters (allset and ndpu are filled in).
 */
void allocate(Params *p)
{
    DPU_ASSERT(dpu_alloc(DPU_ALLOCATE_ALL, NULL, &p->allset));
    DPU_ASSERT(dpu_get_nr_dpus(p->allset, &p->ndpu));
}

/**
 * @brief Returns the seconds elapsed between two timeval structures.
 *
 * @param tic [in] First timeval.
 * @param toc [in] Second timeval.
 * @return double Elapsed time in seconds.
 */
double time_seconds(struct timeval tic, struct timeval toc)
{
    struct timeval timing;

    /* Fix: 'timing' was accumulated with '+=' while uninitialized, which
     * produced a garbage result; assign the difference instead. */
    timing.tv_sec  = toc.tv_sec - tic.tv_sec;
    timing.tv_usec = toc.tv_usec - tic.tv_usec;

    double time = ((double)(timing.tv_sec * 1000000 + timing.tv_usec)) / 1000000;

    return time;
}

/**
 * @brief Removes the extension from a file name (in place).
 *
 * Scans backwards for the last '.'; a name without a dot is left unchanged.
 *
 * @param fname [in] The file name string.
 */
static void strip_ext(char *fname)
{
    char *end = fname + strlen(fname);

    while (end > fname && *end != '.')
        --end;

    if (end > fname)
        *end = '\0';
}

/**
 * @brief Reads a binary input file from disk.
 *
 * The file layout is: uint64_t npoints, int nfeatures, then
 * npoints*nfeatures floats. Exits on any I/O or allocation failure.
 */
void read_bin_input(
    Params *p,              /**< Algorithm parameters */
    const char *filename,   /**< [in] The file name. */
    float ***features_out)  /**< [out] Vector of features. */
{
    float **features;
    FILE *infile;
    if ((infile = fopen(filename, "rb")) == NULL) {
        fprintf(stderr, "Error: no such file (%s)\n", filename);
        exit(1);
    }

    /* get nfeatures and npoints */
    if (fread(&p->npoints, sizeof(uint64_t), 1, infile) != 1 ||
        fread(&p->nfeatures, sizeof(int), 1, infile) != 1) {
        fprintf(stderr, "Error: failed to read header from %s\n", filename);
        exit(EXIT_FAILURE);
    }

    /* rounding the size of the input to the smallest multiple of 8*ndpu
     * larger than npoints */
    p->npadded = ((p->npoints + 8 * p->ndpu - 1) / (8 * p->ndpu)) * 8 * p->ndpu;

    /* allocate space for features[][] and read attributes of all objects.
     * Fix: check the allocations *before* the pointer-chaining loop
     * dereferences features[0]; also use uint64_t for the row index since
     * npadded is 64-bit. */
    features = (float **)malloc(p->npadded * sizeof(*features));
    if (features == NULL) {
        perror("malloc features");
        exit(EXIT_FAILURE);
    }
    features[0] = (float *)malloc(p->npadded * p->nfeatures * sizeof(**features));
    if (features[0] == NULL) {
        perror("malloc features[0]");
        exit(EXIT_FAILURE);
    }
    for (uint64_t ipoint = 1; ipoint < p->npadded; ipoint++)
        features[ipoint] = features[ipoint - 1] + p->nfeatures;

    if (fread(features[0], sizeof(float), p->npoints * p->nfeatures, infile) !=
        p->npoints * p->nfeatures) {
        fprintf(stderr, "Error: failed to read data from %s\n", filename);
        exit(EXIT_FAILURE);
    }

    fclose(infile);

    *features_out = features;
}

/**
 * @brief Reads a text input file from disk.
 *
 * One point per line; the first token of each line is an id and is skipped,
 * the remaining comma/whitespace-separated tokens are features.
 * NOTE(review): assumes p->npoints and p->nfeatures are zero on entry -
 * confirm against the caller.
 */
void read_txt_input(
    Params *p,              /**< Algorithm parameters */
    const char *filename,   /**< [in] The file name. */
    float ***features_out)  /**< [out] Vector of features. */
{
    char line[1024];
    float **features;
    FILE *infile;
    if ((infile = fopen(filename, "r")) == NULL) {
        fprintf(stderr, "Error: no such file (%s)\n", filename);
        exit(1);
    }

    /* first pass: count the points */
    while (fgets(line, 1024, infile) != NULL)
        if (strtok(line, " \t\n") != NULL)
            p->npoints++;
    rewind(infile);

    /* count the features on the first non-empty line */
    while (fgets(line, 1024, infile) != NULL) {
        if (strtok(line, " \t\n") != NULL) {
            /* ignore the id (first attribute): nfeatures = 1; */
            while (strtok(NULL, " ,\t\n") != NULL)
                p->nfeatures++;
            break;
        }
    }

    /* rounding the size of the input to the smallest multiple of 8*ndpu
     * larger than npoints */
    p->npadded = ((p->npoints + 8 * p->ndpu - 1) / (8 * p->ndpu)) * 8 * p->ndpu;

    /* allocate space for features[] and read attributes of all objects.
     * Fix: NULL-check before the chaining loop uses features[0]; use a
     * 64-bit row index. */
    features = (float **)malloc(p->npadded * sizeof(*features));
    if (features == NULL) {
        perror("malloc features");
        exit(EXIT_FAILURE);
    }
    features[0] = (float *)malloc(p->npadded * p->nfeatures * sizeof(**features));
    if (features[0] == NULL) {
        perror("malloc features[0]");
        exit(EXIT_FAILURE);
    }
    for (uint64_t ipoint = 1; ipoint < p->npadded; ipoint++)
        features[ipoint] = features[ipoint - 1] + p->nfeatures;

    /* second pass: read the data */
    rewind(infile);
    {
        int ifeature_global = 0;
        while (fgets(line, 1024, infile) != NULL) {
            /* the first token (the id) is consumed and discarded here */
            if (strtok(line, " \t\n") == NULL)
                continue;
            for (int ifeature = 0; ifeature < p->nfeatures; ifeature++) {
                features[0][ifeature_global] = atof(strtok(NULL, " ,\t\n"));
                ifeature_global++;
            }
        }
    }
    fclose(infile);

    *features_out = features;
}

/**
 * @brief Saves the input data in a binary file for faster access next time.
 *
 * @param p Algorithm parameters.
 * @param filename_in [in] Name of the input text file.
 * @param features [npoints][nfeatures] Feature array.
*/ void save_dat_file(Params *p, const char *filename_in, float **features) { char *filename = strdup(filename_in); char suffix[] = ".dat"; int n = strlen(filename) + strlen(suffix); char *dat_name = (char *)malloc(n * sizeof(*dat_name)); strcpy(dat_name, filename); strip_ext(dat_name); strcat(dat_name, ".dat"); printf("Writing points in binary format to %s\n", dat_name); FILE *binfile; binfile = fopen(dat_name, "wb"); fwrite(&p->npoints, sizeof(p->npoints), 1, binfile); fwrite(&p->nfeatures, sizeof(p->nfeatures), 1, binfile); fwrite(features[0], sizeof(*features[0]), p->npoints * p->nfeatures, binfile); fclose(binfile); free(filename); free(dat_name); } /** * @brief Formats a flat array into a bidimensional representation */ void format_array_input( Params *p, /**< Algorithm parameters. */ float *data, /**< [in] The data as a flat table */ float ***features_out) /**< [out] The data as two dimensional table */ { // uint64_t npadded; p->npadded = ((p->npoints + 8 * p->ndpu - 1) / (8 * p->ndpu)) * 8 * p->ndpu; float **features = (float **)malloc(p->npadded * sizeof(*features)); features[0] = data; for (int ipoint = 1; ipoint < p->npadded; ipoint++) features[ipoint] = features[ipoint - 1] + p->nfeatures; *features_out = features; } /** * @brief Preprocesses the data before running the KMeans algorithm. * * @return float Scaling factor applied to the input data. */ void preprocessing( Params *p, /**< Algorithm parameters */ float **features_float, /**< [in] Features as floats. */ int_feature ***features_int_out, /**< [out] Features as integers. */ int verbose) /**< [in] Whether or not to print runtime information. 
*/ { uint64_t ipoint; int ifeature; float *mean; float *variance; int_feature **features_int; float avg_variance; float max_feature = 0; #ifdef PERF_COUNTER struct timeval tic, toc; gettimeofday(&tic, NULL); #endif p->npointperdpu = p->npadded / p->ndpu; /* DEBUG : print features head */ // printf("features head:\n"); // for (int ipoint = 0; ipoint < 10; ipoint++) // { // for (int ifeature = 0; ifeature < nfeatures; ifeature++) // printf("%.4f ", features[ipoint][ifeature]); // printf("\n"); // } // printf("\n"); mean = (float *)calloc(p->nfeatures, sizeof(*p->mean)); variance = (float *)calloc(p->nfeatures, sizeof(*variance)); /* compute mean by feature */ #pragma omp parallel for collapse(2) \ reduction(+ \ : mean[:p->nfeatures]) for (ifeature = 0; ifeature < p->nfeatures; ifeature++) for (ipoint = 0; ipoint < p->npoints; ipoint++) mean[ifeature] += features_float[ipoint][ifeature]; #pragma omp parallel for for (ifeature = 0; ifeature < p->nfeatures; ifeature++) mean[ifeature] /= p->npoints; p->mean = mean; if (verbose) { printf("means = "); for (ifeature = 0; ifeature < p->nfeatures; ifeature++) printf(" %.4f", p->mean[ifeature]); printf("\n"); } /* subtract mean from each feature */ #pragma omp parallel for collapse(2) for (ipoint = 0; ipoint < p->npoints; ipoint++) for (ifeature = 0; ifeature < p->nfeatures; ifeature++) features_float[ipoint][ifeature] -= p->mean[ifeature]; /* ****** discretization ****** */ /* get maximum absolute value of features */ #pragma omp parallel for collapse(2) \ reduction(max \ : max_feature) for (ipoint = 0; ipoint < p->npoints; ipoint++) for (ifeature = 0; ifeature < p->nfeatures; ifeature++) if (fabsf(features_float[ipoint][ifeature]) > max_feature) max_feature = fabsf(features_float[ipoint][ifeature]); switch (sizeof(int_feature)) { case 1UL: p->scale_factor = INT8_MAX / max_feature / 2; break; case 2UL: p->scale_factor = INT16_MAX / max_feature / 2; break; case 4UL: p->scale_factor = INT32_MAX / max_feature / 2; break; 
default: printf("Error: unsupported type for int_feature.\n"); exit(0); } if (verbose) { printf("max absolute value : %f\n", max_feature); printf("scale factor = %.4f\n", p->scale_factor); } /* allocate space for features_int[][] and convert attributes of all objects */ features_int = (int_feature **)malloc(p->npadded * sizeof(*features_int)); features_int[0] = (int_feature *)malloc(p->npadded * p->nfeatures * sizeof(features_int)); for (ipoint = 1; ipoint < p->npadded; ipoint++) features_int[ipoint] = features_int[ipoint - 1] + p->nfeatures; /* checking that we managed to assign enough memory */ if (!features_int[0]) { perror("malloc features_int[0]"); exit(EXIT_FAILURE); } #pragma omp parallel for collapse(2) for (ipoint = 0; ipoint < p->npoints; ipoint++) for (ifeature = 0; ifeature < p->nfeatures; ifeature++) features_int[ipoint][ifeature] = lroundf(features_float[ipoint][ifeature] * p->scale_factor); /* DEBUG : print features head */ // printf("features head:\n"); // for (int ipoint = 0; ipoint < (npoints >= 10 ? 
10 : npoints); ipoint++) // { // for (int ifeature = 0; ifeature < nfeatures; ifeature++) // printf("%8d ", features_int[ipoint][ifeature]); // printf("\n"); // } // printf("\n"); /* DEBUG : print features maxes */ // printf("features max:\n"); // for (ifeature = 0; ifeature < nfeatures; ifeature++){ // int max_features_int = 0; // for (ipoint = 0; ipoint < npoints; ipoint++){ // if (features_int[ipoint][ifeature] > max_features_int) // max_features_int = features_int[ipoint][ifeature]; // } // printf("%d ", max_features_int); // } // printf("\n"); /* ***** discretization end ***** */ /* compute variance by feature */ #pragma omp parallel for collapse(2) \ reduction(+ \ : variance[:p->nfeatures]) for (ipoint = 0; ipoint < p->npoints; ipoint++) for (ifeature = 0; ifeature < p->nfeatures; ifeature++) variance[ifeature] += features_float[ipoint][ifeature] * features_float[ipoint][ifeature]; #pragma omp parallel for for (ifeature = 0; ifeature < p->nfeatures; ifeature++) variance[ifeature] /= p->npoints; /* compute average of variance */ avg_variance = 0; #pragma omp parallel for reduction(+ \ : avg_variance) for (ifeature = 0; ifeature < p->nfeatures; ifeature++) avg_variance += variance[ifeature]; avg_variance /= p->nfeatures; p->threshold *= avg_variance; #ifdef PERF_COUNTER /* compute time spent on preprocessing */ gettimeofday(&toc, NULL); printf("preprocessing time: %f seconds\n\n", time_seconds(tic, toc)); #endif if (verbose) { printf("avg_variance = %.4f\n", avg_variance); printf("threshold = %.4f\n", p->threshold); printf("\npreprocessing completed\n\n"); } free(variance); /* DEBUG */ // printf("means:"); // for(i = 0; i< nfeatures; i++) // { // printf("%d ", mean[i]); // } // printf("\n"); // for(i = 0; i < 5; i++) // { // for(j = 0; j< nfeatures; j++) // { // printf("%d ", features_int[i][j]); // } // printf("\n"); // } *features_int_out = features_int; } /** * @brief Restores the input data to its original state. * * @param p [in] Algorithm parameters. 
* @param features [in,out] array of features */ void postprocessing(Params *p, float **features_float) { #pragma omp parallel for collapse(2) for (uint64_t ipoint = 0; ipoint < p->npoints; ipoint++) for (int ifeature = 0; ifeature < p->nfeatures; ifeature++) features_float[ipoint][ifeature] += p->mean[ifeature]; } /** * @brief Checks for errors in the input * * @param p Algorithm parameters. */ static void error_check(Params *p) { if (p->npoints < p->min_nclusters) { printf("Error: min_nclusters(%d) > npoints(%lu) -- cannot proceed\n", p->min_nclusters, p->npoints); exit(EXIT_FAILURE); } if ((p->max_nclusters < p->min_nclusters) || (p->max_nclusters > ASSUMED_NR_CLUSTERS)) { printf("Error: min_nclusters(%d) > max_nclusters(%lu) or max_nclusters > max clusters allowed(%d) -- cannot proceed\n", p->min_nclusters, p->npoints, ASSUMED_NR_CLUSTERS); exit(EXIT_FAILURE); } if (ASSUMED_NR_FEATURES < p->nfeatures) { printf("Error: nfeatures(%d) > max clusters allowed(%d) -- cannot proceed\n", p->nfeatures, ASSUMED_NR_FEATURES); exit(EXIT_FAILURE); } if (p->npadded * p->nfeatures / p->ndpu > MAX_FEATURE_DPU) { printf("Error: npadded*nfeatures/ndpu(%lu) > max features allowed per dpu(%d) -- cannot proceed\n", p->npadded * p->nfeatures / p->ndpu, MAX_FEATURE_DPU); exit(EXIT_FAILURE); } } /** * @brief Output to array. * * @param p Algorithm parameters. * @param best_nclusters [in] Best number of clusters according to RMSE. * @param cluster_centres [in] Coordinate of clusters centres for the best iteration. * @return float* The return array */ static float *array_output(Params *p, int best_nclusters, float **cluster_centres) { #pragma omp parallel for collapse(2) for (int icluster = 0; icluster < best_nclusters; icluster++) for (int ifeature = 0; ifeature < p->nfeatures; ifeature++) cluster_centres[icluster][ifeature] = cluster_centres[icluster][ifeature] + p->mean[ifeature]; return cluster_centres[0]; } /** * @brief Output to the command line. 
* */ static void cli_output( Params *p, /**< Algorithm parameters */ float **cluster_centres, /**< [in] coordinate of clusters centres for the best iteration */ float rmse, /**< [in] value of the RMSE for the best iteration */ int index) /**< [in] number of trials for the best RMSE */ { /* print cluster center coordinates */ if (p->min_nclusters == p->max_nclusters) { printf("\n================= Centroid Coordinates =================\n"); for (int icluster = 0; icluster < p->max_nclusters; icluster++) { printf("%2d:", icluster); for (int ifeature = 0; ifeature < p->nfeatures; ifeature++) printf(" % 10.6f", cluster_centres[icluster][ifeature]); printf("\n"); } } printf("Number of Iteration: %d\n", p->nloops); if (p->min_nclusters == p->max_nclusters && p->isRMSE) { if (p->nloops != 1) { // single k, multiple iteration printf("Number of trials to approach the best RMSE of %.3f is %d\n", rmse, index + 1); } else { // single k, single iteration printf("Root Mean Squared Error: %.3f\n", rmse); } } } /** * @brief Main function for the KMeans algorithm. * * @return float* The centroids coordinates found by the algorithm. */ float *kmeans_c( Params *p, /**< Algorithm parameters */ float **features_float, /**< [in] array of features */ int_feature **features_int, /**< [in] array of quantized features */ int *log_iterations, /**< [out] Number of iterations per nclusters */ double *log_time, /**< [out] Time taken per nclusters */ int *best_nclusters) /**< [out] best number of clusters according to RMSE */ { /* Variables for I/O. */ float *output_clusters; /* return pointer */ /* Data arrays. */ float **cluster_centres = NULL; /* array of centroid coordinates */ /* Generated values. 
*/ int index; /* number of iterations on the best run */ float rmse; /* RMSE value */ if (p->isOutput) { printf("\nNumber of objects without padding: %lu\n", p->npoints); printf("Number of objects with padding: %lu\n", p->npadded); printf("Number of features: %d\n", p->nfeatures); printf("Number of DPUs: %d\n", p->ndpu); } /* Error check for clusters. */ error_check(p); /* ======================= core of the clustering ===================*/ cluster_centres = NULL; index = cluster( p, /* Algorithm parameters */ features_float, /* [in] array: [npoints][nfeatures] */ features_int, /* [in] array: [npoints][nfeatures] */ best_nclusters, /* [out] number between min and max */ &cluster_centres, /* [out] [best_nclusters][nfeatures] */ &rmse, /* [out] Root Mean Squared Error */ log_iterations, /* [out] log of the number of iterations */ log_time /* [out] log of the time taken */ ); /* =============== Array Output ====================== */ output_clusters = array_output(p, *best_nclusters, cluster_centres); /* =============== Command Line Output =============== */ if (p->isOutput) cli_output(p, cluster_centres, rmse, index); free(cluster_centres); return output_clusters; }
elemwise_binary_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file elemwise_binary_op.h * \brief Function definition of elementwise binary operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #include <mxnet/operator_util.h> #include <mxnet/op_attr_types.h> #include <vector> #include <string> #include <utility> #include <typeinfo> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../../engine/openmp.h" #include "elemwise_unary_op.h" #include "../../common/utils.h" #include "./init_op.h" namespace mxnet { namespace op { /*! Gather binary operator functions into ElemwiseBinaryOp class */ class ElemwiseBinaryOp : public OpBase { public: /*! \brief For sparse, assume missing rvalue is 0 */ template<typename OP, int Req> struct MissingRValueOp { typedef OP Operation; template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0))); } }; /*! 
\brief For sparse, assume missing lvalue is 0 */
  template<typename OP, int Req>
  struct MissingLValueOp {
    typedef OP Operation;
    /*! \brief Apply OP with a zero left operand; rhs supplies the only real input */
    template<typename DType>
    MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) {
      KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i]));
    }
  };

 private:
  /*! \brief CSR operation requires temp space */
  enum ResourceRequestType {
    kTempSpace
  };

  /*!
   * \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input
   *        CPU-Only version
   * \param s stream to fill on
   * \param idx_l next row index available on the lhs
   * \param idx_r next row index available on the rhs
   * \param req assignment request type for the output rows
   * \param out dense 2-D output tensor
   * \param iter_out first output row that has not been written yet
   * \return the row index up to which the output is now filled
   */
  template<typename DType, typename OP, typename xpu>
  static inline size_t FillDense(mshadow::Stream<xpu> *s,
                                 const size_t idx_l,
                                 const size_t idx_r,
                                 const OpReqType req,
                                 mshadow::Tensor<xpu, 2, DType> *out,
                                 const size_t iter_out) {
    const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
    if (static_cast<size_t>(index_out_min) > iter_out) {
      // Rows in [iter_out, index_out_min) have neither lhs nor rhs data; they all
      // receive the constant OP(0, 0), so compute it once and fill in parallel.
      const DType zero_input_val = OP::Map(DType(0), DType(0));
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
      for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
        Fill<false>(s, (*out)[i], req, zero_input_val);
      }
    }
    return static_cast<size_t>(index_out_min);  // MSVC wants OMP loops to always use 'int'
  }

  /*! \brief True when both NDArrays are backed by the same engine variable */
  static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) {
    return a1.var() == a2.var();
  }

 public:
  /*! \brief Minimum of three */
  static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) {
    return a < b ? (a < c ? a : c) : (b < c ? b : c);
  }

 private:
  /*!
   * \brief Dense backward when the op needs no forward inputs (identity-style grads).
   *        When LOP/ROP is identity and the grad is written in place, the kernel
   *        launch is skipped entirely (the data is already correct).
   */
  template<typename LOP, typename ROP>
  static void BackwardUseNone_(const nnvm::NodeAttrs &attrs,
                               mshadow::Stream<cpu>* s,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      using namespace mxnet_op;
      const int size = static_cast<int>(
        (outputs[0].Size() + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes);
      const DType *ograd_dptr = inputs[0].dptr<DType>();
      if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) {
        // identity in-place: output already holds ograd; just sanity-check aliasing
        CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>());
      } else if (req[0] != kNullOp) {
        DType *lgrad_dptr = outputs[0].dptr<DType>();
        MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
          Kernel<mxnet_op::op_with_req<LOP, Req>, cpu>::Launch(s, size, lgrad_dptr, ograd_dptr);
        });
      }
      if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) {
        CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>());
      } else if (req[1] != kNullOp) {
        DType *rgrad_dptr = outputs[1].dptr<DType>();
        MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
          Kernel<mxnet_op::op_with_req<ROP, Req>, cpu>::Launch(s, size, rgrad_dptr, ograd_dptr);
        });
      }
    });
  }
#if MXNET_USE_CUDA
  /*! \brief GPU counterpart; defined in the .cuh included at the bottom of this header */
  template<typename LOP, typename ROP>
  static void BackwardUseNone_(const nnvm::NodeAttrs &attrs,
                               mshadow::Stream<gpu>* s,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs);
#endif

  /*!
   * \brief Dense backward when the op needs the forward inputs:
   *        inputs = { ograd, lhs, rhs }, outputs = { lhs_grad, rhs_grad }.
   */
  template<typename LOP, typename ROP>
  static void BackwardUseIn_(const nnvm::NodeAttrs &attrs,
                             mshadow::Stream<cpu>* s,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      DCHECK_EQ(outputs.size(), 2U);
      DCHECK_EQ(inputs.size(), 3U);
      const DType *ograd_dptr = inputs[0].dptr<DType>();
      const DType *lhs_dptr = inputs[1].dptr<DType>();
      const DType *rhs_dptr = inputs[2].dptr<DType>();
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        const int size = static_cast<int>(
          (outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1)
          / mxnet_op::DataType<DType>::kLanes);
        DType * lgrad_dptr = outputs[0].dptr<DType>();
        mxnet_op::Kernel<
          mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>, cpu>::Launch(
            s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);
      });
      MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
        const int size = static_cast<int>(
          (outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1)
          / mxnet_op::DataType<DType>::kLanes);
        DType * rgrad_dptr = outputs[1].dptr<DType>();
        mxnet_op::Kernel<
          mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>, cpu>::Launch(
            s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);
      });
    });
  }
#if MXNET_USE_CUDA
  template<typename LOP, typename ROP>
  static void BackwardUseIn_(const nnvm::NodeAttrs &attrs,
                             mshadow::Stream<gpu>* s,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs);
#endif

  /*!
   * \brief Row-sparse backward: grad = OP(lhs, rhs) * ograd, computed in place on
   *        each output via two RspRspOp passes.
   * NOTE(review): backup_compute is accepted but never used in this body — presumably
   * a fallback hook; confirm against callers before removing.
   */
  template<
    typename xpu,
    typename LOP,
    typename ROP,
    bool in0_ok_dense = false,
    bool in1_ok_dense = false,
    bool in2_ok_dense = false,
    typename BackupCompute>
  static inline void RspRspOpBackward(const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const std::vector<NDArray> &inputs,
                                      const std::vector<OpReqType> &req,
                                      const std::vector<NDArray> &outputs,
                                      BackupCompute backup_compute) {
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    // lhs grad
    if (req[0] != kNullOp) {
      // RspRspOp can handle dense outputs so long as OP(0, 0) == 0
      RspRspOp<LOP>(
        s, attrs, ctx, inputs[1], inputs[2],
        req[0], outputs[0],
        false, false, false, false);
      // lhs in-place
      RspRspOp<op::mshadow_op::mul>(
        s, attrs, ctx, outputs[0], inputs[0],
        req[0], outputs[0],
        false, false, true, false);
    }
    // rhs grad
    if (req[1] != kNullOp) {
      RspRspOp<ROP>(
        s, attrs, ctx, inputs[1], inputs[2],
        req[1], outputs[1],
        false, false, false, false);
      // rhs in-place
      RspRspOp<op::mshadow_op::mul>(
        s, attrs, ctx, inputs[0], outputs[1],
        req[1], outputs[1],
        false, false, true, false);
    }
  }

  /*!
   * \brief Backward for dns/csr multiply: only mul is supported (LOP == right,
   *        ROP == left). The csr-shaped grad goes through DnsCsrCsrOp, the
   *        dense-shaped grad through the dense Compute path.
   */
  template<typename xpu,
           typename LOP,
           typename ROP>
  static inline void DnsCsrCsrOpBackward(const nnvm::NodeAttrs &attrs,
                                         const OpContext &ctx,
                                         const std::vector<NDArray> &inputs,
                                         const std::vector<OpReqType> &req,
                                         const std::vector<NDArray> &outputs) {
    const bool supported_ops = std::is_same<mshadow_op::right, LOP>::value &&
                               std::is_same<mshadow_op::left, ROP>::value;
    CHECK(supported_ops)
      << "Only backward for mul is supported (LOP should be right, ROP should be left)";
    const NDArray& out_grad = inputs[0];
    const NDArray& lhs_in = inputs[1];
    const NDArray& rhs_in = inputs[2];
    const NDArray& lhs_grad = outputs[0];
    const NDArray& rhs_grad = outputs[1];
    // reverse == true means the lhs was the csr operand in the forward pass
    const bool reverse = (outputs[0].storage_type() == kCSRStorage);
    if (reverse) {
      DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, rhs_in, req[0], lhs_grad, false);
      Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), lhs_in.data()},
                                    {req[1]}, {rhs_grad.data()});
    } else {
      DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, lhs_in, req[1], rhs_grad, false);
      Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), rhs_in.data()},
                                    {req[0]}, {lhs_grad.data()});
    }
  }

 public:
  /*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
  template<typename OP>
  static void RspRspOp(mshadow::Stream<cpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output,
                       bool lhs_may_be_dense,
                       bool rhs_may_be_dense,
                       bool allow_inplace,
                       bool scatter);

  /*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
  template<typename OP>
  static void RspRspOp(mshadow::Stream<gpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output,
                       bool lhs_may_be_dense,
                       bool rhs_may_be_dense,
                       bool allow_inplace,
                       bool scatter);

  /*!
\brief CSR -op- CSR binary operator for non-canonical NDArray */
  template<typename OP>
  static void CsrCsrOp(mshadow::Stream<cpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output);

  /*! \brief CSR -op- CSR binary operator for non-canonical NDArray (GPU stream overload) */
  template<typename OP>
  static void CsrCsrOp(mshadow::Stream<gpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output);

  /*! \brief DNS -op- CSR binary operator for non-canonical NDArray
   *         (reverse == true swaps the operand roles) */
  template<typename OP>
  static void DnsCsrDnsOp(mshadow::Stream<cpu> *s,
                          const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);

  /*! \brief DNS -op- CSR binary operator for non-canonical NDArray (GPU stream overload) */
  template<typename OP>
  static void DnsCsrDnsOp(mshadow::Stream<gpu> *s,
                          const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);

  /*! \brief DNS -op- CSR binary operator producing a CSR result; no stream
   *         parameter — the stream is taken from ctx */
  template<typename xpu, typename OP>
  static void DnsCsrCsrOp(const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);

  /*! \brief DNS -op- RSP binary operator for non-canonical NDArray */
  template<typename xpu, typename OP>
  static void DnsRspDnsOp(mshadow::Stream<xpu> *s,
                          const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);

 public:
  /*!
 * \brief Rsp-op-Rsp operation which produces a dense result
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
                                          int dev_mask,
                                          DispatchMode* dispatch_mode,
                                          std::vector<int> *in_attrs,
                                          std::vector<int> *out_attrs);

  /*!
   * \brief Allow one of the binary inputs to be dense and still produce a sparse output.
   *        Typically used for sparse * dense = sparse.
   *        Note: for csr, it dispatches to fallback other than csr, csr -> csr
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  static bool PreferSparseStorageType(const nnvm::NodeAttrs& attrs,
                                      int dev_mask,
                                      DispatchMode* dispatch_mode,
                                      std::vector<int> *in_attrs,
                                      std::vector<int> *out_attrs) {
    using namespace common;
    CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
    CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
    const auto& lhs_stype = in_attrs->at(0);
    const auto& rhs_stype = in_attrs->at(1);
    auto& out_stype = out_attrs->at(0);
    bool dispatched = false;
    // FComputeEx sparse kernels exist only on CPU; elsewhere fall back
    const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
    const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
                                           DispatchMode::kFComputeEx;
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
      // dns, dns -> dns
      dispatched = storage_type_assign(&out_stype, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFCompute);
    }
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
      // rsp, rsp -> rsp
      dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
                                       dispatch_mode, dispatch_ex);
    }
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
      // csr, csr -> csr
      dispatched = storage_type_assign(&out_stype, kCSRStorage,
                                       dispatch_mode, dispatch_ex);
    }
    if (!dispatched &&
        ((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
         (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage))) {
      // rsp, dns -> rsp
      // dns, rsp -> rsp
      dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
                                       dispatch_mode, dispatch_ex);
    }
    if (!dispatched &&
        ((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
         (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage))) {
      // csr, dns -> csr
      // dns, csr -> csr
      // note: unconditionally FComputeEx here (not dispatch_ex)
      dispatched = storage_type_assign(&out_stype, kCSRStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched) {
      dispatched = dispatch_fallback(out_attrs, dispatch_mode);
    }
    return dispatched;
  }

  /*!
   * \brief Allow one of the inputs to be dense and produce a dense output,
   *        for rsp inputs only support when both inputs are rsp type.
 * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  template<bool cpu_only, bool rsp, bool csr>
  static bool PreferDenseStorageType(const nnvm::NodeAttrs& attrs,
                                     const int dev_mask,
                                     DispatchMode* dispatch_mode,
                                     std::vector<int> *in_attrs,
                                     std::vector<int> *out_attrs) {
    using namespace common;
    CHECK_EQ(in_attrs->size(), 2);
    CHECK_EQ(out_attrs->size(), 1);
    const auto lhs_stype = (*in_attrs)[0];
    const auto rhs_stype = (*in_attrs)[1];
    bool dispatched = false;
    const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask;
    const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
                                           DispatchMode::kFComputeEx;
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
      // dns, dns ... -> dns
      dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFCompute);
    }
    if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
      // rsp, rsp, ... -> rsp
      dispatched = storage_type_assign(out_attrs, kRowSparseStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched && csr && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
      // csr, csr, ... -> csr
      dispatched = storage_type_assign(out_attrs, kCSRStorage,
                                       dispatch_mode, dispatch_ex);
    }
    if (!dispatched &&
        ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) ||
         (lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) {
      // dense, csr -> dense / csr, dense -> dense
      dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched &&
        ((lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage) ||
         (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage))) {
      // dense, rsp -> dense / rsp, dense -> dense
      dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched) {
      // NOTE(review): the result of dispatch_fallback is discarded and this
      // function always returns true, unlike PreferSparseStorageType above which
      // returns its dispatched flag — confirm this asymmetry is intentional.
      dispatch_fallback(out_attrs, dispatch_mode);
    }
    return true;
  }

  /*!
   * \brief Backward pass computing input gradient using forward inputs
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
                                       int dev_mask,
                                       DispatchMode* dispatch_mode,
                                       std::vector<int> *in_attrs,
                                       std::vector<int> *out_attrs);

  /*! \brief Dense forward restricted to integer dtypes (MXNET_INT_TYPE_SWITCH) */
  template<typename xpu, typename OP>
  static void ComputeInt(const nnvm::NodeAttrs &attrs,
                         const OpContext &ctx,
                         const std::vector<TBlob> &inputs,
                         const std::vector<OpReqType> &req,
                         const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        // round the element count up to a whole number of SIMD lanes
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
        + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }

  /*! \brief Dense forward on CPU for all non-boolean dtypes */
  template<typename OP>
  static void Compute_(const nnvm::NodeAttrs &attrs,
                       mshadow::Stream<cpu> *s,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    if (outputs[0].type_flag_ == mshadow::kBool) {
      LOG(FATAL) << "Operator " << attrs.op->name << " does not support boolean type";
    }
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
        + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }
#if MXNET_USE_CUDA
  template<typename OP>
  static void Compute_(const nnvm::NodeAttrs &attrs,
                       mshadow::Stream<gpu> *s,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs);
#endif

  /*! \brief Dense forward entry point; extracts the stream and forwards to Compute_ */
  template<typename xpu, typename OP>
  static void Compute(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
    if (req[0] == kNullOp) return;
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    Compute_<OP>(attrs, s, inputs, req, outputs);
  }

  /*! \brief Dense forward that additionally accepts boolean dtypes */
  template<typename xpu, typename OP>
  static void ComputeWithBool(const nnvm::NodeAttrs &attrs,
                              const OpContext &ctx,
                              const std::vector<TBlob> &inputs,
                              const std::vector<OpReqType> &req,
                              const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
        + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }

  /*! \brief Logical/comparison forward: inputs of any dtype, output is always bool
   *         (note the switch is on inputs[0].type_flag_, output uses dptr<bool>) */
  template<typename xpu, typename OP>
  static void ComputeLogic(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
        + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<bool>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }

  /*! \brief Sparse forward dispatcher: routes each storage-type combination to
   *         the matching specialized kernel, falling back to LogUnimplementedOp */
  template<typename xpu, typename OP>
  static void ComputeEx(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const std::vector<NDArray> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<NDArray> &outputs) {
    using namespace common;
    CHECK_EQ(inputs.size(), 2);
    CHECK_EQ(outputs.size(), 1);
    if (req[0] == kNullOp) return;
    const auto lhs_stype = inputs[0].storage_type();
    const auto rhs_stype = inputs[1].storage_type();
    const auto out_stype = outputs[0].storage_type();
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    if ((ContainsOnlyStorage(inputs, kRowSparseStorage)) &&
        (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) {
      // rsp, rsp -> rsp
      // rsp, rsp -> dns
      RspRspOp<OP>(
        s, attrs, ctx, inputs[0], inputs[1],
        req[0], outputs[0],
        false, false, false, false);
    } else if (ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) {
      // csr, csr -> csr
      CsrCsrOp<OP>(s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
    } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
                (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
               out_stype == kDefaultStorage) {
      const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
      const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
      const bool reverse = (lhs_stype == kCSRStorage);
      DnsCsrDnsOp<OP>(s, attrs, ctx, dns, csr, req[0], outputs[0], reverse);
    } else if (((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
                (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
               out_stype == kDefaultStorage) {
      const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
      const bool reverse = (lhs_stype == kRowSparseStorage);
      const NDArray& rsp = (reverse)? inputs[0] : inputs[1];
      DnsRspDnsOp<xpu, OP>(s, attrs, ctx, dns, rsp, req[0], outputs[0], reverse);
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }

  /*! \brief ComputeEx allowing dense lvalue and/or rvalue */
  template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
  static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs,
                                  const OpContext &ctx,
                                  const std::vector<NDArray> &inputs,
                                  const std::vector<OpReqType> &req,
                                  const std::vector<NDArray> &outputs) {
    using namespace mshadow;
    using namespace mshadow::expr;
    CHECK_EQ(inputs.size(), 2);
    CHECK_EQ(outputs.size(), 1);
    if (req[0] == kNullOp) return;
    const auto lhs_stype = inputs[0].storage_type();
    const auto rhs_stype = inputs[1].storage_type();
    const auto out_stype = outputs[0].storage_type();
    if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) &&
        ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
         (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
         (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
        lhs_may_be_dense && rhs_may_be_dense) {
      // rsp, rsp -> rsp
      // rsp, rsp -> dns
      // rsp, dns -> rsp
      // dns, rsp -> rsp
      // More than once dense not allowed (this will be checked in RspRspOp):
      // rsp, dns -> dns <-- NOT ALLOWED
      // dns, rsp -> dns <-- NOT ALLOWED
      mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
      RspRspOp<OP>(
        s, attrs, ctx, inputs[0], inputs[1],
        req[0], outputs[0],
        lhs_may_be_dense, rhs_may_be_dense, false, false);
    } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
      ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
    } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
                (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
               out_stype == kCSRStorage) {
      const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
      const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
      const bool reverse = (lhs_stype == kCSRStorage);
      DnsCsrCsrOp<xpu, OP>(attrs, ctx, dns, csr, req[0], outputs[0], reverse);
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }

  /*! \brief Dense backward not using forward inputs; thin wrapper over BackwardUseNone_ */
  template<typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs,
                                     const OpContext &ctx,
                                     const std::vector<TBlob> &inputs,
                                     const std::vector<OpReqType> &req,
                                     const std::vector<TBlob> &outputs) {
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    BackwardUseNone_<LOP, ROP>(attrs, s, inputs, req, outputs);
  }

  /*! \brief Sparse backward not using forward inputs; only handles ograd and grad
   *         sharing the same sparse storage type, otherwise logs unimplemented */
  template<typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs,
                                       const OpContext &ctx,
                                       const std::vector<NDArray> &inputs,
                                       const std::vector<OpReqType> &req,
                                       const std::vector<NDArray> &outputs) {
    CHECK_EQ(inputs.size(), 1U);   // output grad
    CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
    const auto in_stype = inputs[0].storage_type();
    const auto lhs_stype = outputs[0].storage_type();
    const auto rhs_stype = outputs[1].storage_type();
    // lhs grad
    if (req[0] != kNullOp) {
      if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
        CHECK_EQ(outputs[0].storage_type(), in_stype);
        // rsp -> rsp, _. op requires 0-input returns 0-output
        DCHECK_LT(std::fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
        UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
      } else {
        LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
      }
    }
    // rhs grad
    if (req[1] != kNullOp) {
      if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
        // NOTE(review): this branch guards on rhs_stype but re-checks outputs[0];
        // looks like a copy-paste of the lhs branch (expected outputs[1]) — confirm.
        CHECK_EQ(outputs[0].storage_type(), in_stype);
        // rsp -> _, rsp. op requires 0-input returns 0-output
        DCHECK_LT(std::fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
        UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
      } else {
        LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
      }
    }
  }

  /*! \brief Dense backward using forward inputs; thin wrapper over BackwardUseIn_ */
  template<typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs,
                                   const OpContext &ctx,
                                   const std::vector<TBlob> &inputs,
                                   const std::vector<OpReqType> &req,
                                   const std::vector<TBlob> &outputs) {
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    BackwardUseIn_<LOP, ROP>(attrs, s, inputs, req, outputs);
  }

  /*! \brief Sparse backward using forward inputs:
   *         inputs = { ograd, lhs, rhs }, outputs = { lhs_grad, rhs_grad } */
  template<
    typename xpu, typename LOP, typename ROP,
    bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false>
  static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs,
                                     const OpContext &ctx,
                                     const std::vector<NDArray> &inputs,
                                     const std::vector<OpReqType> &req,
                                     const std::vector<NDArray> &outputs) {
    using namespace common;
    CHECK_EQ(inputs.size(), 3U);
    CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
    const auto out_grad_stype = inputs[0].storage_type();
    const auto lhs_grad_stype = outputs[0].storage_type();
    const auto rhs_grad_stype = outputs[1].storage_type();
    if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
        (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
        (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
      // rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
      RspRspOpBackward<xpu, LOP, ROP, in0_ok_dense, in1_ok_dense, in2_ok_dense>(
        attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
    }
    if (((lhs_grad_stype == kDefaultStorage && rhs_grad_stype == kCSRStorage) ||
         (lhs_grad_stype == kCSRStorage && rhs_grad_stype == kDefaultStorage)) &&
        out_grad_stype == kDefaultStorage) {
      // dns, csr, dns -> [csr, dns] / csr, dns, dns -> [dns, csr]
      DnsCsrCsrOpBackward<xpu, LOP, ROP>(attrs, ctx, inputs, req, outputs);
    }
  }
};  // class ElemwiseBinaryOp

/*! \brief Binary launch */
#define MXNET_OPERATOR_REGISTER_BINARY(name)                        \
  NNVM_REGISTER_OP(name)                                            \
  .set_num_inputs(2)                                                \
  .set_num_outputs(1)                                               \
  .set_attr<nnvm::FListInputNames>("FListInputNames",               \
    [](const NodeAttrs& attrs) {                                    \
      return std::vector<std::string>{"lhs", "rhs"};                \
    })                                                              \
  .set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \
  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>)     \
  .set_attr<nnvm::FInplaceOption>("FInplaceOption",                 \
    [](const NodeAttrs& attrs){                                     \
      return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}};     \
    })                                                              \
  .add_argument("lhs", "NDArray-or-Symbol", "first input")          \
  .add_argument("rhs", "NDArray-or-Symbol", "second input")

/*! \brief Binary launch, with FComputeEx for csr and rsp available */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$)            \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                             \
  .set_attr<FInferStorageType>("FInferStorageType",                                   \
    ElemwiseStorageType<2, 1, true, true, true>)                                      \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)     \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
  .set_attr<FResourceRequest>("FResourceRequest",  /* For Sparse CSR */               \
    [](const NodeAttrs& attrs) {                                                      \
      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})

/*! \brief Binary launch, with FComputeEx for csr and rsp available.
    when inputs contain both sparse and dense, sparse output is preferred. */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PS(__name$, __kernel$)         \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                             \
  .set_attr<FInferStorageType>("FInferStorageType",                                   \
    ElemwiseBinaryOp::PreferSparseStorageType)                                        \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)     \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
  .set_attr<FResourceRequest>("FResourceRequest",  /* For Sparse CSR */               \
    [](const NodeAttrs& attrs) {                                                      \
      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})

/*! \brief Binary launch, dense result
 *         FInferStorageType attr is not set using this macro.
 *         By default DefaultStorageType is used.
 */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$)         \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                             \
  .set_attr<FInferStorageType>("FInferStorageType",                                   \
    ElemwiseBinaryOp::SparseSparseWithDenseResult)                                    \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)     \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)

/*! \brief Binary launch, with FComputeEx for prefer dense */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PD(__name$, __kernel$)         \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                             \
  .set_attr<FInferStorageType>("FInferStorageType",                                   \
    ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>)                       \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)     \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
  .set_attr<FResourceRequest>("FResourceRequest",  /* For Sparse CSR */               \
    [](const NodeAttrs& attrs) {                                                      \
      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})

}  // namespace op
}  // namespace mxnet

#ifdef __CUDACC__
#include "elemwise_binary_op.cuh"
#endif  // __CUDACC__
#endif  // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
sc_demo.c
/* main.c * Created by Mengyao Zhao on 06/23/11. * Version 0.1.5 * Last revision by Mengyao Zhao on 06/27/14. */ #include <stdlib.h> #include <stdint.h> #include <emmintrin.h> #include <zlib.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <string.h> #include <math.h> #include <unistd.h> #include "ssw.h" #include "kseq.h" #include <omp.h> #ifdef __GNUC__ #define LIKELY(x) __builtin_expect((x),1) #define UNLIKELY(x) __builtin_expect((x),0) #else #define LIKELY(x) (x) #define UNLIKELY(x) (x) #endif /*! @function @abstract Round an integer to the next closest power-2 integer. @param x integer to be rounded (in place) @discussion x will be modified. */ #define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x)) KSEQ_INIT(gzFile, gzread); // l is length // m is allocated memory void genSeq(kseq_t *read, kseq_t *ref) { const int reflen = 256; const int readlen = 128; int baseidx; read->name.s = strdup("READ"); read->seq.s = (char*) malloc(readlen*sizeof(char)+1); read->seq.m = readlen; ref->name.s = strdup("REF"); ref->seq.s = (char*) malloc(reflen*sizeof(char)+1); ref->seq.m = reflen; char bases[5] = "ACTG"; for(baseidx = 0; baseidx < reflen; ++baseidx) { char b = bases[rand()%4]; ref->seq.s[baseidx] = b; } ref->seq.l = reflen; for(baseidx = 0; baseidx < readlen; ++baseidx) { char b = bases[rand()%4]; read->seq.s[baseidx] = b; } read->seq.l = readlen; ref->seq.s[reflen] = '\0'; read->seq.s[readlen] = '\0'; /*printf("READ: %s\nREF: %s\n",read->seq.s,ref->seq.s);*/ } void freeSeq(kseq_t *seq) { free(seq->name.s); free(seq->seq.s); } static void ssw_write (const s_align* a, const kseq_t* ref_seq, const kseq_t* read, const char* read_seq, // strand == 0: original read; strand == 1: reverse complement read const int8_t* table, int8_t strand) { // 0: forward aligned ; 1: reverse complement aligned //fprintf(stdout, "target_name: %s\nquery_name: %s\noptimal_alignment_score: %d\t", ref_seq->name.s, read->name.s, 
a->score1); //if (a->score2 > 0) fprintf(stdout, "suboptimal_alignment_score: %d\t", a->score2); //if (strand == 0) fprintf(stdout, "strand: +\t"); //else fprintf(stdout, "strand: -\t"); //if (a->ref_begin1 + 1) fprintf(stdout, "target_begin: %d\t", a->ref_begin1 + 1); //fprintf(stdout, "target_end: %d\t", a->ref_end1 + 1); //if (a->read_begin1 + 1) fprintf(stdout, "query_begin: %d\t", a->read_begin1 + 1); //fprintf(stdout, "query_end: %d\n\n", a->read_end1 + 1); if (a->cigar) { int32_t c = 0, left = 0, e = 0, qb = a->ref_begin1, pb = a->read_begin1; uint32_t i; while (e < a->cigarLen || left > 0) { int32_t count = 0; int32_t q = qb; int32_t p = pb; fprintf(stdout, "Target: %8d ", q + 1); for (c = e; c < a->cigarLen; ++c) { char letter = cigar_int_to_op(a->cigar[c]); uint32_t length = cigar_int_to_len(a->cigar[c]); uint32_t l = (count == 0 && left > 0) ? left: length; for (i = 0; i < l; ++i) { if (letter == 'I') fprintf(stdout, "-"); else { fprintf(stdout, "%c", *(ref_seq->seq.s + q)); ++ q; } ++ count; if (count == 60) goto step2; } } step2: fprintf(stdout, " %d\n ", q); q = qb; count = 0; for (c = e; c < a->cigarLen; ++c) { char letter = cigar_int_to_op(a->cigar[c]); uint32_t length = cigar_int_to_len(a->cigar[c]); uint32_t l = (count == 0 && left > 0) ? left: length; for (i = 0; i < l; ++i){ if (letter == 'M') { if (table[(int)*(ref_seq->seq.s + q)] == table[(int)*(read_seq + p)])fprintf(stdout, "|"); else fprintf(stdout, "*"); ++q; ++p; } else { fprintf(stdout, " "); if (letter == 'I') ++p; else ++q; } ++ count; if (count == 60) { qb = q; goto step3; } } } step3: p = pb; fprintf(stdout, "\nQuery: %8d ", p + 1); count = 0; for (c = e; c < a->cigarLen; ++c) { char letter = cigar_int_to_op(a->cigar[c]); uint32_t length = cigar_int_to_len(a->cigar[c]); uint32_t l = (count == 0 && left > 0) ? 
left: length; for (i = 0; i < l; ++i) { if (letter == 'D') fprintf(stdout, "-"); else { fprintf(stdout, "%c", *(read_seq + p)); ++p; } ++ count; if (count == 60) { pb = p; left = l - i - 1; e = (left == 0) ? (c + 1) : c; goto end; } } } e = c; left = 0; end: fprintf(stdout, " %d\n\n", p); } } } void genSSWData(int niter, int numsample, kseq_t **read, kseq_t **ref){ //seed randrom to get more chaotic output srand (time(NULL)); kseq_t *testread = (kseq_t *)malloc(sizeof(kseq_t)*niter*numsample); kseq_t *testref = (kseq_t *)malloc(sizeof(kseq_t)*niter*numsample); int ii; for(ii =0; ii < niter*numsample; ++ii){ genSeq(&testread[ii], &testref[ii]); } *read = testread; *ref = testref; } void deleteSSWData(int niter, int numsample, kseq_t **read, kseq_t **ref){ kseq_t *testread = *read; kseq_t *testref = *ref; int ii; for(ii =0; ii < niter*numsample; ++ii){ freeSeq(&testread[ii]); freeSeq(&testref[ii]); } free(*read); free(*ref); } float SSW(int numsample, int tid, kseq_t *read, kseq_t *ref, unsigned int *maxr, unsigned int *maxc, unsigned int *maxv){ clock_t start, end; float cpu_time; kseq_t *read_seq, *ref_seq; int32_t l, m, k, match = 2, mismatch = 2, gap_open = 3, gap_extension = 1, path = 0, n = 5, s1 = 67108864, s2 = 128, filter = 0; int8_t* mata = (int8_t*)calloc(25, sizeof(int8_t)); const int8_t* mat = mata; int8_t* ref_num = (int8_t*)malloc(s1); int8_t* num = (int8_t*)malloc(s2), *num_rc = 0; char* read_rc = 0; int total = numsample; float total_cups = 0; /* This table is used to transform nucleotide letters into numbers. 
*/ int8_t nt_table[128] = { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }; int8_t* table = nt_table; fprintf(stdout, "Processing %d samples using Intel Vector Instruction Set in Thread %d\n", numsample, tid); // initialize scoring matrix for genome sequences for (l = k = 0; LIKELY(l < 4); ++l) { for (m = 0; LIKELY(m < 4); ++m) mata[k++] = l == m ? match : -mismatch; /* weight_match : -weight_mismatch */ mata[k++] = 0; // ambiguous base } for (m = 0; LIKELY(m < 5); ++m) mata[k++] = 0; // alignment start = clock(); int ii; for(ii =0; ii < total; ++ii){ // clear screen newline //printf("\033[2J\033[1;1H"); read_seq = &read[ii]; ref_seq = &ref[ii]; { s_profile* p = 0; int32_t readLen = read_seq->seq.l; int32_t maskLen = readLen / 2; while (readLen >= s2) { ++s2; kroundup32(s2); num = (int8_t*)realloc(num, s2); } for (m = 0; m < readLen; ++m) num[m] = table[(int)read_seq->seq.s[m]]; p = ssw_init(num, readLen, mat, n, 2); { s_align* result, *result_rc = 0; int32_t refLen = ref_seq->seq.l; int8_t flag = 0; while (refLen > s1) { ++s1; kroundup32(s1); ref_num = (int8_t*)realloc(ref_num, s1); } for (m = 0; m < refLen; ++m) ref_num[m] = table[(int)ref_seq->seq.s[m]]; if (path == 1) flag = 2; result = ssw_align (p, ref_num, refLen, gap_open, gap_extension, flag, filter, 0, maskLen, &total_cups, &maxr[ii], &maxc[ii], &maxv[ii]); if (result_rc && result_rc->score1 > result->score1 && result_rc->score1 >= filter) { ssw_write (result_rc, ref_seq, read_seq, read_rc, table, 1); }else if (result && result->score1 >= filter){ ssw_write(result, ref_seq, read_seq, read_seq->seq.s, table, 0); } else if (! 
result) return 1; if (result_rc) align_destroy(result_rc); align_destroy(result); } init_destroy(p); } } end = clock(); cpu_time = ((float) (end - start)) / CLOCKS_PER_SEC; float cups = (float)numsample*256*128/cpu_time; //fprintf(stdout, "Total Samples Processed %d in Thread %d\n", numsample, tid); //fprintf(stdout, "Net GCups: %.3f\n", cups/1e9); //fprintf(stdout, "Average Compute GCups: %.3f\n", total_cups/(numsample)); //fprintf(stdout, "CPU time: %f seconds\n", cpu_time); if (num_rc) { free(num_rc); free(read_rc); } //kseq_destroy(read_seq); free(num); free(ref_num); free(mata); return total_cups/(numsample); } long xgetusec() { struct timeval tval_result; gettimeofday(&tval_result, NULL); long retval = tval_result.tv_sec*1e6+tval_result.tv_usec; return retval; } int SSW_par(int nblocks, int nSamples, int nThreads, char **rd, char **rf, unsigned int *maxr, unsigned int *maxc, unsigned int *maxv){ int i; omp_set_num_threads(nThreads); kseq_t *read, *ref; clock_t start, end; start = clock(); printf("Generating samples\n"); genSSWData(nblocks, nSamples, &read, &ref); printf("Done generating %d samples\n", nblocks*nSamples); end = clock(); float cpu_time_read = ((float) (end - start)) / CLOCKS_PER_SEC; printf("Time to generate Samples Secs: %f\n", (float)cpu_time_read); printf("Distributing samples on %d threads\n", nThreads); double ostart = omp_get_wtime(); int ID; int nIter = nThreads; int samples = nblocks*nSamples/nIter; #pragma omp parallel for for(i = 0; i < nIter; ++i) { ID = omp_get_thread_num(); SSW(samples, ID, (read + i*samples), (ref + i*samples), (maxr + i*samples), (maxc + i*samples), (maxv + i*samples)); } double oend = omp_get_wtime(); float Gsamples = 256*128; Gsamples = Gsamples*nSamples*nblocks; Gsamples = Gsamples/(1024*1024*1024); float Gcups = Gsamples/(float)(oend-ostart); printf("Total Cell Updates(G)=%f\n", Gsamples); printf("Total Threads=%d\n", nThreads); printf("Time to complete computation Secs: %f\n", (float)(oend - ostart)); 
printf("Cell updates per second(GCups)=%f\n", Gcups); for(i = 0; i < nblocks*nSamples; ++i){ strcpy(rd[i], read[i].seq.s); strcpy(rf[i], ref[i].seq.s); } deleteSSWData(nblocks, nSamples, &read, &ref); return 0; } /* int main (int argc, char * const argv[]) { clock_t start, end; float cpu_time; kseq_t *read_seq, *ref_seq; int32_t l, m, k, match = 2, mismatch = 2, gap_open = 3, gap_extension = 1, path = 0, n = 5, s1 = 67108864, s2 = 128, filter=0; int8_t* mata = (int8_t*)calloc(25, sizeof(int8_t)); int8_t nt_table[128] = { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }; // Parse command line. while ((l = getopt(argc, argv, "m:x:o:e:a:f:pcrsh")) >= 0) { switch (l) { case 'm': match = atoi(optarg); break; case 'x': mismatch = atoi(optarg); break; case 'o': gap_open = atoi(optarg); break; case 'e': gap_extension = atoi(optarg); break; case 'f': filter = atoi(optarg); break; case 'c': path = 1; break; } } if (0 && optind + 2 > argc) { fprintf(stderr, "\n"); fprintf(stderr, "Usage: ssw_test [options] ... <target.fasta> <query.fasta>(or <query.fastq>)\n"); fprintf(stderr, "Options:\n"); fprintf(stderr, "\t-m N\tN is a positive integer for weight match in genome sequence alignment. [default: 2]\n"); fprintf(stderr, "\t-x N\tN is a positive integer. -N will be used as weight mismatch in genome sequence alignment. [default: 2]\n"); fprintf(stderr, "\t-o N\tN is a positive integer. -N will be used as the weight for the gap opening. [default: 3]\n"); fprintf(stderr, "\t-e N\tN is a positive integer. -N will be used as the weight for the gap extension. 
[default: 1]\n"); fprintf(stderr, "\t-c\tReturn the alignment path.\n"); fprintf(stderr, "\t-f N\tN is a positive integer. Only output the alignments with the Smith-Waterman score >= N.\n"); return 1; } SSW(); } */
zgetrf.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include "plasma_tuning.h"

/***************************************************************************//**
 *
 * LAPACK-layout entry point for the tile getrf routine: validates the
 * arguments, converts pA (column-major, leading dimension lda) into a tile
 * descriptor, runs the asynchronous tile factorization plasma_omp_zgetrf(),
 * and converts the result back.  Pivot indices are written to ipiv.
 *
 * @param[in]     m     number of rows of A (m >= 0)
 * @param[in]     n     number of columns of A (n >= 0)
 * @param[in,out] pA    the m-by-n matrix, overwritten with the factors
 * @param[in]     lda   leading dimension of pA (lda >= max(1, m))
 * @param[out]    ipiv  pivot indices
 *
 * @return PlasmaSuccess on success, a negative value for an illegal argument,
 *         or the error status recorded in the sequence.
 *
 ******************************************************************************/
int plasma_zgetrf(int m, int n,
                  plasma_complex64_t *pA, int lda, int *ipiv)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -4;
    }

    // quick return
    if (imin(m, n) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    // if (plasma->tuning)
    //     plasma_tune_getrf(plasma, PlasmaComplexDouble, m, n);

    int nb = plasma->nb;

    // Initialize barrier.
    plasma_barrier_init(&plasma->barrier);

    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // BUG FIX: release the tile matrix created above; the original code
        // returned here without destroying it, leaking the descriptor.
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;

    // NOTE(review): each stage below opens its own parallel region, so there
    // is an implicit barrier between layout translation, factorization and
    // the translation back.  Upstream PLASMA fuses these into one region;
    // kept separate here to preserve this variant's synchronization behavior.
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);
    }

    #pragma omp parallel
    #pragma omp master
    {
        // Call the tile async function.
        plasma_omp_zgetrf(A, ipiv, sequence, &request);
    }

    #pragma omp parallel
    #pragma omp master
    {
        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, sequence, &request);
    }

    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}

/***************************************************************************//**
 *
 * Asynchronous tile version: checks the descriptor and the sequence/request
 * handles, then submits the parallel tile factorization plasma_pzgetrf().
 * Errors are reported through the request (plasma_request_fail) rather than a
 * return value.
 *
 * @param[in,out] A        tile descriptor of the matrix to factor
 * @param[out]    ipiv     pivot indices
 * @param[in,out] sequence sequence the task set belongs to
 * @param[in,out] request  request used for error reporting
 *
 ******************************************************************************/
void plasma_omp_zgetrf(plasma_desc_t A, int *ipiv,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid A");
        return;
    }
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        // NOTE(review): passes the NULL sequence on to plasma_request_fail --
        // presumably that helper tolerates NULL; verify against its definition.
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.m == 0 || A.n == 0)
        return;

    // Call the parallel function.
    plasma_pzgetrf(A, ipiv, sequence, request);
}
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/OpenMPClause.h" #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. 
/// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. 
Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFENVHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. 
This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// RAII class that manages the template parameter depth. class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } void setAddedDepth(unsigned D) { Depth = Depth - AddedLevels + D; AddedLevels = D; } unsigned getDepth() const { return Depth; } unsigned getOriginalDepth() const { return Depth - AddedLevels; } }; /// Factory object for creating ParsedAttr objects. AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; /// Identifiers which have been declared within a tentative parse. 
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. SpaceBeforeLess = 0x0, /// No space before the '<' token NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name. /// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. 
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc, Priority Prio) { if (!Locs.empty() && Locs.back().isActive(P)) { if (Locs.back().Priority <= Prio) { Locs.back().TemplateName = TemplateName; Locs.back().LessLoc = LessLoc; Locs.back().Priority = Prio; } } else { Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount, P.BracketCount, P.BraceCount}); } } /// Mark the current potential missing template location as having been /// handled (this happens if we pass a "corresponding" '>' or '>>' token /// or leave a bracket scope). void clear(Parser &P) { while (!Locs.empty() && Locs.back().isActiveOrNested(P)) Locs.pop_back(); } /// Get the current enclosing expression that might hve been intended to be /// a template name. Loc *getCurrent(Parser &P) { if (!Locs.empty() && Locs.back().isActive(P)) return &Locs.back(); return nullptr; } }; AngleBracketTracker AngleBrackets; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. /// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; /// Whether to skip parsing of function bodies. /// /// This option can be used, for example, to speed up searches for /// declarations/definitions when indexing. bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. SourceLocation ExprStatementTokLoc; /// Flags describing a context in which we're parsing a statement. enum class ParsedStmtContext { /// This context permits declarations in language modes where declarations /// are not statements. AllowDeclarationsInC = 0x1, /// This context permits standalone OpenMP directives. 
AllowStandaloneOpenMPDirectives = 0x2, /// This context is at the top level of a GNU statement expression. InStmtExpr = 0x4, /// The context of a regular substatement. SubStmt = 0, /// The context of a compound-statement. Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives, LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr) }; /// Act on an expression statement that might be the last statement in a /// GNU statement expression. Checks whether we are actually at the end of /// a statement expression and builds a suitable expression statement. StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx); public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. 
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. 
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.isOneOf(tok::l_square, tok::r_square); } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation(); } /// Returns true if the current token is '=' or is a type of '='. /// For typos, give a fixit to '=' bool isTokenEqualOrEqualTypo(); /// Return the current token to the token stream and make the given /// token the current token. void UnconsumeToken(Token &Consumed) { Token Next = Tok; PP.EnterToken(Consumed, /*IsReinject*/true); PP.Lex(Tok); PP.EnterToken(Next, /*IsReinject*/true); } SourceLocation ConsumeAnnotationToken() { assert(Tok.isAnnotation() && "wrong consume method"); SourceLocation Loc = Tok.getLocation(); PrevTokLocation = Tok.getAnnotationEndLoc(); PP.Lex(Tok); return Loc; } /// ConsumeParen - This consume method keeps the paren count up-to-date. 
/// SourceLocation ConsumeParen() { assert(isTokenParen() && "wrong consume method"); if (Tok.getKind() == tok::l_paren) ++ParenCount; else if (ParenCount) { AngleBrackets.clear(*this); --ParenCount; // Don't let unbalanced )'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBracket - This consume method keeps the bracket count up-to-date. /// SourceLocation ConsumeBracket() { assert(isTokenBracket() && "wrong consume method"); if (Tok.getKind() == tok::l_square) ++BracketCount; else if (BracketCount) { AngleBrackets.clear(*this); --BracketCount; // Don't let unbalanced ]'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBrace - This consume method keeps the brace count up-to-date. /// SourceLocation ConsumeBrace() { assert(isTokenBrace() && "wrong consume method"); if (Tok.getKind() == tok::l_brace) ++BraceCount; else if (BraceCount) { AngleBrackets.clear(*this); --BraceCount; // Don't let unbalanced }'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeStringToken - Consume the current 'peek token', lexing a new one /// and returning the token kind. This method is specific to strings, as it /// handles string literal concatenation, as per C99 5.1.1.2, translation /// phase #6. SourceLocation ConsumeStringToken() { assert(isTokenStringLiteral() && "Should only consume string literals with this method"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// Consume the current code-completion token. /// /// This routine can be called to consume the code-completion token and /// continue processing in special cases where \c cutOffParsing() isn't /// desired, such as token caching or completion with lookahead. 
SourceLocation ConsumeCodeCompletionToken() { assert(Tok.is(tok::code_completion)); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } ///\ brief When we are consuming a code-completion token without having /// matched specific position in the grammar, provide code-completion results /// based on context. /// /// \returns the source location of the code-completion token. SourceLocation handleUnexpectedCodeCompletionToken(); /// Abruptly cut off parsing; mainly used when we have reached the /// code-completion point. void cutOffParsing() { if (PP.isCodeCompletionEnabled()) PP.setCodeCompletionReached(); // Cut off parsing by acting as if we reached the end-of-file. Tok.setKind(tok::eof); } /// Determine if we're at the end of the file or at a transition /// between modules. bool isEofOrEom() { tok::TokenKind Kind = Tok.getKind(); return Kind == tok::eof || Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include; } /// Checks if the \p Level is valid for use in a fold expression. bool isFoldOperator(prec::Level Level) const; /// Checks if the \p Kind is a valid operator for fold expressions. bool isFoldOperator(tok::TokenKind Kind) const; /// Initialize all pragma handlers. void initializePragmaHandlers(); /// Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); /// Handle the annotation token produced for /// #pragma comment... 
void HandlePragmaMSComment(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... void HandlePragmaFEnvAccess(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. 
/// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. static ParsedType getTypeAnnotation(const Token &Tok) { return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, ParsedType T) { Tok.setAnnotationValue(T.getAsOpaquePtr()); } /// Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. 
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);

/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords.  This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                     const char *&PrevSpec, unsigned &DiagID,
                     bool &isInvalid) {
  // Fast path: bail out unless an AltiVec/ZVector language mode is active.
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;

  // Only 'vector', 'bool', and (AltiVec only) 'pixel' are context-sensitive
  // here; anything else is handled by normal keyword lookup.
  if (Tok.getIdentifierInfo() != Ident_vector &&
      Tok.getIdentifierInfo() != Ident_bool &&
      (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
    return false;

  return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}

/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
      Tok.getIdentifierInfo() != Ident_vector)
    return false;
  return TryAltiVecVectorTokenOutOfLine();
}

// Out-of-line slow paths for the inline checks above.
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
                              const char *&PrevSpec, unsigned &DiagID,
                              bool &isInvalid);

/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
  assert(getLangOpts().ObjC);
  if (Tok.isAnnotation())
    return false;
  // Lazily look up the identifier the first time it is needed.
  if (!Ident_instancetype)
    Ident_instancetype = PP.getIdentifierInfo("instancetype");
  return Tok.getIdentifierInfo() == Ident_instancetype;
}

/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit.  This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword); /// Get the TemplateIdAnnotation from the token. TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... /// TPA.Revert(); /// class TentativeParsingAction { Parser &P; PreferredTypeBuilder PrevPreferredType; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser& p) : P(p) { PrevPreferredType = P.PreferredType; PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.PreferredType = PrevPreferredType; P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. 
class RevertingTentativeParsingAction
    : private Parser::TentativeParsingAction {
public:
  RevertingTentativeParsingAction(Parser &P)
      : Parser::TentativeParsingAction(P) {}
  ~RevertingTentativeParsingAction() { Revert(); }
};

// Forward declaration only; the full class is defined elsewhere.
class UnannotatedTentativeParsingAction;

/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
  Parser &P;
  Decl *DC;  // Null when there is no ObjC container to leave/re-enter.
  SaveAndRestore<bool> WithinObjCContainer;
public:
  explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
  }
  ~ObjCDeclContextSwitch() {
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
  }
};

/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input.  If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
                      unsigned Diag = diag::err_expected,
                      StringRef DiagMsg = "");

/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);

/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
  OutsideFunction = 0,
  InsideStruct = 1,
  InstanceVariableList = 2,
  AfterMemberFunctionDefinition = 3
};

/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);

/// Return false if the next token is an identifier.
/// An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();

public:
//===--------------------------------------------------------------------===//
// Scope manipulation

/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  Parser *Self;  // Null once the scope has been exited (or was never entered).
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // ParseScope - Construct a new object to manage a scope in the
  // parser Self where the new Scope is created with the flags
  // ScopeFlags, but only when we aren't about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (EnteredScope && !BeforeCompoundStmt)
      Self->EnterScope(ScopeFlags);
    else {
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();
      // Mark as inactive so Exit()/~ParseScope() do nothing.
      this->Self = nullptr;
    }
  }

  // Exit - Exit the scope associated with this object now, rather
  // than waiting until the object is destroyed.
  void Exit() {
    if (Self) {
      Self->ExitScope();
      Self = nullptr;
    }
  }

  ~ParseScope() {
    Exit();
  }
};

/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);

/// ExitScope - Pop a scope off the scope stack.
void ExitScope();

private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
  Scope *CurScope;
  unsigned OldFlags;  // Flags to restore on destruction.
  ParseScopeFlags(const ParseScopeFlags &) = delete;
  void operator=(const ParseScopeFlags &) = delete;

public:
  ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
  ~ParseScopeFlags();
};

//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.

public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
// Convenience overload: diagnose at the current token.
DiagnosticBuilder Diag(unsigned DiagID) {
  return Diag(Tok, DiagID);
}

private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
                        SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);

public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
  StopAtSemi = 1 << 0,  ///< Stop skipping at semicolon
  /// Stop skipping at specified token, but don't skip the token itself
  StopBeforeMatch = 1 << 1,
  StopAtCodeCompletion = 1 << 2  ///< Stop at code completion
};

// Allow SkipUntilFlags values to be combined with '|'.
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                     static_cast<unsigned>(R));
}

/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified).  Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point.  If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
// The one-, two-, and three-token overloads below all forward to the
// ArrayRef-taking overload declared last.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2, T3};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();

private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.

struct ParsingClass;

/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
  virtual ~LateParsedDeclaration();

  // Default implementations do nothing; subclasses override the ones that
  // apply to the kind of late-parsed entity they hold.
  virtual void ParseLexedMethodDeclarations();
  virtual void ParseLexedMemberInitializers();
  virtual void ParseLexedMethodDefs();
  virtual void ParseLexedAttributes();
};

/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
  LateParsedClass(Parser *P, ParsingClass *C);
  ~LateParsedClass() override;

  void ParseLexedMethodDeclarations() override;
  void ParseLexedMemberInitializers() override;
  void ParseLexedMethodDefs() override;
  void ParseLexedAttributes() override;

private:
  Parser *Self;
  ParsingClass *Class;  // The class whose members are parsed recursively.
};

/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// member declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
  Parser *Self;
  CachedTokens Toks;            // The attribute's argument tokens.
  IdentifierInfo &AttrName;
  IdentifierInfo *MacroII = nullptr;
  SourceLocation AttrNameLoc;
  SmallVector<Decl*, 2> Decls;  // Declarations the attribute applies to.

  explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                               SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

  void ParseLexedAttributes() override;

  void addDecl(Decl *D) { Decls.push_back(D); }
};

// A list of late-parsed attributes.  Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

  bool parseSoon() { return ParseSoon; }

private:
  bool ParseSoon;  // Are we planning to parse these shortly after creation?
};

/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
  Parser *Self;
  Decl *D;
  CachedTokens Toks;

  /// Whether this member function had an associated template
  /// scope. When true, D is a template declaration.
  /// otherwise, it is a member function declaration.
bool TemplateScope;

explicit LexedMethod(Parser* P, Decl *MD)
    : Self(P), D(MD), TemplateScope(false) {}

void ParseLexedMethodDefs() override;
};

/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  explicit LateParsedDefaultArgument(
      Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

  /// Param - The parameter declaration for this parameter.
  Decl *Param;

  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','. This will be NULL for parameters that have no
  /// default argument.
  std::unique_ptr<CachedTokens> Toks;
};

/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), TemplateScope(false),
        ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser* Self;

  /// Method - The method declaration.
  Decl *Method;

  /// Whether this member function had an associated template
  /// scope. When true, D is a template declaration.
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};

/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must to be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

  void ParseLexedMemberInitializers() override;

  Parser *Self;

  /// Field - The field declaration.
  Decl *Field;

  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};

/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;

/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), TemplateScope(false),
        IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }

  /// Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// Whether this class had an associated template
  /// scope. When true, TagOrTemplate is a template declaration;
  /// otherwise, it is a tag declaration.
  bool TemplateScope : 1;

  /// Whether this class is an __interface.
bool IsInterface : 1;

/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;

/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};

/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;

/// Return the innermost class currently being parsed; requires a
/// non-empty ClassStack.
ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}

/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
  Parser &P;
  bool Popped;  // True once Pop() has been called explicitly.
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }

  /// Pop this class of the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  ~ParsingClassDefinition() {
    if (!Popped)
      P.PopParsingClass(State);
  }
};

/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }

  // Template declaration or explicit specialization, depending on
  // isSpecialization.
  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  // Explicit instantiation, with the locations of its keywords.
  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false){ }

  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;

  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;

SourceRange getSourceRange() const LLVM_READONLY;
};

void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);

// Static callbacks registered with Sema for late template parsing.
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);

Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);

/// What kind of cached initializer tokens are being consumed/stored.
enum CachedInitKind {
  CIK_DefaultArgument,
  CIK_DefaultInitializer
};

NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
                                   ParsedAttributes &AccessAttrs,
                                   ParsingDeclarator &D,
                                   const ParsedTemplateInfo &TemplateInfo,
                                   const VirtSpecifiers &VS,
                                   SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
                             bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
                         bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
// Single-token convenience form: forwards to the two-token overload with
// T1 used for both stop tokens.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true) {
  return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true);

//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
struct ParsedAttributesWithRange : ParsedAttributes {
  ParsedAttributesWithRange(AttributeFactory &factory)
      : ParsedAttributes(factory) {}

  // Clears both the attribute list and the recorded source range.
  void clear() {
    ParsedAttributes::clear();
    Range = SourceRange();
  }

  SourceRange Range;
};
struct ParsedAttributesViewWithRange : ParsedAttributesView {
  ParsedAttributesViewWithRange() : ParsedAttributesView() {}
  void clearListOnly() {
    ParsedAttributesView::clearListOnly();
    Range = SourceRange();
  }

  SourceRange Range;
};

DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
                                        ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
    ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr,
    AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
                                              ParsingDeclSpec &DS,
                                              AccessSpecifier AS);

void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
                              const ParsedTemplateInfo &TemplateInfo =
                                  ParsedTemplateInfo(),
                              LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
ExprResult ParseAsmStringLiteral();

// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
                                      ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
    ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
    SmallVectorImpl<IdentifierLocPair> &protocolIdents,
    SourceLocation &rAngleLoc, bool mayBeProtocolList = true);

void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
                                      BalancedDelimiterTracker &T,
                                      SmallVectorImpl<Decl *> &AllIvarDecls,
                                      bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
                                     tok::ObjCKeywordKind visibility,
                                     SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
                                 SmallVectorImpl<SourceLocation> &PLocs,
                                 bool WarnOnDeclarations,
                                 bool ForObjCContainer,
                                 SourceLocation &LAngleLoc,
                                 SourceLocation &EndProtoLoc,
                                 bool consumeLastToken);

/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
    ParsedType baseType,
    SourceLocation &typeArgsLAngleLoc,
    SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc,
    SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols,
    SmallVectorImpl<SourceLocation> &protocolLocs,
    SourceLocation &protocolRAngleLoc,
    bool consumeLastToken,
    bool warnOnIncompleteProtocols);

/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
    ParsedType baseType,
    SourceLocation &typeArgsLAngleLoc,
    SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc,
    SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols,
    SmallVectorImpl<SourceLocation> &protocolLocs,
    SourceLocation &protocolRAngleLoc,
    bool consumeLastToken);

/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);

/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
                                                  ParsedType type,
                                                  bool consumeLastToken,
                                                  SourceLocation &endLoc);

void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
                                Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
                                              ParsedAttributes &prefixAttrs);

/// RAII-style helper holding the state accumulated while parsing an
/// Objective-C @implementation; registers itself as the parser's
/// CurParsedObjCImpl for its lifetime.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
      : P(parser), Dcl(D), HasCFunction(false) {
    P.CurParsedObjCImpl = this;
    Finished = false;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  bool Finished;  // Set by finish(); queried via isFinished().
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);

DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
                                                    ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);

IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);

// Definitions for Objective-c context sensitive keywords
// recognition.
enum ObjCTypeQual {
  objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
  objc_nonnull, objc_nullable, objc_null_unspecified,
  objc_NumQuals  // Sentinel: number of qualifiers, used to size ObjCTypeQuals.
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];

bool isTokIdentifier_in() const;

ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
                             ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
    tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
    bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(
    SourceLocation mLoc, tok::TokenKind mType,
    tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
    bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);

Decl *ParseObjCMethodDefinition();

public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.

/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
  NotTypeCast = 0,
  MaybeTypeCast,
  IsTypeCast
};

ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
    TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);

ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
                                unsigned &NumLineToksConsumed,
                                bool IsUnevaluated);

private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);

ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);

ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
                                      prec::Level MinPrec);
// Full form reporting via NotCastExpr whether a cast-expression was seen,
// and a convenience overload with defaulted arguments.
ExprResult ParseCastExpression(bool isUnaryExpression,
                               bool isAddressOfOperand,
                               bool &NotCastExpr,
                               TypeCastState isTypeCast,
                               bool isVectorLiteral = false);
ExprResult ParseCastExpression(bool isUnaryExpression,
                               bool isAddressOfOperand = false,
                               TypeCastState isTypeCast = NotTypeCast,
                               bool isVectorLiteral = false);

/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();

/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
  tok::TokenKind K = Tok.getKind();
  return (K == tok::l_square || K == tok::l_paren ||
          K == tok::period || K == tok::arrow ||
          K == tok::plusplus || K == tok::minusminus);
}

bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
                                         const Token &OpToken);
// Convenience overload: look up the current angle-bracket tracking state
// and forward to the two-argument form; false if none is being tracked.
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
  if (auto *Info = AngleBrackets.getCurrent(*this))
    return checkPotentialAngleBracketDelimiter(*Info, OpToken);
  return false;
}

ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();

ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
                                              bool &isCastExpr,
                                              ParsedType &CastTy,
                                              SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;

/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, llvm::function_ref<void()> ExpressionStarts = llvm::function_ref<void()>()); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' FoldExpr, // Also allow fold-expression <anything> CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext, bool *MayBePseudoDestructor 
= nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr, bool OnlyNamespace = false); //===--------------------------------------------------------------------===// // C++0x 5.1.2: Lambda expressions // [...] () -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro, bool *SkippedInits = nullptr); bool TryParseLambdaIntroducer(LambdaIntroducer &Intro); ExprResult ParseLambdaExpressionAfterIntroducer( LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. 
ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range, bool MayBeFollowedByDirectInit); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while/for condition expression. struct ForRangeInfo; Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK, ForRangeInfo *FRI = nullptr); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. 
/// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... ExprResult ParseInitializer() { if (Tok.isNot(tok::l_brace)) return ParseAssignmentExpression(); return ParseBraceInitializer(); } bool MayBeDesignationStart(); ExprResult ParseBraceInitializer(); ExprResult ParseInitializerWithPotentialDesignator(); //===--------------------------------------------------------------------===// // clang Expressions ExprResult ParseBlockLiteralExpression(); // ^{...} //===--------------------------------------------------------------------===// // Objective-C Expressions ExprResult ParseObjCAtExpression(SourceLocation AtLocation); ExprResult ParseObjCStringLiteral(SourceLocation AtLoc); ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc); ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc); ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue); ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc); ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc); ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc); ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc); ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc); bool isSimpleObjCMessageExpression(); ExprResult ParseObjCMessageExpression(); ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); ExprResult ParseAssignmentExprWithObjCMessageExprStart( SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr); //===--------------------------------------------------------------------===// // C99 6.8: Statements and Blocks. /// A SmallVector of statements, with stack size 32 (as that is the only one /// used.) 
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;

StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
               ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
    StmtVector &Stmts, ParsedStmtContext StmtCtx,
    SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
                                         StmtVector &Stmts,
                                         ParsedStmtContext StmtCtx,
                                         SourceLocation *TrailingElseLoc,
                                         ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
                                 ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
                              bool MissingCase = false,
                              ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
                                  unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
                               Sema::ConditionResult &CondResult,
                               SourceLocation Loc,
                               Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
                               ParsedStmtContext StmtCtx,
                               SourceLocation *TrailingElseLoc,
                               ParsedAttributesWithRange &Attrs);

/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
  /// Parse the block; this code is always used.
  IEB_Parse,
  /// Skip the block entirely; this code is never used.
  IEB_Skip,
  /// Parse the block as a dependent block, which may be used in
  /// some template instantiations but not others.
  IEB_Dependent
};

/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
  /// The location of the initial keyword.
  SourceLocation KeywordLoc;
  /// Whether this is an __if_exists block (rather than an
  /// __if_not_exists block).
  bool IsIfExists;

  /// Nested-name-specifier preceding the name.
  CXXScopeSpec SS;

  /// The name we're looking for.
  UnqualifiedId Name;

  /// The behavior this __if_exists or __if_not_exists block
  /// should follow.
  IfExistsBehavior Behavior;
};

bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
                                            ParsedAttributes &AccessAttrs,
                                            AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
                                            bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
                         SmallVectorImpl<Expr *> &Constraints,
                         SmallVectorImpl<Expr *> &Exprs);

//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks

StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);

//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks

StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();

//===--------------------------------------------------------------------===//
// Objective-C Statements

StmtResult ParseObjCAtStatement(SourceLocation atLoc,
                                ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);

//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.

/// A context for parsing declaration specifiers.  TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
  DSC_normal,         // normal context
  DSC_class,          // class context, enables 'friend'
  DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
  DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
  DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
  DSC_top_level, // top-level/namespace declaration context
  DSC_template_param, // template parameter context
  DSC_template_type_arg, // template type argument context
  DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
  DSC_condition // condition declaration context
};

/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: return false; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which we can perform class template argument /// deduction? static bool isClassTemplateDeductionContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_type_specifier: return true; case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Information on a C++0x for-range-initializer found while parsing a /// declaration which turns out to be a for-range-declaration. 
struct ForRangeInit {
  SourceLocation ColonLoc;
  ExprResult RangeExpr;

  // A for-range declaration was parsed iff the ':' location was recorded.
  bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
struct ForRangeInfo : ForRangeInit {
  StmtResult LoopVar;
};

DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                SourceLocation &DeclEnd,
                                ParsedAttributesWithRange &attrs);
DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context,
                                      SourceLocation &DeclEnd,
                                      ParsedAttributesWithRange &attrs,
                                      bool RequireSemi,
                                      ForRangeInit *FRI = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                              SourceLocation *DeclEnd = nullptr,
                              ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
             const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
    Declarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();

bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                      const ParsedTemplateInfo &TemplateInfo,
                      AccessSpecifier AS, DeclSpecContext DSC,
                      ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
    DeclSpec &DS,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    AccessSpecifier AS = AS_none,
    DeclSpecContext DSC = DeclSpecContext::DSC_normal,
    LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
    DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
    LateParsedAttrList *LateAttrs = nullptr);

void ParseSpecifierQualifierList(
    DeclSpec &DS, AccessSpecifier AS = AS_none,
    DeclSpecContext DSC = DeclSpecContext::DSC_normal);

void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
                                DeclaratorContext Context);

void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
                          Decl *TagDecl);

void ParseStructDeclaration(
    ParsingDeclSpec &DS,
    llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();

/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier.  Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;

/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  // C++ requires tentative parsing; C can decide from the specifiers alone.
  if (getLangOpts().CPlusPlus)
    return isCXXDeclarationSpecifier() == TPResult::True;
  return isDeclarationSpecifier(true);
}

/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  if (getLangOpts().CPlusPlus)
    return isCXXDeclarationStatement();
  return isDeclarationSpecifier(true);
}

/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  // Let Sema know a loop header is being examined before disambiguating.
  if (getLangOpts().OpenMP)
    Actions.startOpenMPLoop();
  if (getLangOpts().CPlusPlus)
    return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
  return isDeclarationSpecifier(true);
}

/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();

/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();

/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);

/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
  TypeIdInParens,
  TypeIdUnambiguous,
  TypeIdAsTemplateArgument
};

/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdInParens, isAmbiguous);
  // In C there is no type-id/expression ambiguity to report.
  isAmbiguous = false;
  return isTypeSpecifierQualifier();
}
bool isTypeIdInParens() {
  bool isAmbiguous;
  return isTypeIdInParens(isAmbiguous);
}

/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
  bool IsAmbiguous;
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
  return isTypeSpecifierQualifier();
}

/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();

/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
  Expression,    ///< Disambiguated as an expression (either kind).
  ConditionDecl, ///< Disambiguated as the declaration form of condition.
  InitStmtDecl,  ///< Disambiguated as a simple-declaration init-statement.
  ForRangeDecl,  ///< Disambiguated as a for-range declaration.
  Error          ///< Can't be any of the above!
};

/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
                                         bool CanBeForRangeDecl);

bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool isAmbiguous;
  return isCXXTypeId(Context, isAmbiguous);
}

/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
  True, False, Ambiguous, Error
};

/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPR_true if this token starts an expression, \c TPR_false if
/// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
/// tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);

/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
                          bool *InvalidAsDeclSpec = nullptr);

/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();

/// Determine whether the current token sequence might be
///   '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);

/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);

// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl); TPResult TryParseTypeofSpecifier(); TPResult TryParseProtocolQualifiers(); TPResult TryParsePtrOperatorSeq(); TPResult TryParseOperatorId(); TPResult TryParseInitDeclaratorList(); TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true, bool mayHaveDirectInit = false); TPResult TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false); TPResult TryParseFunctionDeclarator(); TPResult TryParseBracketDeclarator(); TPResult TryConsumeDeclarationSpecifier(); public: TypeResult ParseTypeName(SourceRange *Range = nullptr, DeclaratorContext Context = DeclaratorContext::TypeNameContext, AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr, ParsedAttributes *Attrs = nullptr); private: void ParseBlockId(SourceLocation CaretLoc); /// Are [[]] attributes enabled? bool standardAttributesAllowed() const { const LangOptions &LO = getLangOpts(); return LO.DoubleSquareBracketAttributes; } // Check for the start of an attribute-specifier-seq in a context where an // attribute is not allowed. 
bool CheckProhibitedCXX11Attribute() { assert(Tok.is(tok::l_square)); if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square)) return false; return DiagnoseProhibitedCXX11Attribute(); } bool DiagnoseProhibitedCXX11Attribute(); void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation) { if (!standardAttributesAllowed()) return; if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) && Tok.isNot(tok::kw_alignas)) return; DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation); } void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation); void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs, DeclSpec &DS, Sema::TagUseKind TUK); // FixItLoc = possible correct location for the attributes void ProhibitAttributes(ParsedAttributesWithRange &Attrs, SourceLocation FixItLoc = SourceLocation()) { if (Attrs.Range.isInvalid()) return; DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc); Attrs.clear(); } void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs, SourceLocation FixItLoc = SourceLocation()) { if (Attrs.Range.isInvalid()) return; DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc); Attrs.clearListOnly(); } void DiagnoseProhibitedAttributes(const SourceRange &Range, SourceLocation FixItLoc); // Forbid C++11 and C2x attributes that appear on certain syntactic locations // which standard permits but we don't supported yet, for example, attributes // appertain to decl specifiers. void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs, unsigned DiagID); /// Skip C++11 and C2x attributes and return the end location of the /// last one. /// \returns SourceLocation() if there are no attributes. SourceLocation SkipCXX11Attributes(); /// Diagnose and skip C++11 and C2x attributes that appear in syntactic /// locations where attributes are not allowed. 
void DiagnoseAndSkipCXX11Attributes(); /// Parses syntax-generic attribute arguments for attributes which are /// known to the implementation, and adds them to the given ParsedAttributes /// list with the given attribute syntax. Returns the number of arguments /// parsed for the attribute. unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseGNUAttributes(Declarator &D, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributes attrs(AttrFactory); SourceLocation endLoc; ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D); D.takeAttributes(attrs, endLoc); } } void MaybeParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) ParseGNUAttributes(attrs, endLoc, LateAttrs); } void ParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseCXX11Attributes(Declarator &D) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } void MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (standardAttributesAllowed() 
&& isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); } } void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) ParseCXX11Attributes(attrs, endLoc); } void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs, SourceLocation *EndLoc = nullptr); void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// Parses a C++11 (or C2x)-style attribute argument list. Returns true /// if this results in adding an attribute to the ParsedAttributes list. bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc); IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) ParseMicrosoftDeclSpecs(Attrs, End); } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void 
ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); /// Parses opencl_unroll_hint attribute if language is OpenCL v2.0 /// or higher. /// \return false if error happens. bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) { if (getLangOpts().OpenCL) return ParseOpenCLUnrollHintAttribute(Attrs); return true; } /// Parses opencl_unroll_hint attribute. /// \return false if error happens. bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax 
Syntax); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS, SourceLocation StartLoc, SourceLocation EndLoc); void ParseUnderlyingTypeSpecifier(DeclSpec &DS); void ParseAtomicSpecifier(DeclSpec &DS); ExprResult ParseAlignArgument(SourceLocation Start, SourceLocation &EllipsisLoc); void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); } void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface, SourceLocation FriendLoc); bool isCXX11FinalKeyword() const; /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to /// enter a new C++ declarator scope and exit it when the function is /// finished. class DeclaratorScopeObj { Parser &P; CXXScopeSpec &SS; bool EnteredScope; bool CreatedScope; public: DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {} void EnterDeclaratorScope() { assert(!EnteredScope && "Already entered the scope!"); assert(SS.isSet() && "C++ scope was not set!"); CreatedScope = true; P.EnterScope(0); // Not a decl scope. if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS)) EnteredScope = true; } ~DeclaratorScopeObj() { if (EnteredScope) { assert(SS.isSet() && "C++ scope was cleared ?"); P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS); } if (CreatedScope) P.ExitScope(); } }; /// ParseDeclarator - Parse and verify a newly-initialized declarator. void ParseDeclarator(Declarator &D); /// A function that parses a variant of direct-declarator. 
typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( Declarator &D, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. 
CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); struct InnerNamespaceInfo { SourceLocation NamespaceLoc; SourceLocation InlineLoc; SourceLocation IdentLoc; IdentifierInfo *Ident; }; using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>; void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl 
*ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, ParsedAttributes &Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void 
ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse clauses for '#pragma omp declare target'. DeclGroupPtrTy ParseOMPDeclareTargetClauses(); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// Parses 'omp declare mapper' directive. DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS); /// Parses variable declaration in 'omp declare mapper' directive. TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range, DeclarationName &Name, AccessSpecifier AS = AS_none); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. 
/// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. StmtResult ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. 
/// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *TailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val; SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers> MapTypeModifiers; SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers> MapTypeModifiersLoc; OpenMPMapClauseKind MapType = OMPC_MAP_unknown; bool IsMapTypeImplicit = false; SourceLocation DepLinMapLoc; }; /// Parses clauses with list. bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, ParsedType ObjectType, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] 
map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); bool isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, 
SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true); void AnnotateTemplateIdTokenAsType(bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); //===--------------------------------------------------------------------===// // Embarcadero: Arary and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo 
*MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override; void CodeCompleteNaturalLanguage() override; }; } // end namespace clang #endif
// spfilter.h
//--------------------------------------------------------------------------------
// Copyright (c) 2017-2020, sanko-shoko. All rights reserved.
//--------------------------------------------------------------------------------

#ifndef __SP_FILTER_H__
#define __SP_FILTER_H__

#include "spcore/spcore.h"

namespace sp{

    //--------------------------------------------------------------------------------
    // filter
    //--------------------------------------------------------------------------------

    // Apply an arbitrary 2-D kernel to a 2-D image.
    // TYPE/ELEM describe the destination pixel (ELEM = channel element type),
    // TYPE0/ELEM0 the source pixel. Each output value is normalized by the sum
    // of |kernel| weights that actually fell inside the image, so borders are
    // handled without explicit padding.
    template <typename TYPE, typename ELEM = TYPE, typename TYPE0, typename ELEM0 = ELEM>
    SP_CPUFUNC void filter2d(Mem<TYPE> &dst, const Mem<TYPE0> &src, const Mem<SP_REAL> &kernel){
        SP_ASSERT(checkPtr(src, 2) && checkPtr(kernel, 2));

        dst.resize(2, src.dsize);

        // If dst aliases src (in-place call), filter from a private copy.
        const Mem<TYPE0> &tmp = (reinterpret_cast<const Mem<TYPE0>*>(&dst) != &src) ? src : clone(src);

        const int ch = sizeof(TYPE) / sizeof(ELEM); // channels per pixel

        const int halfX = kernel.dsize[0] / 2;
        const int halfY = kernel.dsize[1] / 2;

        const Rect2 rect = getRect2(dst.dsize);

#if SP_USE_OMP
#pragma omp parallel for
#endif
        for (int v = 0; v < dst.dsize[1]; v++){
            for (int u = 0; u < dst.dsize[0]; u++){
                for (int c = 0; c < ch; c++){

                    double sum = 0.0, div = 0.0;
                    for (int ky = -halfY; ky <= halfY; ky++){
                        for (int kx = -halfX; kx <= halfX; kx++){
                            // skip kernel taps that fall outside the image
                            if (inRect(rect, u + kx, v + ky) == false) continue;

                            const ELEM0 &val = acs2<TYPE0, ELEM0>(tmp, u + kx, v + ky, c);
                            const SP_REAL s = acs2(kernel, kx + halfX, ky + halfY);
                            sum += s * val;
                            div += fabs(s);
                        }
                    }

                    cnvVal(acs2<TYPE, ELEM>(dst, u, v, c), (div > 0.0) ? sum / div : 0.0);
                }
            }
        }
    }

    // Separable filtering: horizontal (X) pass with a 1-D kernel.
    // NOTE(review): tmp is declared Mem2<TYPE> but is initialized from src,
    // which is Mem<TYPE0>; this can only be valid when the types agree
    // (filter2d uses Mem<TYPE0> here) -- confirm intended. Also note the
    // missing SP_ASSERT input check that filter2d performs.
    template <typename TYPE, typename ELEM = TYPE, typename TYPE0, typename ELEM0 = ELEM>
    SP_CPUFUNC void filterX(Mem<TYPE> &dst, const Mem<TYPE0> &src, const Mem<SP_REAL> &kernel){
        dst.resize(2, src.dsize);

        // clone when called in place (dst aliases src)
        const Mem2<TYPE> &tmp = (reinterpret_cast<const Mem<TYPE0>*>(&dst) != &src) ? src : clone(src);

        const int ch = sizeof(TYPE) / sizeof(ELEM);

        const int halfX = kernel.dsize[0] / 2;

        const Rect2 rect = getRect2(dst.dsize);

#if SP_USE_OMP
#pragma omp parallel for
#endif
        for (int v = 0; v < dst.dsize[1]; v++){
            for (int u = 0; u < dst.dsize[0]; u++){
                for (int c = 0; c < ch; c++){

                    double sum = 0.0, div = 0.0;
                    for (int kx = -halfX; kx <= halfX; kx++){
                        if (inRect(rect, u + kx, v) == false) continue;

                        const ELEM0 &val = acs2<TYPE0, ELEM0>(tmp, u + kx, v, c);
                        const SP_REAL s = acs1(kernel, kx + halfX);
                        sum += s * val;
                        div += fabs(s);
                    }

                    acs2<TYPE, ELEM>(dst, u, v, c) = cast<ELEM>((div > 0.0) ? sum / div : 0.0);
                }
            }
        }
    }

    // Separable filtering: vertical (Y) pass with a 1-D kernel.
    // NOTE(review): tmp is Mem<TYPE> while src is Mem<TYPE0> (filter2d uses
    // Mem<TYPE0>) -- only valid when TYPE == TYPE0; confirm intended.
    template <typename TYPE, typename ELEM = TYPE, typename TYPE0, typename ELEM0 = ELEM>
    SP_CPUFUNC void filterY(Mem<TYPE> &dst, const Mem<TYPE0> &src, const Mem<SP_REAL> &kernel){
        dst.resize(2, src.dsize);

        const Mem<TYPE> &tmp = (reinterpret_cast<const Mem<TYPE0>*>(&dst) != &src) ? src : clone(src);

        const int ch = sizeof(TYPE) / sizeof(ELEM);

        const int halfY = kernel.dsize[0] / 2;

        const Rect2 rect = getRect2(dst.dsize);

#if SP_USE_OMP
#pragma omp parallel for
#endif
        for (int v = 0; v < dst.dsize[1]; v++){
            for (int u = 0; u < dst.dsize[0]; u++){
                for (int c = 0; c < ch; c++){

                    double sum = 0.0, div = 0.0;
                    for (int ky = -halfY; ky <= halfY; ky++){
                        if (inRect(rect, u, v + ky) == false) continue;

                        const ELEM0 &val = acs2<TYPE0, ELEM0>(tmp, u, v + ky, c);
                        const SP_REAL s = acs1(kernel, ky + halfY);
                        sum += s * val;
                        div += fabs(s);
                    }

                    acs2<TYPE, ELEM>(dst, u, v, c) = cast<ELEM>((div > 0.0) ? sum / div : 0.0);
                }
            }
        }
    }

    //--------------------------------------------------------------------------------
    // gaussian filter
    //--------------------------------------------------------------------------------

    // filter window size <-> gaussian sigma
    //
    // half = (window size) / 2
    // sigma = 0.3 * (half - 1) + 0.8
    // half = round((sigma - 0.8) / 0.3 + 1)

    // Gaussian blur via two separable 1-D passes (filterX then filterY).
    // The kernel is left unnormalized; the passes divide by the weight sum.
    template <typename TYPE, typename ELEM = TYPE>
    SP_CPUFUNC void gaussianFilter(Mem<TYPE> &dst, const Mem<TYPE> &src, const double sigma = 0.8){
        const int half = max(1, round((sigma - 0.8) / 0.3 + 1));

        Mem1<SP_REAL> kernel(2 * half + 1);
        for (int k = -half; k <= half; k++){
            const double r = k * k;
            kernel(k + half) = exp(-r / (2.0 * sq(sigma)));
        }

        Mem2<TYPE> tmp;
        filterX<TYPE, ELEM>(tmp, src, kernel);
        filterY<TYPE, ELEM>(dst, tmp, kernel);
    }

    // Fixed 3x3 gaussian (1-2-1 x 1-2-1 kernel, weight sum 16). Border pixels
    // are handled by clamping the neighbor index to the image edge.
    template <typename TYPE, typename TYPE0>
    SP_CPUFUNC void gaussianFilter3x3(Mem<TYPE> &dst, const Mem<TYPE0> &src) {
        const Mem<TYPE0> &tmp = (reinterpret_cast<const Mem<TYPE0>*>(&dst) != &src) ? src : clone(src);

        const int dsize0 = src.dsize[0];
        const int dsize1 = src.dsize[1];

        const int dsize[2] = { dsize0, dsize1 };
        dst.resize(2, dsize);

        const TYPE0 *psrc = tmp.ptr;
        TYPE *pdst = dst.ptr;

        for (int v = 0; v < dsize1; v++) {
            // row indices, clamped at the top/bottom edge
            const int v0 = v + ((v == 0) ? 0 : -1);
            const int v1 = v + 0;
            const int v2 = v + ((v == dsize1 - 1) ? 0 : +1);

            const TYPE0 *psrc0 = &psrc[v0 * dsize0];
            const TYPE0 *psrc1 = &psrc[v1 * dsize0];
            const TYPE0 *psrc2 = &psrc[v2 * dsize0];

            TYPE *pd = &pdst[v * dsize0];

            for (int u = 0; u < dsize0; u++) {
                // column indices, clamped at the left/right edge
                const int u0 = u + ((u == 0) ? 0 : -1);
                const int u1 = u + 0;
                const int u2 = u + ((u == dsize0 - 1) ? 0 : +1);

                const TYPE0 a00 = psrc0[u0]; const TYPE0 a01 = psrc0[u1]; const TYPE0 a02 = psrc0[u2];
                const TYPE0 a10 = psrc1[u0]; const TYPE0 a11 = psrc1[u1]; const TYPE0 a12 = psrc1[u2];
                const TYPE0 a20 = psrc2[u0]; const TYPE0 a21 = psrc2[u1]; const TYPE0 a22 = psrc2[u2];

                const double d = (a00 + 2.0 * a01 + a02) + 2.0 * (a10 + 2.0 * a11 + a12) + (a20 + 2.0 * a21 + a22);

                *pd++ = cast<TYPE>(d / 16.0);
            }
        }
    }

    //--------------------------------------------------------------------------------
    // box filter
    //--------------------------------------------------------------------------------

    // Mean filter of winSize x winSize via two separable uniform passes.
    template <typename TYPE, typename ELEM = TYPE>
    SP_CPUFUNC void boxFilter(Mem<TYPE> &dst, const Mem<TYPE> &src, const int winSize) {
        Mem1<SP_REAL> kernel(winSize);
        for (int k = 0; k < winSize; k++) {
            kernel(k) = static_cast<SP_REAL>(1.0);
        }

        Mem2<TYPE> tmp;
        filterX<TYPE, ELEM>(tmp, src, kernel);
        filterY<TYPE, ELEM>(dst, tmp, kernel);
    }

    // Fixed 3x3 mean filter with edge clamping.
    template <typename TYPE, typename TYPE0>
    SP_CPUFUNC void boxFilter3x3(Mem<TYPE> &dst, const Mem<TYPE0> &src) {
        const Mem<TYPE0> &tmp = (reinterpret_cast<const Mem<TYPE0>*>(&dst) != &src) ? src : clone(src);

        const int dsize0 = src.dsize[0];
        const int dsize1 = src.dsize[1];

        const int dsize[2] = { dsize0, dsize1 };
        dst.resize(2, dsize);

        const TYPE0 *psrc = tmp.ptr;
        TYPE *pdst = dst.ptr;

        for (int v = 0; v < dsize1; v++) {
            const int v0 = v + ((v == 0) ? 0 : -1);
            const int v1 = v + 0;
            const int v2 = v + ((v == dsize1 - 1) ? 0 : +1);

            const TYPE0 *psrc0 = &psrc[v0 * dsize0];
            const TYPE0 *psrc1 = &psrc[v1 * dsize0];
            const TYPE0 *psrc2 = &psrc[v2 * dsize0];

            TYPE *pd = &pdst[v * dsize0];

            for (int u = 0; u < dsize0; u++) {
                const int u0 = u + ((u == 0) ? 0 : -1);
                const int u1 = u + 0;
                const int u2 = u + ((u == dsize0 - 1) ?
0 : +1); const TYPE0 a00 = psrc0[u0]; const TYPE0 a01 = psrc0[u1]; const TYPE0 a02 = psrc0[u2]; const TYPE0 a10 = psrc1[u0]; const TYPE0 a11 = psrc1[u1]; const TYPE0 a12 = psrc1[u2]; const TYPE0 a20 = psrc2[u0]; const TYPE0 a21 = psrc2[u1]; const TYPE0 a22 = psrc2[u2]; const SP_REAL d = (a00 + a01 + a02 + a10 + a11 + a12 + a20 + a21 + a22) / 9.0; cnvVal(*pd++, d); } } } //-------------------------------------------------------------------------------- // max/min filter //-------------------------------------------------------------------------------- template <typename TYPE> SP_CPUFUNC void maxFilter(Mem<TYPE> &dst, const Mem<TYPE> &src, const int winSize) { dst.resize(2, src.dsize); const Mem<TYPE> &tmp = (&dst != &src) ? src : clone(src); const int offset = winSize / 2; for (int v = 0; v < dst.dsize[1]; v++) { for (int u = 0; u < dst.dsize[0]; u++) { TYPE maxv = acs2(tmp, u, v); for (int ky = 0; ky < winSize; ky++){ for (int kx = 0; kx < winSize; kx++) { const TYPE &val = acs2(tmp, u + kx - offset, v + ky - offset); maxv = max(maxv, val); } } acs2(dst, u, v) = cast<TYPE>(maxv); } } } template <typename TYPE> SP_CPUFUNC void minFilter(Mem<TYPE> &dst, const Mem<TYPE> &src, const int winSize) { SP_ASSERT(checkPtr(src, 2)); dst.resize(2, src.dsize); const Mem<TYPE> &tmp = (&dst != &src) ? 
src : clone(src); const int offset = winSize / 2; for (int v = 0; v < dst.dsize[1]; v++) { for (int u = 0; u < dst.dsize[0]; u++) { TYPE minv = acs2(tmp, u, v); for (int ky = 0; ky < winSize; ky++) { for (int kx = 0; kx < winSize; kx++) { const TYPE &val = acs2(tmp, u + kx - offset, v + ky - offset); minv = min(minv, val); } } cnvVal(acs2(dst, u, v), minv); } } } //-------------------------------------------------------------------------------- // laplacian filter //-------------------------------------------------------------------------------- template <typename TYPE, typename TYPE0> SP_CPUFUNC void laplacianFilter(Mem<TYPE> &dst, const Mem<TYPE0> &src, const double sigma = 0.8){ SP_ASSERT(checkPtr(src, 2)); const int half = max(1, round((sigma - 0.8) / 0.3 + 1)); Mem2<SP_REAL> kernel(2 * half + 1, 2 * half + 1); for (int y = -half; y <= half; y++){ for (int x = -half; x <= half; x++){ const double r = x * x + y * y; kernel(x + half, y + half) = static_cast<SP_REAL>((r - 2 * sq(sigma)) * exp(-r / (2 * sq(sigma)))); } } filter2d(dst, src, kernel); } template <typename TYPE, typename TYPE0> SP_CPUFUNC void laplacianFilter3x3(Mem<TYPE> &dst, const Mem<TYPE0> &src) { SP_ASSERT(checkPtr(src, 2)); const Mem<TYPE0> &tmp = (reinterpret_cast<const Mem<TYPE0>*>(&dst) != &src) ? src : clone(src); const int dsize0 = src.dsize[0]; const int dsize1 = src.dsize[1]; const int dsize[2] = { dsize0, dsize1 }; dst.resize(2, dsize); const TYPE0 *psrc = tmp.ptr; TYPE *pdst = dst.ptr; for (int v = 0; v < dsize1; v++) { const int v0 = v + ((v == 0) ? 0 : -1); const int v1 = v + 0; const int v2 = v + ((v == dsize1 - 1) ? 0 : +1); const TYPE0 *psrc0 = &psrc[v0 * dsize0]; const TYPE0 *psrc1 = &psrc[v1 * dsize0]; const TYPE0 *psrc2 = &psrc[v2 * dsize0]; TYPE *pd = &pdst[v * dsize0]; for (int u = 0; u < dsize0; u++) { const int u0 = u + ((u == 0) ? 0 : -1); const int u1 = u + 0; const int u2 = u + ((u == dsize0 - 1) ? 
0 : +1);

            const TYPE0 a00 = psrc0[u0]; const TYPE0 a01 = psrc0[u1]; const TYPE0 a02 = psrc0[u2];
            const TYPE0 a10 = psrc1[u0]; const TYPE0 a11 = psrc1[u1]; const TYPE0 a12 = psrc1[u2];
            const TYPE0 a20 = psrc2[u0]; const TYPE0 a21 = psrc2[u1]; const TYPE0 a22 = psrc2[u2];

            // 3x3 Laplacian: 8 * center minus the 8 neighbors, scaled by 1/16
            const double d = (8 * a11 - (a00 + a01 + a02 + a10 + a12 + a20 + a21 + a22)) / 16.0;
            cnvVal(*pd++, d);
        }
    }
}

//--------------------------------------------------------------------------------
// sobel filter
//--------------------------------------------------------------------------------

// Sobel 3x3 gradient: writes horizontal derivative into dX and vertical
// derivative into dY in one pass (continues on the next source line).
template <typename TYPE, typename TYPE0>
SP_CPUFUNC void sobelFilter3x3(Mem<TYPE> &dX, Mem<TYPE> &dY, const Mem<TYPE0> &src) {

    const int dsize0 = src.dsize[0];
    const int dsize1 = src.dsize[1];
    const int dsize[2] = { dsize0, dsize1 };
    dX.resize(2, dsize);
    dY.resize(2, dsize);

    const TYPE0 *psrc = src.ptr;
    TYPE *pdx = dX.ptr;
    TYPE *pdy = dY.ptr;

    for (int v = 0; v < dsize1; v++) {
        // clamp row neighbors at the top/bottom border
        const int v0 = v + ((v == 0) ? 0 : -1);
        const int v1 = v + 0;
        const int v2 = v + ((v == dsize1 - 1) ? 0 : +1);

        const TYPE0 *psrc0 = &psrc[v0 * dsize0];
        const TYPE0 *psrc1 = &psrc[v1 * dsize0];
        const TYPE0 *psrc2 = &psrc[v2 * dsize0];

        for (int u = 0; u < dsize0; u++) {
            // clamp column neighbors at the left/right border
            const int u0 = u + ((u == 0) ? 0 : -1);
            const int u1 = u + 0;
            const int u2 = u + ((u == dsize0 - 1) ?
0 : +1);

            const TYPE0 a00 = psrc0[u0]; const TYPE0 a01 = psrc0[u1]; const TYPE0 a02 = psrc0[u2];
            const TYPE0 a10 = psrc1[u0]; const TYPE0 a11 = psrc1[u1]; const TYPE0 a12 = psrc1[u2];
            const TYPE0 a20 = psrc2[u0]; const TYPE0 a21 = psrc2[u1]; const TYPE0 a22 = psrc2[u2];

            // Sobel kernels [1 2 1] x [-1 0 1], normalized by the weight sum 8
            const double dx = ((a02 + 2 * a12 + a22) - (a00 + 2 * a10 + a20)) / 8.0;
            const double dy = ((a20 + 2 * a21 + a22) - (a00 + 2 * a01 + a02)) / 8.0;
            *pdx++ = cast<TYPE>(dx);
            *pdy++ = cast<TYPE>(dy);
        }
    }
}

//--------------------------------------------------------------------------------
// scharr filter
//--------------------------------------------------------------------------------

// Scharr 3x3 gradient: like sobelFilter3x3 above but with 3/10/3 weights,
// which are more rotationally symmetric (continues on the next source line).
template <typename TYPE, typename TYPE0>
SP_CPUFUNC void scharrFilter3x3(Mem<TYPE> &dX, Mem<TYPE> &dY, const Mem<TYPE0> &src) {

    const int dsize0 = src.dsize[0];
    const int dsize1 = src.dsize[1];
    const int dsize[2] = { dsize0, dsize1 };
    dX.resize(2, dsize);
    dY.resize(2, dsize);

    const TYPE0 *psrc = src.ptr;
    TYPE *pdx = dX.ptr;
    TYPE *pdy = dY.ptr;

    for (int v = 0; v < dsize1; v++) {
        // clamp row neighbors at the top/bottom border
        const int v0 = v + ((v == 0) ? 0 : -1);
        const int v1 = v + 0;
        const int v2 = v + ((v == dsize1 - 1) ? 0 : +1);

        const TYPE0 *psrc0 = &psrc[v0 * dsize0];
        const TYPE0 *psrc1 = &psrc[v1 * dsize0];
        const TYPE0 *psrc2 = &psrc[v2 * dsize0];

        for (int u = 0; u < dsize0; u++) {
            // clamp column neighbors at the left/right border
            const int u0 = u + ((u == 0) ? 0 : -1);
            const int u1 = u + 0;
            const int u2 = u + ((u == dsize0 - 1) ?
0 : +1); const TYPE0 a00 = psrc0[u0]; const TYPE0 a01 = psrc0[u1]; const TYPE0 a02 = psrc0[u2]; const TYPE0 a10 = psrc1[u0]; const TYPE0 a11 = psrc1[u1]; const TYPE0 a12 = psrc1[u2]; const TYPE0 a20 = psrc2[u0]; const TYPE0 a21 = psrc2[u1]; const TYPE0 a22 = psrc2[u2]; const double dx = ((3 * a02 + 10 * a12 + 3 * a22) - (3 * a00 + 10 * a10 + 3 * a20)) / 32.0; const double dy = ((3 * a20 + 10 * a21 + 3 * a22) - (3 * a00 + 10 * a01 + 3 * a02)) / 32.0; *pdx++ = cast<TYPE>(dx); *pdy++ = cast<TYPE>(dy); } } } //-------------------------------------------------------------------------------- // median filter //-------------------------------------------------------------------------------- template <typename TYPE, typename ELEM = TYPE> SP_CPUFUNC void medianFilter(Mem<TYPE> &dst, const Mem<TYPE> &src, const int winSize) { dst.resize(2, src.dsize); const Mem<TYPE> &tmp = (&dst != &src) ? src : clone(src); const int offset = winSize / 2; const int ch = sizeof(TYPE) / sizeof(ELEM); Mem1<ELEM> list; list.resize(winSize * winSize); for (int v = 0; v < dst.dsize[1]; v++) { for (int u = 0; u < dst.dsize[0]; u++) { for (int c = 0; c < ch; c++) { for (int ky = 0; ky < winSize; ky++) { for (int kx = 0; kx < winSize; kx++) { ELEM val = acs2<TYPE, ELEM>(src, u + kx - offset, v + ky - offset, c); list[ky * winSize + kx] = val; } } acs2<TYPE, ELEM>(dst, u, v, c) = median(list); } } } } //-------------------------------------------------------------------------------- // normalize filter //-------------------------------------------------------------------------------- template <typename TYPE, typename ELEM = TYPE> SP_CPUFUNC void normalizeFilter(Mem<TYPE> &dst, const Mem<TYPE> &src, const int winSize, const int maxv = SP_BYTEMAX) { dst.resize(2, src.dsize); Mem2<TYPE> tmp; { Mem1<SP_REAL> kernel(winSize); for (int k = 0; k < winSize; k++) { kernel(k) = static_cast<SP_REAL>(1.0); } filterX<TYPE, ELEM>(tmp, src, kernel); filterY<TYPE, ELEM>(tmp, tmp, kernel); } const int ch = 
sizeof(TYPE) / sizeof(ELEM); for (int v = 0; v < dst.dsize[1]; v++) { for (int u = 0; u < dst.dsize[0]; u++) { for (int c = 0; c < ch; c++) { acs2<TYPE, ELEM>(dst, u, v, c) = (acs2<TYPE, ELEM>(src, u, v, c) - acs2<TYPE, ELEM>(tmp, u, v, c) + maxv) / 2 ; } } } } //-------------------------------------------------------------------------------- // bilateral filter //-------------------------------------------------------------------------------- template <typename TYPE, typename ELEM = TYPE> SP_CPUFUNC void bilateralFilter(Mem<TYPE> &dst, const Mem<TYPE> &src, const double sigma_s, const double sigma_c){ dst.resize(2, src.dsize); const Mem<TYPE> &tmp = (&dst != &src) ? src : clone(src); const int half = max(1, round((sigma_s - 0.8) / 0.3 + 1)); Mem2<SP_REAL> kernel(2 * half + 1, 2 * half + 1); for (int y = -half; y <= +half; y++) { for (int x = -half; x <= +half; x++) { const SP_REAL r2 = sq(x) + sq(y); kernel(x + half, y + half) = exp(-r2 / (2.0 * sq(sigma_s))); } } const SP_REAL expscale = 10.0; Mem1<SP_REAL> exptable(100); for (int i = 0; i < exptable.size(); i++){ const double r = sq(i / expscale); const double v = exp(-r / 2.0); exptable[i] = static_cast<SP_REAL>(v); } const Rect2 rect = getRect2(dst.dsize); const int ch = sizeof(TYPE) / sizeof(ELEM); #if SP_USE_OMP #pragma omp parallel for #endif for (int v = 0; v < dst.dsize[1]; v++){ for (int u = 0; u < dst.dsize[0]; u++){ for (int c = 0; c < ch; c++) { const ELEM base = acs2<TYPE, ELEM>(tmp, u, v, c); SP_REAL sum = 0.0, div = 0.0; for (int ky = -half; ky <= +half; ky++){ for (int kx = -half; kx <= +half; kx++){ if (inRect(rect, u + kx, v + ky) == false) continue; const ELEM &val = acs2<TYPE, ELEM>(tmp, u + kx, v + ky, c); const SP_REAL a = kernel(kx + half, ky + half); const SP_REAL b = exptable(round(fabs(val - base) * expscale / sigma_c)); sum += a * b * val; div += a * b; } } acs2<TYPE, ELEM>(dst, u, v, c) = cast<ELEM>(sum / div); } } } } } #endif
// convolution_1x1_packn_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 1x1 stride-1 convolution: a 1x1 kernel over the whole plane is exactly a
// matrix multiply, so flatten the spatial dims (w*h -> w, h=1) and delegate
// to the packed-fp16 SGEMM path.
static void conv1x1s1_sgemm_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    const int size = w * h;

    // shallow reshape: same data, reinterpreted as a size x 1 plane
    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = size;
    bottom_im2col.h = 1;

    im2col_sgemm_packn_fp16sa_rvv(bottom_im2col, top_blob, kernel, _bias, opt);
}

// 1x1 stride-2 convolution: first subsample the input by taking every second
// pixel in both directions, then run the stride-1 SGEMM path on the shrunk blob.
static void conv1x1s2_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // packn = fp16 lanes per vector register (vlenb bytes / 2 bytes per fp16)
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // elements to skip at the end of each input row pair:
    // (w - 2*outw) leftover in the current row + one full skipped row (w),
    // in packed elements
    const int tailstep = (w - 2 * outw + w) * packn;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const __fp16* r0 = bottom_blob.channel(p);
        __fp16* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // copy one packn-wide pixel, then step over the next one
                vfloat16m1_t _val = vle16_v_f16m1(r0, vl);
                vse16_v_f16m1(outptr, _val, vl);

                r0 += packn * 2;
                outptr += packn;
            }

            r0 += tailstep;
        }
    }

    conv1x1s1_sgemm_packn_fp16sa_rvv(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}